Build: Follow the 'source_structure.rst'
This is the first patch to follow the first commit of source structure
document. The items under the 'secure_fw' folder are re-organized:
- Create/Move some folders/files to follow document.
- Rename some folders to follow it, for example, 'secure_fw/services' to
'secure_fw/partitions'.
- Update affected files to make it work.
This is a big change, to make the structure meet the basic shape of
the structure document defined, and make it easier to be understood
for users. Staging changes are not applicable so they are combined
into one. Because it is not the final shape yet:
- Upcoming updates on the 'secure_fw' folder would follow up soon.
- Fine-tune about the 'source_structure.rst' would come, too.
Change-Id: I5c11175e0a4579cd9b42d3e3519dbffb87334d0b
Signed-off-by: Ken Liu <ken.liu@arm.com>
diff --git a/secure_fw/spm/model_ipc/CMakeLists.inc b/secure_fw/spm/model_ipc/CMakeLists.inc
new file mode 100644
index 0000000..a95fd99
--- /dev/null
+++ b/secure_fw/spm/model_ipc/CMakeLists.inc
@@ -0,0 +1,76 @@
+#-------------------------------------------------------------------------------
+# Copyright (c) 2017-2020, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-------------------------------------------------------------------------------
+
+#This file assumes it will be included from a project specific cmakefile.
+#
+#Inputs:
+# TFM_ROOT_DIR - directory where secure FW source is located.
+#
+#Outputs:
+# Will modify include directories to make the source compile.
+# ALL_SRC_C: C source files to be compiled will be added to this list.
+# This shall be added to your add_executable or add_library command.
+# ALL_SRC_CXX: C++ source files to be compiled will be added to this list.
+# This shall be added to your add_executable or add_library command.
+# ALL_SRC_ASM: assembly source files to be compiled will be added to this
+# list. This shall be added to your add_executable or add_library
+# command.
+# Include directories will be modified by using the include_directories()
+# commands as needed.
+
+if(NOT DEFINED TFM_ROOT_DIR)
+ message(FATAL_ERROR
+ "Please set TFM_ROOT_DIR before including this file.")
+endif()
+
+#Get the current directory where this file is located.
+set(SFW_IPC_SPM_DIR ${CMAKE_CURRENT_LIST_DIR})
+set(SFW_SPM_DIR "${SFW_IPC_SPM_DIR}/..")
+set(SFW_SPM_ARCH_DIR "${SFW_SPM_DIR}/arch")
+set(SFW_SPM_INIT_DIR "${SFW_SPM_DIR}/init")
+
+set (SFW_IPC_SPM_SRC
+ "${SFW_SPM_INIT_DIR}/tfm_boot_data.c"
+ "${SFW_SPM_INIT_DIR}/tfm_core.c"
+ "${SFW_IPC_SPM_DIR}/spm_ipc.c"
+ "${SFW_IPC_SPM_DIR}/spm_psa_client_call.c"
+ "${SFW_IPC_SPM_DIR}/tfm_core_svcalls_ipc.c"
+ "${SFW_IPC_SPM_DIR}/tfm_message_queue.c"
+ "${SFW_IPC_SPM_DIR}/../runtime/tfm_utils.c"
+ "${SFW_IPC_SPM_DIR}/../runtime/tfm_core_utils.c"
+ "${SFW_IPC_SPM_DIR}/../runtime/spm_api.c"
+ "${SFW_IPC_SPM_DIR}/../runtime/tfm_spm_services.c"
+ "${SFW_IPC_SPM_DIR}/../runtime/tfm_secure_api.c"
+ "${SFW_IPC_SPM_DIR}/tfm_pools.c"
+ "${SFW_IPC_SPM_DIR}/tfm_thread.c"
+ "${SFW_IPC_SPM_DIR}/tfm_wait.c"
+ )
+
+if (DEFINED TFM_MULTI_CORE_TOPOLOGY AND TFM_MULTI_CORE_TOPOLOGY)
+ list(APPEND SFW_IPC_SPM_SRC "${SFW_IPC_SPM_DIR}/tfm_rpc.c"
+ "${SFW_IPC_SPM_DIR}/tfm_spe_mailbox.c"
+ "${SFW_IPC_SPM_DIR}/tfm_multi_core.c"
+ "${SFW_IPC_SPM_DIR}/tfm_multi_core_mem_check.c"
+ )
+else ()
+ list(APPEND SFW_IPC_SPM_SRC "${SFW_IPC_SPM_DIR}/tfm_nspm_ipc.c"
+ "${SFW_IPC_SPM_DIR}/tfm_psa_api_veneers.c"
+ "${SFW_IPC_SPM_DIR}/../runtime/tfm_core_mem_check.c"
+ )
+endif ()
+
+#Append all our source files to global lists.
+list(APPEND ALL_SRC_C ${SFW_IPC_SPM_SRC})
+unset(SFW_IPC_SPM_SRC)
+
+#Setting include directories
+embedded_include_directories(PATH ${SFW_IPC_SPM_DIR} ABSOLUTE)
+embedded_include_directories(PATH ${SFW_IPC_SPM_DIR}/include ABSOLUTE)
+embedded_include_directories(PATH ${TFM_ROOT_DIR}/interface/include ABSOLUTE)
+embedded_include_directories(PATH ${TFM_ROOT_DIR}/secure_fw/include ABSOLUTE)
+embedded_include_directories(PATH ${TFM_ROOT_DIR}/secure_fw/spm/include ABSOLUTE)
+embedded_include_directories(PATH ${TFM_ROOT_DIR}/secure_fw/spm/arch/include ABSOLUTE)
diff --git a/secure_fw/spm/model_ipc/include/tfm_internal_defines.h b/secure_fw/spm/model_ipc/include/tfm_internal_defines.h
new file mode 100644
index 0000000..e948e7e
--- /dev/null
+++ b/secure_fw/spm/model_ipc/include/tfm_internal_defines.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef __TFM_INTERNAL_DEFINES_H__
+#define __TFM_INTERNAL_DEFINES_H__
+
+#include <inttypes.h>
+
+/* IPC internal return status */
+#define IPC_SUCCESS 0
+#define IPC_ERROR_BAD_PARAMETERS (INT32_MIN)
+#define IPC_ERROR_SHORT_BUFFER (INT32_MIN + 1)
+#define IPC_ERROR_VERSION (INT32_MIN + 2)
+#define IPC_ERROR_MEMORY_CHECK (INT32_MIN + 3)
+#define IPC_ERROR_GENERIC (INT32_MIN + 0x1F)
+
+#endif
diff --git a/secure_fw/spm/model_ipc/include/tfm_list.h b/secure_fw/spm/model_ipc/include/tfm_list.h
new file mode 100644
index 0000000..9764503
--- /dev/null
+++ b/secure_fw/spm/model_ipc/include/tfm_list.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef __TFM_LIST_H__
+#define __TFM_LIST_H__
+
+/* List structure */
+struct tfm_list_node_t {
+ struct tfm_list_node_t *prev;
+ struct tfm_list_node_t *next;
+};
+
+/**
+ * \brief Initialize list head.
+ *
+ * \param[in] head List head need to be initialized.
+ */
+__STATIC_INLINE void tfm_list_init(struct tfm_list_node_t *head)
+{
+ head->next = head;
+ head->prev = head;
+}
+
+/**
+ * \brief Add one node to list tail.
+ *
+ * \param[in] head List head initialized by \ref tfm_list_init.
+ * \param[in] node             List node to be added.
+ */
+__STATIC_INLINE void
+tfm_list_add_tail(struct tfm_list_node_t *head, struct tfm_list_node_t *node)
+{
+ head->prev->next = node;
+ node->prev = head->prev;
+ head->prev = node;
+ node->next = head;
+}
+
+/**
+ * \brief Check if a list is empty.
+ *
+ * \param[in] head List head initialized by \ref tfm_list_init.
+ *
+ * \returns returns 1 for empty, or 0 for not.
+ */
+__STATIC_INLINE int32_t tfm_list_is_empty(struct tfm_list_node_t *head)
+{
+ return (head->next == head);
+}
+
+/**
+ * \brief Insert one node to list head.
+ *
+ * \param[in] head List head initialized by \ref tfm_list_init.
+ * \param[in] node             List node to be inserted.
+ */
+__STATIC_INLINE void
+tfm_list_insert_first(struct tfm_list_node_t *head,
+ struct tfm_list_node_t *node)
+{
+ node->next = head->next;
+ node->prev = head;
+ head->next->prev = node;
+ head->next = node;
+}
+
+/**
+ * \brief Retrieve the first node from list.
+ *
+ * \param[in] head List head initialized by \ref tfm_list_init.
+ *
+ * \returns Returns the pointer to first list node.
+ */
+__STATIC_INLINE
+struct tfm_list_node_t *tfm_list_first_node(struct tfm_list_node_t *head)
+{
+ return head->next;
+}
+
+/**
+ * \brief Delete one node from list.
+ *
+ * \param[in] node             List node to be deleted.
+ */
+__STATIC_INLINE void tfm_list_del_node(struct tfm_list_node_t *node)
+{
+ node->prev->next = node->next;
+ node->next->prev = node->prev;
+}
+
+/* Go through each node of a list */
+#define TFM_LIST_FOR_EACH(node, head) \
+ for (node = (head)->next; node != head; node = node->next)
+
+#endif
diff --git a/secure_fw/spm/model_ipc/include/tfm_message_queue.h b/secure_fw/spm/model_ipc/include/tfm_message_queue.h
new file mode 100644
index 0000000..a11fb3b
--- /dev/null
+++ b/secure_fw/spm/model_ipc/include/tfm_message_queue.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef __TFM_MESSAGE_QUEUE_H__
+#define __TFM_MESSAGE_QUEUE_H__
+
+#include "psa/service.h"
+#include "tfm_wait.h"
+
+#define TFM_MSG_MAGIC 0x15154343
+/* Message struct to collect parameter from client */
+struct tfm_msg_body_t {
+ int32_t magic;
+ struct tfm_spm_service_t *service; /* RoT service pointer */
+ struct tfm_conn_handle_t *handle; /* Connected Service handle */
+    struct tfm_event_t ack_evnt;        /* Event for ack response */
+ psa_msg_t msg; /* PSA message body */
+ psa_invec invec[PSA_MAX_IOVEC]; /* Put in/out vectors in msg body */
+ psa_outvec outvec[PSA_MAX_IOVEC];
+ psa_outvec *caller_outvec; /*
+ * Save caller outvec pointer for
+ * write length update
+ */
+#ifdef TFM_MULTI_CORE_TOPOLOGY
+ const void *caller_data; /*
+ * Pointer to the private data of the
+ * caller. It identifies the NSPE PSA
+ * client calls in multi-core topology
+ */
+#endif
+ struct tfm_msg_body_t *next; /* List operators */
+};
+
+struct tfm_msg_queue_t {
+ struct tfm_msg_body_t *head; /* Queue head */
+ struct tfm_msg_body_t *tail; /* Queue tail */
+ uint32_t size; /* Number of the queue member */
+};
+
+/**
+ * \brief Enqueue a message into message queue.
+ *
+ * \param[in] queue Message queue, it will be initialized
+ * if has not been initialized.
+ * \param[in] node              Message queue node to be enqueued.
+ *
+ * \retval IPC_SUCCESS Success.
+ * \retval IPC_ERROR_BAD_PARAMETERS Parameters error.
+ */
+int32_t tfm_msg_enqueue(struct tfm_msg_queue_t *queue,
+ struct tfm_msg_body_t *node);
+
+/**
+ * \brief Dequeue a message from message queue.
+ *
+ * \param[in] queue Message queue.
+ *
+ * \retval node pointer Success.
+ * \retval NULL Queue is NULL or size is zero.
+ */
+struct tfm_msg_body_t *tfm_msg_dequeue(struct tfm_msg_queue_t *queue);
+
+/**
+ * \brief Check if a message queue is empty.
+ *
+ * \param[in] queue Message queue.
+ *
+ * \returns Returns 1 for empty, or 0 for not.
+ */
+int32_t tfm_msg_queue_is_empty(struct tfm_msg_queue_t *queue);
+
+#endif
diff --git a/secure_fw/spm/model_ipc/include/tfm_multi_core.h b/secure_fw/spm/model_ipc/include/tfm_multi_core.h
new file mode 100644
index 0000000..2618804
--- /dev/null
+++ b/secure_fw/spm/model_ipc/include/tfm_multi_core.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef __TFM_MULTI_CORE_H__
+#define __TFM_MULTI_CORE_H__
+
+#include <stdbool.h>
+
+/* Security attributes of target memory region in memory access check. */
+struct security_attr_info_t {
+ bool is_valid; /* Whether the target memory region is valid */
+ bool is_secure; /* Secure memory or non-secure memory */
+};
+
+/* Memory access attributes of target memory region in memory access check. */
+struct mem_attr_info_t {
+ bool is_mpu_enabled; /* Whether memory protection unit(s) enabled */
+ bool is_valid; /* Whether the target memory region is valid */
+ bool is_xn; /* Execute Never or not */
+ bool is_priv_rd_allow; /* Privileged read is allowed or not */
+ bool is_priv_wr_allow; /* Privileged write is allowed or not */
+ bool is_unpriv_rd_allow; /* Unprivileged read is allowed or not */
+ bool is_unpriv_wr_allow; /* Unprivileged write is allowed or not */
+};
+
+/**
+ * \brief Retrieve general security isolation configuration information of the
+ * target memory region according to the system memory region layout and
+ * fill the \ref security_attr_info_t.
+ *
+ * \param[in] p Base address of target memory region
+ * \param[in] s Size of target memory region
+ * \param[out] p_attr Address of \ref security_attr_info_t to be filled
+ *
+ * \return void
+ *
+ * \note This function doesn't access any hardware security isolation unit.
+ */
+void tfm_get_mem_region_security_attr(const void *p, size_t s,
+ struct security_attr_info_t *p_attr);
+
+/**
+ * \brief Retrieve general secure memory protection configuration information of
+ * the target memory region according to the system memory region layout
+ * and symbol addresses and fill the \ref mem_attr_info_t.
+ *
+ * \param[in] p Base address of target memory region
+ * \param[in] s Size of target memory region
+ * \param[out] p_attr Address of \ref mem_attr_info_t to be filled
+ *
+ * \return void
+ *
+ * \note This function doesn't access any hardware memory protection unit.
+ * The \ref is_mpu_enabled field is set to false by default.
+ */
+void tfm_get_secure_mem_region_attr(const void *p, size_t s,
+ struct mem_attr_info_t *p_attr);
+
+/**
+ * \brief Retrieve general non-secure memory protection configuration
+ * information of the target memory region according to the system memory
+ * region layout and fill the \ref mem_attr_info_t.
+ *
+ * \param[in] p Base address of target memory region
+ * \param[in] s Size of target memory region
+ * \param[out] p_attr Address of \ref mem_attr_info_t to be filled
+ *
+ * \return void
+ *
+ * \note This function doesn't access any hardware memory protection unit.
+ * The \ref is_mpu_enabled field is set to false by default.
+ */
+void tfm_get_ns_mem_region_attr(const void *p, size_t s,
+ struct mem_attr_info_t *p_attr);
+
+#endif /* __TFM_MULTI_CORE_H__ */
diff --git a/secure_fw/spm/model_ipc/include/tfm_pools.h b/secure_fw/spm/model_ipc/include/tfm_pools.h
new file mode 100644
index 0000000..422484b
--- /dev/null
+++ b/secure_fw/spm/model_ipc/include/tfm_pools.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef __TFM_POOLS_H__
+#define __TFM_POOLS_H__
+
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Resource pool - few known size resources allocation/free is required,
+ * so pool is more applicable than heap.
+ */
+
+/*
+ * Pool Instance:
+ * [ Pool Instance ] + N * [ Pool Chunks ]
+ */
+struct tfm_pool_chunk_t {
+ struct tfm_list_node_t list; /* Chunk list */
+ void *pool; /* Point to the parent pool */
+ uint8_t data[0]; /* Data indicator */
+};
+
+/*
+ * tfm_pool_chunk_t minus the zero length "data" member,
+ * required for standards compliant C
+ */
+struct tfm_pool_chunk_s_t {
+ struct tfm_list_node_t list; /* Chunk list */
+ void *pool; /* Point to the parent pool */
+};
+
+struct tfm_pool_instance_t {
+ size_t chunksz; /* Chunks size of pool member */
+ size_t chunk_count; /* A number of chunks in the pool */
+ struct tfm_list_node_t chunks_list; /* Chunk list head in pool */
+ struct tfm_pool_chunk_s_t chunks[0]; /* Data indicator */
+};
+
+/*
+ * This declares a static memory pool variable with chunk memory.
+ * Parameters:
+ * name - Variable name, will be used when register
+ * chunksz - chunk size in bytes
+ * num - Number of chunks
+ */
+#define TFM_POOL_DECLARE(name, chunksz, num) \
+ static uint8_t name##_pool_buf[((chunksz) + \
+ sizeof(struct tfm_pool_chunk_t)) * (num) \
+ + sizeof(struct tfm_pool_instance_t)] \
+ __attribute__((aligned(4))); \
+ static struct tfm_pool_instance_t *name = \
+ (struct tfm_pool_instance_t *)name##_pool_buf
+
+/* Get the head size of memory pool */
+#define POOL_HEAD_SIZE (sizeof(struct tfm_pool_instance_t) + \
+ sizeof(struct tfm_pool_chunk_t))
+
+/* Get the whole size of memory pool */
+#define POOL_BUFFER_SIZE(name) sizeof(name##_pool_buf)
+
+/**
+ * \brief Register a memory pool.
+ *
+ * \param[in] pool Pointer to memory pool declared by
+ * \ref TFM_POOL_DECLARE
+ * \param[in] poolsz Size of the pool buffer.
+ * \param[in] chunksz Size of chunks.
+ * \param[in] num Number of chunks.
+ *
+ * \retval IPC_SUCCESS Success.
+ * \retval IPC_ERROR_BAD_PARAMETERS Parameters error.
+ */
+int32_t tfm_pool_init(struct tfm_pool_instance_t *pool, size_t poolsz,
+ size_t chunksz, size_t num);
+
+/**
+ * \brief Allocate a memory from pool.
+ *
+ * \param[in] pool              pool pointer declared by \ref TFM_POOL_DECLARE
+ *
+ * \retval buffer pointer Success.
+ * \retval NULL Failed.
+ */
+void *tfm_pool_alloc(struct tfm_pool_instance_t *pool);
+
+/**
+ * \brief Free the allocated memory.
+ *
+ * \param[in] ptr              Buffer pointer to be freed.
+ */
+void tfm_pool_free(void *ptr);
+
+/**
+ * \brief Checks whether a pointer points to a chunk data in the pool.
+ *
+ * \param[in] pool Pointer to memory pool declared by
+ * \ref TFM_POOL_DECLARE.
+ * \param[in] data The pointer to check.
+ *
+ * \retval true Data is a chunk data in the pool.
+ * \retval false Data is not a chunk data in the pool.
+ */
+bool is_valid_chunk_data_in_pool(struct tfm_pool_instance_t *pool,
+ uint8_t *data);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __TFM_POOLS_H__ */
diff --git a/secure_fw/spm/model_ipc/include/tfm_rpc.h b/secure_fw/spm/model_ipc/include/tfm_rpc.h
new file mode 100644
index 0000000..cf573f8
--- /dev/null
+++ b/secure_fw/spm/model_ipc/include/tfm_rpc.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+/*
+ * Definitions of Remote Procedure Call (RPC) functionalities in TF-M, which
+ * sits between upper TF-M SPM and underlying mailbox implementation.
+ */
+
+#ifndef __TFM_RPC_H__
+#define __TFM_RPC_H__
+
+#ifdef TFM_MULTI_CORE_TOPOLOGY
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "cmsis_compiler.h"
+#include "psa/client.h"
+#include "psa/service.h"
+#include "tfm_thread.h"
+#include "tfm_wait.h"
+#include "tfm_message_queue.h"
+
+#define TFM_RPC_SUCCESS (0)
+#define TFM_RPC_INVAL_PARAM (INT32_MIN + 1)
+#define TFM_RPC_CONFLICT_CALLBACK (INT32_MIN + 2)
+
+/*
+ * This structure holds the parameters used in a PSA client call.
+ * The parameters are passed from non-secure core to secure core.
+ */
+struct client_call_params_t {
+ uint32_t sid;
+ psa_handle_t handle;
+ int32_t type;
+ const psa_invec *in_vec;
+ size_t in_len;
+ psa_outvec *out_vec;
+ size_t out_len;
+ uint32_t version;
+};
+
+/*
+ * The underlying mailbox communication implementation should provide
+ * the specific operations to complete the RPC functionalities.
+ *
+ * It includes the following operations:
+ * handle_req() - Handle PSA client call request from NSPE
+ * reply() - Reply PSA client call return result to NSPE. The parameter
+ * owner identifies the owner of the PSA client call.
+ * get_caller_data() - Get the private data of NSPE client from mailbox to
+ * identify the PSA client call.
+ */
+struct tfm_rpc_ops_t {
+ void (*handle_req)(void);
+ void (*reply)(const void *owner, int32_t ret);
+ const void * (*get_caller_data)(int32_t client_id);
+};
+
+/**
+ * \brief RPC handler for \ref psa_framework_version.
+ *
+ * \return version The version of the PSA Framework implementation
+ * that is providing the runtime services.
+ */
+uint32_t tfm_rpc_psa_framework_version(void);
+
+/**
+ * \brief RPC handler for \ref psa_version.
+ *
+ * \param[in] params Base address of parameters
+ * \param[in] ns_caller If 'true', indicate the non-secure caller
+ *
+ * \retval PSA_VERSION_NONE The RoT Service is not implemented, or the
+ * caller is not permitted to access the service.
+ * \retval > 0 The version of the implemented RoT Service.
+ */
+uint32_t tfm_rpc_psa_version(const struct client_call_params_t *params,
+ bool ns_caller);
+
+/**
+ * \brief RPC handler for \ref psa_connect.
+ *
+ * \param[in] params Base address of parameters
+ * \param[in] ns_caller If 'true', indicate the non-secure caller
+ *
+ * \retval PSA_SUCCESS Success.
+ * \retval PSA_CONNECTION_BUSY The SPM cannot make the connection
+ * at the moment.
+ * \retval "Does not return" The RoT Service ID and version are not
+ * supported, or the caller is not permitted to
+ * access the service.
+ */
+psa_status_t tfm_rpc_psa_connect(const struct client_call_params_t *params,
+ bool ns_caller);
+
+/**
+ * \brief RPC handler for \ref psa_call.
+ *
+ * \param[in] params Base address of parameters
+ * \param[in] ns_caller If 'true', indicate the non-secure caller
+ *
+ * \retval PSA_SUCCESS Success.
+ * \retval "Does not return" The call is invalid, one or more of the
+ * following are true:
+ * \arg An invalid handle was passed.
+ * \arg The connection is already handling a request.
+ * \arg An invalid memory reference was provided.
+ * \arg in_len + out_len > PSA_MAX_IOVEC.
+ * \arg The message is unrecognized or
+ * incorrectly formatted.
+ */
+psa_status_t tfm_rpc_psa_call(const struct client_call_params_t *params,
+ bool ns_caller);
+
+/**
+ * \brief RPC handler for \ref psa_close.
+ *
+ * \param[in] params Base address of parameters
+ * \param[in] ns_caller If 'true', indicate the non-secure caller
+ *
+ * \retval void Success.
+ * \retval "Does not return" The call is invalid, one or more of the
+ * following are true:
+ * \arg An invalid handle was provided that is not
+ *                                     the null handle.
+ */
+void tfm_rpc_psa_close(const struct client_call_params_t *params,
+ bool ns_caller);
+
+/**
+ * \brief Register underlying mailbox communication operations.
+ *
+ * \param[in] ops_ptr Pointer to the specific operation structure.
+ *
+ * \retval TFM_RPC_SUCCESS Mailbox operations are successfully registered.
+ * \retval Other error code Fail to register mailbox operations.
+ */
+int32_t tfm_rpc_register_ops(const struct tfm_rpc_ops_t *ops_ptr);
+
+/**
+ * \brief Unregister underlying mailbox communication operations.
+ *
+ * Currently one and only one underlying mailbox communication implementation is
+ * allowed in runtime. Thus it is unnecessary to specify the mailbox
+ * communication operation callbacks to be unregistered.
+ *
+ * \param[in] void
+ */
+void tfm_rpc_unregister_ops(void);
+
+/**
+ * \brief Handling PSA client call request
+ *
+ * \param void
+ */
+void tfm_rpc_client_call_handler(void);
+
+/**
+ * \brief Reply PSA client call return result
+ *
+ * \param[in] owner A handle to identify the owner of the PSA
+ * client call.
+ * \param[in] ret PSA client call return result value.
+ */
+void tfm_rpc_client_call_reply(const void *owner, int32_t ret);
+
+/*
+ * Check if the message was allocated for a non-secure request via RPC
+ *
+ * \param[in] msg The message body context pointer
+ * \ref msg_body_t structures
+ *
+ * \retval true The message was allocated for a NS request via RPC.
+ * \retval false Otherwise.
+ */
+__STATIC_INLINE bool is_tfm_rpc_msg(const struct tfm_msg_body_t *msg)
+{
+ /*
+ * FIXME
+ * The ID should be smaller than 0 if the message is allocated by a
+ * non-secure caller.
+ * However, current TF-M implementation use 0 as the default non-secure
+ * caller ID. Therefore, treat the caller as non-secure when client_id == 0.
+ *
+ * This condition check should be improved after TF-M non-secure client ID
+ * management is implemented.
+ */
+ if (msg && (msg->msg.client_id <= 0) && !msg->ack_evnt.owner) {
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * \brief Set the private data of the NS caller in \ref msg_body_t, to identify
+ *        the caller after PSA client call is completed.
+ *
+ * \param[in] msg The address of \ref msg_body_t structure
+ * \param[in] client_id The client ID of the NS caller.
+ */
+void tfm_rpc_set_caller_data(struct tfm_msg_body_t *msg, int32_t client_id);
+
+#else /* TFM_MULTI_CORE_TOPOLOGY */
+
+/* RPC is only available in multi-core scenario */
+#define is_tfm_rpc_msg(x) (false)
+
+#define tfm_rpc_client_call_handler() do {} while (0)
+
+#define tfm_rpc_client_call_reply(owner, ret) do {} while (0)
+
+#define tfm_rpc_set_caller_data(msg, client_id) do {} while (0)
+
+#endif /* TFM_MULTI_CORE_TOPOLOGY */
+#endif /* __TFM_RPC_H__ */
diff --git a/secure_fw/spm/model_ipc/include/tfm_spe_mailbox.h b/secure_fw/spm/model_ipc/include/tfm_spe_mailbox.h
new file mode 100644
index 0000000..fd07907
--- /dev/null
+++ b/secure_fw/spm/model_ipc/include/tfm_spe_mailbox.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef __TFM_SPE_MAILBOX_H__
+#define __TFM_SPE_MAILBOX_H__
+
+#include "tfm_mailbox.h"
+
+/* A single slot structure in SPE mailbox queue */
+struct secure_mailbox_slot_t {
+ struct mailbox_msg_t msg;
+
+ uint8_t ns_slot_idx;
+ mailbox_msg_handle_t msg_handle;
+};
+
+struct secure_mailbox_queue_t {
+ mailbox_queue_status_t empty_slots; /* bitmask of empty slots */
+
+ struct secure_mailbox_slot_t queue[NUM_MAILBOX_QUEUE_SLOT];
+ struct ns_mailbox_queue_t *ns_queue;
+ uint8_t cur_proc_slot_idx; /*
+ * The index of mailbox
+ * queue slot currently
+ * under processing.
+ */
+};
+
+/**
+ * \brief Handle mailbox message(s) from NSPE.
+ *
+ * \retval MAILBOX_SUCCESS Successfully get PSA client call return result.
+ * \retval Other return code Operation failed with an error code.
+ */
+int32_t tfm_mailbox_handle_msg(void);
+
+/**
+ * \brief Return PSA client call return result to NSPE.
+ *
+ * \param[in] handle The handle to the mailbox message
+ * \param[in] reply PSA client call return result to be written
+ * to NSPE.
+ *
+ * \retval MAILBOX_SUCCESS Operation succeeded.
+ * \retval Other return code Operation failed with an error code.
+ */
+int32_t tfm_mailbox_reply_msg(mailbox_msg_handle_t handle, int32_t reply);
+
+/**
+ * \brief SPE mailbox initialization
+ *
+ * \retval MAILBOX_SUCCESS Operation succeeded.
+ * \retval Other return code Operation failed with an error code.
+ */
+int32_t tfm_mailbox_init(void);
+
+/**
+ * \brief Platform specific initialization of SPE mailbox.
+ *
+ * \param[in] s_queue The base address of SPE mailbox queue.
+ *
+ * \retval MAILBOX_SUCCESS Operation succeeded.
+ * \retval Other return code Operation failed with an error code.
+ */
+int32_t tfm_mailbox_hal_init(struct secure_mailbox_queue_t *s_queue);
+
+/**
+ * \brief Notify NSPE that a PSA client call return result is replied.
+ * Implemented by platform specific inter-processor communication driver.
+ *
+ * \retval MAILBOX_SUCCESS The notification is successfully sent out.
+ * \retval Other return code Operation failed with an error code.
+ */
+int32_t tfm_mailbox_hal_notify_peer(void);
+
+/**
+ * \brief Enter critical section of NSPE mailbox
+ */
+void tfm_mailbox_hal_enter_critical(void);
+
+/**
+ * \brief Exit critical section of NSPE mailbox
+ */
+void tfm_mailbox_hal_exit_critical(void);
+
+#endif /* __TFM_SPE_MAILBOX_H__ */
diff --git a/secure_fw/spm/model_ipc/include/tfm_svcalls.h b/secure_fw/spm/model_ipc/include/tfm_svcalls.h
new file mode 100644
index 0000000..d553fc8
--- /dev/null
+++ b/secure_fw/spm/model_ipc/include/tfm_svcalls.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef __TFM_SVCALLS_H__
+#define __TFM_SVCALLS_H__
+
+#include <stdint.h>
+
+/**
+ * \brief The C source of SVCall handlers
+ *
+ * \param[in] svc_args The arguments list.
+ * \param[in] exc_return EXC_RETURN value of the SVC.
+ *
+ * \returns EXC_RETURN value indicates where to return.
+ */
+uint32_t tfm_core_svc_handler(uint32_t *svc_args, uint32_t exc_return);
+
+#endif
diff --git a/secure_fw/spm/model_ipc/include/tfm_thread.h b/secure_fw/spm/model_ipc/include/tfm_thread.h
new file mode 100644
index 0000000..925967b
--- /dev/null
+++ b/secure_fw/spm/model_ipc/include/tfm_thread.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef __TFM_THREAD_H__
+#define __TFM_THREAD_H__
+
+#include <stdint.h>
+#include <stddef.h>
+#include "tfm_arch.h"
+#include "cmsis_compiler.h"
+
+/* State code */
+#define THRD_STATE_CREATING 0
+#define THRD_STATE_RUNNING 1
+#define THRD_STATE_BLOCK 2
+#define THRD_STATE_DETACH 3
+#define THRD_STATE_INVALID 4
+
+/* Security attribute - default as security */
+#define THRD_ATTR_SECURE_OFFSET 16
+#define THRD_ATTR_SECURE (0)
+#define THRD_ATTR_NON_SECURE (1 << THRD_ATTR_SECURE_OFFSET)
+
+/* Lower value has higher priority */
+#define THRD_PRIOR_MASK 0xFF
+#define THRD_PRIOR_HIGHEST 0x0
+#define THRD_PRIOR_MEDIUM 0x7F
+#define THRD_PRIOR_LOWEST 0xFF
+
+/* Error code */
+#define THRD_SUCCESS 0
+#define THRD_ERR_INVALID_PARAM 1
+
+/* Thread entry function type */
+typedef void *(*tfm_core_thrd_entry_t)(void *);
+
+/* Thread context */
+struct tfm_core_thread_t {
+ tfm_core_thrd_entry_t pfn; /* entry function */
+ void *param; /* entry parameter */
+ uintptr_t stk_btm; /* stack bottom (lower address) */
+ uintptr_t stk_top; /* stack top (higher address)*/
+ uint32_t prior; /* priority */
+ uint32_t state; /* state */
+
+ struct tfm_arch_ctx_t arch_ctx; /* State context */
+ struct tfm_core_thread_t *next; /* next thread in list */
+};
+
+/*
+ * Initialize a thread context with the necessary info.
+ *
+ * Parameters :
+ * pth - pointer of caller provided thread context
+ * pfn - thread entry function
+ * param - thread entry function parameter
+ * stk_top - stack pointer top (higher address)
+ * stk_btm - stack pointer bottom (lower address)
+ *
+ * Notes :
+ *  Thread context relies on caller-allocated memory; initialize members in
+ * context. This function does not insert thread into schedulable list.
+ */
+void tfm_core_thrd_init(struct tfm_core_thread_t *pth,
+ tfm_core_thrd_entry_t pfn, void *param,
+ uintptr_t stk_top, uintptr_t stk_btm);
+
+/*
+ * Set thread priority.
+ *
+ * Parameters :
+ * pth - pointer of thread context
+ * prior - priority value (0~255)
+ *
+ * Notes :
+ * Set thread priority. Priority is set to THRD_PRIOR_MEDIUM in
+ * tfm_core_thrd_init().
+ */
+void __STATIC_INLINE tfm_core_thrd_set_priority(struct tfm_core_thread_t *pth,
+ uint32_t prior)
+{
+ pth->prior &= ~THRD_PRIOR_MASK;
+ pth->prior |= prior & THRD_PRIOR_MASK;
+}
+
+/*
+ * Set thread security attribute.
+ *
+ * Parameters :
+ * pth - pointer of thread context
+ * attr_secure - THRD_ATTR_SECURE or THRD_ATTR_NON_SECURE
+ *
+ * Notes
+ * Reuse prior of thread context to shift down non-secure thread priority.
+ */
+void __STATIC_INLINE tfm_core_thrd_set_secure(struct tfm_core_thread_t *pth,
+ uint32_t attr_secure)
+{
+ pth->prior &= ~THRD_ATTR_NON_SECURE;
+ pth->prior |= attr_secure;
+}
+
+/*
+ * Set thread state.
+ *
+ * Parameters :
+ * pth - pointer of thread context
+ * new_state - new state of thread
+ *
+ * Return :
+ * None
+ *
+ * Notes :
+ *  Thread state is not changed if an invalid state value is input.
+ */
+void tfm_core_thrd_set_state(struct tfm_core_thread_t *pth, uint32_t new_state);
+
+/*
+ * Get thread state.
+ *
+ * Parameters :
+ * pth - pointer of thread context
+ *
+ * Return :
+ * State of thread
+ */
+uint32_t __STATIC_INLINE tfm_core_thrd_get_state(struct tfm_core_thread_t *pth)
+{
+ return pth->state;
+}
+
+/*
+ * Set thread state return value.
+ *
+ * Parameters :
+ * pth - pointer of thread context
+ * retval - return value to be set for thread state
+ *
+ * Notes :
+ * This API is useful for blocked syscall blocking thread. Syscall
+ * could set its return value to the caller before caller goes.
+ */
+void __STATIC_INLINE tfm_core_thrd_set_retval(struct tfm_core_thread_t *pth,
+ uint32_t retval)
+{
+ TFM_STATE_RET_VAL(&pth->arch_ctx) = retval;
+}
+
+/*
+ * Validate thread context and insert it into schedulable list.
+ *
+ * Parameters :
+ * pth - pointer of thread context
+ *
+ * Return :
+ * THRD_SUCCESS for success. Or an error is returned.
+ *
+ * Notes :
+ * This function validates thread info. It returns error if thread info
+ *  is not correct. Thread is available after successful tfm_core_thrd_start().
+ */
+uint32_t tfm_core_thrd_start(struct tfm_core_thread_t *pth);
+
+/*
+ * Get current running thread.
+ *
+ * Return :
+ * Current running thread context pointer.
+ */
+struct tfm_core_thread_t *tfm_core_thrd_get_curr_thread(void);
+
+/*
+ * Get next running thread in list.
+ *
+ * Return :
+ * Pointer of next thread to be run.
+ */
+struct tfm_core_thread_t *tfm_core_thrd_get_next_thread(void);
+
+/*
+ * Start scheduler for existing threads
+ *
+ * Parameters:
+ * pth - pointer of the caller context collecting thread
+ *
+ * Notes :
+ * This function should be called only ONCE to start the scheduler.
+ * Caller needs to provide a thread object to collect current context.
+ * The usage of the collected context is caller defined.
+ */
+void tfm_core_thrd_start_scheduler(struct tfm_core_thread_t *pth);
+
+/*
+ * Activate a scheduling action after exception.
+ *
+ * Notes :
+ * This function could be called multiple times before scheduling.
+ */
+void tfm_core_thrd_activate_schedule(void);
+
+/*
+ * Save current architecture context into 'prev' thread and switch to 'next'.
+ *
+ * Parameters :
+ * p_actx - latest caller context
+ * prev - previous thread to be switched out
+ * next - thread to be run
+ *
+ * Notes :
+ * This function could be called multiple times before scheduling.
+ */
+void tfm_core_thrd_switch_context(struct tfm_arch_ctx_t *p_actx,
+ struct tfm_core_thread_t *prev,
+ struct tfm_core_thread_t *next);
+
+#endif
diff --git a/secure_fw/spm/model_ipc/include/tfm_wait.h b/secure_fw/spm/model_ipc/include/tfm_wait.h
new file mode 100644
index 0000000..7d2055b
--- /dev/null
+++ b/secure_fw/spm/model_ipc/include/tfm_wait.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef __TFM_WAIT_H__
+#define __TFM_WAIT_H__
+
+#include <stddef.h>
+
+#include "cmsis_compiler.h"
+
+/* The magic number has two purposes: corruption detection and debug */
+#define TFM_EVENT_MAGIC 0x65766e74
+
+struct tfm_event_t {
+    uint32_t magic;                  /* TFM_EVENT_MAGIC ('evnt') while valid */
+    struct tfm_core_thread_t *owner; /* Thread blocked on this event, if any */
+};
+
+/*
+ * Initialize an event object.
+ *
+ * Parameters:
+ *  pevnt - The pointer of event object allocated by the caller
+ *
+ * Notes:
+ *  Sets the magic number (used for corruption detection and debug) and
+ *  marks the event as having no blocked owner thread.
+ */
+void __STATIC_INLINE tfm_event_init(struct tfm_event_t *pevnt)
+{
+    pevnt->magic = TFM_EVENT_MAGIC;
+    pevnt->owner = NULL;
+}
+
+/*
+ * Wait on an event object.
+ *
+ * Parameters:
+ * pevnt - The pointer of event object allocated by the caller
+ *
+ * Notes:
+ * Block caller thread by calling this function.
+ */
+void tfm_event_wait(struct tfm_event_t *pevnt);
+
+/*
+ * Wake up an event object.
+ *
+ * Parameters :
+ * pevnt - The pointer of event object allocated by the caller
+ * retval - Value to be returned to owner
+ *
+ * Notes:
+ * Wake up the blocked thread and set parameter 'retval' as the return value.
+ */
+void tfm_event_wake(struct tfm_event_t *pevnt, uint32_t retval);
+
+#endif
diff --git a/secure_fw/spm/model_ipc/spm_ipc.c b/secure_fw/spm/model_ipc/spm_ipc.c
new file mode 100644
index 0000000..99f6925
--- /dev/null
+++ b/secure_fw/spm/model_ipc/spm_ipc.c
@@ -0,0 +1,1676 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include "psa/client.h"
+#include "psa/service.h"
+#include "psa/lifecycle.h"
+#include "tfm_thread.h"
+#include "tfm_wait.h"
+#include "tfm_utils.h"
+#include "tfm_internal_defines.h"
+#include "tfm_message_queue.h"
+#include "tfm_spm_hal.h"
+#include "tfm_irq_list.h"
+#include "tfm_api.h"
+#include "tfm_secure_api.h"
+#include "tfm_memory_utils.h"
+#include "tfm/spm_api.h"
+#include "tfm_peripherals_def.h"
+#include "tfm/spm_db.h"
+#include "tfm_core_utils.h"
+#include "spm_psa_client_call.h"
+#include "tfm_rpc.h"
+#include "tfm_internal.h"
+#include "tfm_core_trustzone.h"
+#include "tfm_core_mem_check.h"
+#include "tfm_list.h"
+#include "tfm_pools.h"
+#include "region_defs.h"
+#include "tfm/tfm_spm_services_api.h"
+
+#include "secure_fw/partitions/tfm_service_list.inc"
+
+/* Extern service variable */
+extern struct tfm_spm_service_t service[];
+extern const struct tfm_spm_service_db_t service_db[];
+
+/* Extern SPM variable */
+extern struct spm_partition_db_t g_spm_partition_db;
+
+/* Pools */
+TFM_POOL_DECLARE(conn_handle_pool, sizeof(struct tfm_conn_handle_t),
+ TFM_CONN_HANDLE_MAX_NUM);
+
+void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal,
+ IRQn_Type irq_line);
+
+#include "tfm_secure_irq_handlers_ipc.inc"
+
+/*********************** Connection handle conversion APIs *******************/
+
+/* Set a minimal value here for feature expansion. */
+#define CLIENT_HANDLE_VALUE_MIN 32
+
+#define CONVERSION_FACTOR_BITOFFSET 3
+#define CONVERSION_FACTOR_VALUE (1 << CONVERSION_FACTOR_BITOFFSET)
+/* Set 32 as the maximum */
+#define CONVERSION_FACTOR_VALUE_MAX 0x20
+
+#if CONVERSION_FACTOR_VALUE > CONVERSION_FACTOR_VALUE_MAX
+#error "CONVERSION FACTOR OUT OF RANGE"
+#endif
+
+static uint32_t loop_index;
+
+/*
+ * A handle instance psa_handle_t allocated inside SPM is actually a memory
+ * address among the handle pool. Return this handle to the client directly
+ * exposes information of secure memory address. In this case, converting the
+ * handle into another value does not represent the memory address to avoid
+ * exposing secure memory directly to clients.
+ *
+ * This function converts the handle instance into another value by scaling the
+ * handle in pool offset, the converted value is named as a user handle.
+ *
+ * The formula:
+ * user_handle = (handle_instance - POOL_START) * CONVERSION_FACTOR_VALUE +
+ * CLIENT_HANDLE_VALUE_MIN + loop_index
+ * where:
+ * CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
+ * exceed CONVERSION_FACTOR_VALUE_MAX.
+ *
+ * handle_instance in RANGE[POOL_START, POOL_END]
+ * user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
+ * loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
+ *
+ * note:
+ * loop_index is used to ensure that the same handle instance is converted
+ * into different user handles within a short period of time.
+ */
+static psa_handle_t tfm_spm_to_user_handle(
+                                    struct tfm_conn_handle_t *handle_instance)
+{
+    psa_handle_t user_handle;
+
+    /* Rotate loop_index so repeated conversions of one instance differ */
+    loop_index = (loop_index + 1) % CONVERSION_FACTOR_VALUE;
+    user_handle = (psa_handle_t)((((uintptr_t)handle_instance -
+                  (uintptr_t)conn_handle_pool) << CONVERSION_FACTOR_BITOFFSET) +
+                  CLIENT_HANDLE_VALUE_MIN + loop_index);
+
+    return user_handle;
+}
+
+/*
+ * This function converts a user handle into a corresponded handle instance.
+ * The converted value is validated before returning, an invalid handle instance
+ * is returned as NULL.
+ *
+ * The formula:
+ * handle_instance = ((user_handle - CLIENT_HANDLE_VALUE_MIN) /
+ * CONVERSION_FACTOR_VALUE) + POOL_START
+ * where:
+ * CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
+ * exceed CONVERSION_FACTOR_VALUE_MAX.
+ *
+ * handle_instance in RANGE[POOL_START, POOL_END]
+ * user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
+ * loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
+ */
+struct tfm_conn_handle_t *tfm_spm_to_handle_instance(psa_handle_t user_handle)
+{
+    struct tfm_conn_handle_t *handle_instance;
+
+    /* PSA_NULL_HANDLE has no backing handle instance */
+    if (user_handle == PSA_NULL_HANDLE) {
+        return NULL;
+    }
+
+    /*
+     * NOTE(review): no pool-range validation happens here — callers are
+     * expected to validate the returned instance (see
+     * tfm_spm_validate_conn_handle()).
+     */
+    handle_instance = (struct tfm_conn_handle_t *)((((uintptr_t)user_handle -
+                      CLIENT_HANDLE_VALUE_MIN) >> CONVERSION_FACTOR_BITOFFSET) +
+                      (uintptr_t)conn_handle_pool);
+
+    return handle_instance;
+}
+
+/* Service handle management functions */
+
+/*
+ * Allocate a connection handle from the handle pool for 'service', record
+ * the requesting 'client_id' in it, and append it to the service handle
+ * list. Returns NULL when the pool is exhausted.
+ */
+struct tfm_conn_handle_t *tfm_spm_create_conn_handle(
+                                        struct tfm_spm_service_t *service,
+                                        int32_t client_id)
+{
+    struct tfm_conn_handle_t *p_handle;
+
+    TFM_CORE_ASSERT(service);
+
+    /* Get buffer for handle list structure from handle pool */
+    p_handle = (struct tfm_conn_handle_t *)tfm_pool_alloc(conn_handle_pool);
+    if (!p_handle) {
+        return NULL;
+    }
+
+    p_handle->service = service;
+    p_handle->status = TFM_HANDLE_STATUS_IDLE;
+    p_handle->client_id = client_id;
+
+    /* Add handle node to list for next psa functions */
+    tfm_list_add_tail(&service->handle_list, &p_handle->list);
+
+    return p_handle;
+}
+
+/*
+ * Validate a connection handle: it must be a live chunk inside the handle
+ * pool and must be owned by 'client_id'. Returns IPC_SUCCESS on success,
+ * IPC_ERROR_GENERIC otherwise.
+ */
+int32_t tfm_spm_validate_conn_handle(
+                                    const struct tfm_conn_handle_t *conn_handle,
+                                    int32_t client_id)
+{
+    /* Check the handle address is validated */
+    if (is_valid_chunk_data_in_pool(conn_handle_pool,
+                                    (uint8_t *)conn_handle) != true) {
+        return IPC_ERROR_GENERIC;
+    }
+
+    /* Check the handle caller is correct */
+    if (conn_handle->client_id != client_id) {
+        return IPC_ERROR_GENERIC;
+    }
+
+    return IPC_SUCCESS;
+}
+
+/**
+ * \brief Free a connection handle which is not used anymore.
+ *
+ * \param[in] service            Target service context pointer
+ * \param[in] conn_handle        Connection handle created by
+ *                               tfm_spm_create_conn_handle()
+ *
+ * \retval IPC_SUCCESS           Success
+ * \retval IPC_ERROR_BAD_PARAMETERS  Bad parameters input
+ * \retval "Does not return"     Panics when the service cannot be found by
+ *                               the handle
+ */
+static int32_t tfm_spm_free_conn_handle(struct tfm_spm_service_t *service,
+                                        struct tfm_conn_handle_t *conn_handle)
+{
+    TFM_CORE_ASSERT(service);
+    TFM_CORE_ASSERT(conn_handle != NULL);
+
+    /* Clear magic as the handle is not used anymore */
+    conn_handle->internal_msg.magic = 0;
+
+    /* Remove node from handle list */
+    tfm_list_del_node(&conn_handle->list);
+
+    /* Return the handle buffer to the pool */
+    tfm_pool_free(conn_handle);
+    return IPC_SUCCESS;
+}
+
+/**
+ * \brief Set reverse handle value for a connection.
+ *
+ * \param[in] service            Target service context pointer
+ * \param[in] conn_handle        Connection handle created by
+ *                               tfm_spm_create_conn_handle()
+ * \param[in] rhandle            rhandle to save
+ *
+ * \retval IPC_SUCCESS           Success
+ * \retval IPC_ERROR_BAD_PARAMETERS  Bad parameters input
+ * \retval "Does not return"     Panics when the handle node is not found
+ */
+static int32_t tfm_spm_set_rhandle(struct tfm_spm_service_t *service,
+                                   struct tfm_conn_handle_t *conn_handle,
+                                   void *rhandle)
+{
+    TFM_CORE_ASSERT(service);
+    /* Set reverse handle value only be allowed for a connected handle */
+    TFM_CORE_ASSERT(conn_handle != NULL);
+
+    conn_handle->rhandle = rhandle;
+    return IPC_SUCCESS;
+}
+
+/**
+ * \brief Get reverse handle value from a connection handle.
+ *
+ * \param[in] service            Target service context pointer
+ * \param[in] conn_handle        Connection handle created by
+ *                               tfm_spm_create_conn_handle()
+ *
+ * \retval void *                Success
+ * \retval "Does not return"     Panics for any of:
+ *                               service pointer is NULL
+ *                               handle is \ref PSA_NULL_HANDLE
+ *                               handle node is not found
+ */
+static void *tfm_spm_get_rhandle(struct tfm_spm_service_t *service,
+                                 struct tfm_conn_handle_t *conn_handle)
+{
+    TFM_CORE_ASSERT(service);
+    /* Get reverse handle value only be allowed for a connected handle */
+    TFM_CORE_ASSERT(conn_handle != NULL);
+
+    return conn_handle->rhandle;
+}
+
+/* Partition management functions */
+
+/**
+ * \brief Get the service context by signal.
+ *
+ * \param[in] partition         Partition context pointer
+ *                              \ref spm_partition_desc_t structures
+ * \param[in] signal            Signal associated with inputs to the Secure
+ *                              Partition, \ref psa_signal_t
+ *
+ * \retval NULL                 Failed
+ * \retval "Not NULL"           Target service context pointer,
+ *                              \ref tfm_spm_service_t structures
+ *
+ * \note Panics if the partition's service list is empty.
+ */
+static struct tfm_spm_service_t *
+    tfm_spm_get_service_by_signal(struct spm_partition_desc_t *partition,
+                                  psa_signal_t signal)
+{
+    struct tfm_list_node_t *node, *head;
+    struct tfm_spm_service_t *service;
+
+    TFM_CORE_ASSERT(partition);
+
+    if (tfm_list_is_empty(&partition->runtime_data.service_list)) {
+        tfm_core_panic();
+    }
+
+    head = &partition->runtime_data.service_list;
+    TFM_LIST_FOR_EACH(node, head) {
+        service = TFM_GET_CONTAINER_PTR(node, struct tfm_spm_service_t, list);
+        if (service->service_db->signal == signal) {
+            return service;
+        }
+    }
+    return NULL;
+}
+
+/*
+ * Look up a service by its SID across all partitions that carry the IPC
+ * flag. Returns the service context pointer, or NULL when no service with
+ * the given SID exists.
+ */
+struct tfm_spm_service_t *tfm_spm_get_service_by_sid(uint32_t sid)
+{
+    uint32_t i;
+    struct tfm_list_node_t *node, *head;
+    struct tfm_spm_service_t *service;
+    struct spm_partition_desc_t *partition;
+
+    for (i = 0; i < g_spm_partition_db.partition_count; i++) {
+        partition = &g_spm_partition_db.partitions[i];
+        /* Skip partition without IPC flag */
+        if ((tfm_spm_partition_get_flags(i) & SPM_PART_FLAG_IPC) == 0) {
+            continue;
+        }
+
+        if (tfm_list_is_empty(&partition->runtime_data.service_list)) {
+            continue;
+        }
+
+        head = &partition->runtime_data.service_list;
+        TFM_LIST_FOR_EACH(node, head) {
+            service = TFM_GET_CONTAINER_PTR(node, struct tfm_spm_service_t,
+                                            list);
+            if (service->service_db->sid == sid) {
+                return service;
+            }
+        }
+    }
+    return NULL;
+}
+
+/**
+ * \brief Get the partition context by partition ID.
+ *
+ * \param[in] partition_id      Partition identity
+ *
+ * \retval NULL                 Failed
+ * \retval "Not NULL"           Target partition context pointer,
+ *                              \ref spm_partition_desc_t structures
+ */
+static struct spm_partition_desc_t *
+    tfm_spm_get_partition_by_id(int32_t partition_id)
+{
+    uint32_t idx = get_partition_idx(partition_id);
+
+    if (idx != SPM_INVALID_PARTITION_IDX) {
+        return &(g_spm_partition_db.partitions[idx]);
+    }
+    return NULL;
+}
+
+/* Return the partition context of the currently running partition. */
+struct spm_partition_desc_t *tfm_spm_get_running_partition(void)
+{
+    uint32_t spid;
+
+    spid = tfm_spm_partition_get_running_partition_id();
+
+    return tfm_spm_get_partition_by_id(spid);
+}
+
+/*
+ * Check a client's requested version against the service version according
+ * to the service version policy:
+ *  - RELAXED: requested version must not exceed the service version.
+ *  - STRICT:  requested version must equal the service version.
+ * Returns IPC_SUCCESS on match, IPC_ERROR_VERSION otherwise.
+ */
+int32_t tfm_spm_check_client_version(struct tfm_spm_service_t *service,
+                                     uint32_t version)
+{
+    TFM_CORE_ASSERT(service);
+
+    switch (service->service_db->version_policy) {
+    case TFM_VERSION_POLICY_RELAXED:
+        if (version > service->service_db->version) {
+            return IPC_ERROR_VERSION;
+        }
+        break;
+    case TFM_VERSION_POLICY_STRICT:
+        if (version != service->service_db->version) {
+            return IPC_ERROR_VERSION;
+        }
+        break;
+    default:
+        return IPC_ERROR_VERSION;
+    }
+    return IPC_SUCCESS;
+}
+
+/*
+ * Check whether the current caller is authorized to access the service
+ * identified by 'sid':
+ *  - NS callers must be permitted by the service's non_secure_client flag.
+ *  - Secure callers must list 'sid' among their partition dependencies.
+ * Returns IPC_SUCCESS when authorized, IPC_ERROR_GENERIC otherwise.
+ * Panics if the running partition cannot be determined.
+ */
+int32_t tfm_spm_check_authorization(uint32_t sid,
+                                    struct tfm_spm_service_t *service,
+                                    bool ns_caller)
+{
+    struct spm_partition_desc_t *partition = NULL;
+    int32_t i;
+
+    TFM_CORE_ASSERT(service);
+
+    if (ns_caller) {
+        if (!service->service_db->non_secure_client) {
+            return IPC_ERROR_GENERIC;
+        }
+    } else {
+        partition = tfm_spm_get_running_partition();
+        if (!partition) {
+            tfm_core_panic();
+        }
+
+        for (i = 0; i < partition->static_data->dependencies_num; i++) {
+            if (partition->static_data->p_dependencies[i] == sid) {
+                break;
+            }
+        }
+
+        /* Loop ran to the end: 'sid' is not among the dependencies */
+        if (i == partition->static_data->dependencies_num) {
+            return IPC_ERROR_GENERIC;
+        }
+    }
+    return IPC_SUCCESS;
+}
+
+/* Message functions */
+
+/**
+ * \brief Get message context by message handle.
+ *
+ * \param[in] msg_handle        Message handle which is a reference generated
+ *                              by the SPM to a specific message.
+ *
+ * \return The message body context pointer
+ *         \ref tfm_msg_body_t structures
+ */
+static struct tfm_msg_body_t *
+    tfm_spm_get_msg_from_handle(psa_handle_t msg_handle)
+{
+    /*
+     * The message handle passed by the caller is considered invalid in the
+     * following cases:
+     *   1. Not a valid message handle. (The address of a message is not the
+     *      address of a possible handle from the pool
+     *   2. Handle does not belong to the caller partition (The handle is
+     *      either unused, or owned by another partition)
+     * Check the conditions above
+     */
+    struct tfm_conn_handle_t *connection_handle_address;
+    struct tfm_msg_body_t *msg;
+    uint32_t partition_id;
+
+    msg = (struct tfm_msg_body_t *)msg_handle;
+
+    connection_handle_address =
+        TFM_GET_CONTAINER_PTR(msg, struct tfm_conn_handle_t, internal_msg);
+
+    if (is_valid_chunk_data_in_pool(
+        conn_handle_pool, (uint8_t *)connection_handle_address) != 1) {
+        return NULL;
+    }
+
+    /*
+     * Check that the magic number is correct. This proves that the message
+     * structure contains an active message.
+     */
+    if (msg->magic != TFM_MSG_MAGIC) {
+        return NULL;
+    }
+
+    /* Check that the running partition owns the message */
+    partition_id = tfm_spm_partition_get_running_partition_id();
+    if (partition_id != msg->service->partition->static_data->partition_id) {
+        return NULL;
+    }
+
+    /*
+     * FixMe: For condition 1 it should be checked whether the message belongs
+     * to the service. Skipping this check isn't a security risk as even if the
+     * message belongs to another service, the handle belongs to the calling
+     * partition.
+     */
+
+    return msg;
+}
+
+/* Return the message buffer embedded inside a connection handle. */
+struct tfm_msg_body_t *
+ tfm_spm_get_msg_buffer_from_conn_handle(struct tfm_conn_handle_t *conn_handle)
+{
+    TFM_CORE_ASSERT(conn_handle != NULL);
+
+    return &(conn_handle->internal_msg);
+}
+
+/*
+ * Populate a message body before delivery: clear the buffer, set the magic,
+ * service, handle and caller fields, copy the I/O vectors, and record the
+ * caller's outvec array for later write-back. For NS clients in multi-core
+ * topology the RPC caller data is also attached.
+ */
+void tfm_spm_fill_msg(struct tfm_msg_body_t *msg,
+                      struct tfm_spm_service_t *service,
+                      struct tfm_conn_handle_t *handle,
+                      int32_t type, int32_t client_id,
+                      psa_invec *invec, size_t in_len,
+                      psa_outvec *outvec, size_t out_len,
+                      psa_outvec *caller_outvec)
+{
+    uint32_t i;
+
+    TFM_CORE_ASSERT(msg);
+    TFM_CORE_ASSERT(service);
+    TFM_CORE_ASSERT(!(invec == NULL && in_len != 0));
+    TFM_CORE_ASSERT(!(outvec == NULL && out_len != 0));
+    TFM_CORE_ASSERT(in_len <= PSA_MAX_IOVEC);
+    TFM_CORE_ASSERT(out_len <= PSA_MAX_IOVEC);
+    TFM_CORE_ASSERT(in_len + out_len <= PSA_MAX_IOVEC);
+
+    /* Clear message buffer before using it */
+    tfm_core_util_memset(msg, 0, sizeof(struct tfm_msg_body_t));
+
+    tfm_event_init(&msg->ack_evnt);
+    msg->magic = TFM_MSG_MAGIC;
+    msg->service = service;
+    msg->handle = handle;
+    msg->caller_outvec = caller_outvec;
+    msg->msg.client_id = client_id;
+
+    /* Copy contents */
+    msg->msg.type = type;
+
+    for (i = 0; i < in_len; i++) {
+        msg->msg.in_size[i] = invec[i].len;
+        msg->invec[i].base = invec[i].base;
+    }
+
+    for (i = 0; i < out_len; i++) {
+        msg->msg.out_size[i] = outvec[i].len;
+        msg->outvec[i].base = outvec[i].base;
+        /* Out len is used to record the written number, set 0 here again */
+        msg->outvec[i].len = 0;
+    }
+
+    /* Use message address as handle */
+    msg->msg.handle = (psa_handle_t)msg;
+
+    /* For connected handle, set rhandle to every message */
+    if (handle) {
+        msg->msg.rhandle = tfm_spm_get_rhandle(service, handle);
+    }
+
+    /* Set the private data of NSPE client caller in multi-core topology */
+    if (TFM_CLIENT_ID_IS_NS(client_id)) {
+        tfm_rpc_set_caller_data(msg, client_id);
+    }
+}
+
+/*
+ * Deliver a message to a service: enqueue it on the service message queue,
+ * assert the service signal in the owning partition's runtime data and wake
+ * any thread blocked in psa_wait(). For non-RPC (in-context) requests the
+ * caller thread then blocks on the message ACK event until the service
+ * replies.
+ *
+ * Returns IPC_SUCCESS on success, or IPC_ERROR_GENERIC if the message
+ * cannot be enqueued.
+ */
+int32_t tfm_spm_send_event(struct tfm_spm_service_t *service,
+                           struct tfm_msg_body_t *msg)
+{
+    struct spm_partition_runtime_data_t *p_runtime_data;
+
+    TFM_CORE_ASSERT(service);
+    TFM_CORE_ASSERT(msg);
+
+    /*
+     * Dereference 'service' only after the asserts above have run.
+     * Previously this was read in the declaration initializer, before the
+     * validity checks.
+     */
+    p_runtime_data = &service->partition->runtime_data;
+
+    /* Enqueue message to service message queue */
+    if (tfm_msg_enqueue(&service->msg_queue, msg) != IPC_SUCCESS) {
+        return IPC_ERROR_GENERIC;
+    }
+
+    /* Messages put. Update signals */
+    p_runtime_data->signals |= service->service_db->signal;
+
+    tfm_event_wake(&p_runtime_data->signal_evnt, (p_runtime_data->signals &
+                                                  p_runtime_data->signal_mask));
+
+    /*
+     * If it is a NS request via RPC, it is unnecessary to block current
+     * thread.
+     */
+    if (!is_tfm_rpc_msg(msg)) {
+        tfm_event_wait(&msg->ack_evnt);
+    }
+
+    return IPC_SUCCESS;
+}
+
+/**
+ * \brief Get bottom of stack region for a partition
+ *
+ * \param[in] partition_idx     Partition index
+ *
+ * \return Stack region bottom value, taken from the partition's memory_data
+ *
+ * \note This function doesn't check if partition_idx is valid.
+ */
+static uint32_t tfm_spm_partition_get_stack_bottom(uint32_t partition_idx)
+{
+    return g_spm_partition_db.partitions[partition_idx].
+            memory_data->stack_bottom;
+}
+
+/**
+ * \brief Get top of stack region for a partition
+ *
+ * \param[in] partition_idx     Partition index
+ *
+ * \return Stack region top value, taken from the partition's memory_data
+ *
+ * \note This function doesn't check if partition_idx is valid.
+ */
+static uint32_t tfm_spm_partition_get_stack_top(uint32_t partition_idx)
+{
+    return g_spm_partition_db.partitions[partition_idx].memory_data->stack_top;
+}
+
+/*
+ * Derive the ID of the running partition from the current thread: the
+ * thread object is embedded in the partition runtime data, which in turn is
+ * embedded in the partition descriptor, so two container-of steps recover
+ * the partition.
+ */
+uint32_t tfm_spm_partition_get_running_partition_id(void)
+{
+    struct tfm_core_thread_t *pth = tfm_core_thrd_get_curr_thread();
+    struct spm_partition_desc_t *partition;
+    struct spm_partition_runtime_data_t *r_data;
+
+    r_data = TFM_GET_CONTAINER_PTR(pth, struct spm_partition_runtime_data_t,
+                                   sp_thrd);
+    partition = TFM_GET_CONTAINER_PTR(r_data, struct spm_partition_desc_t,
+                                      runtime_data);
+    return partition->static_data->partition_id;
+}
+
+/* Return the thread object stored in the partition's runtime data. */
+static struct tfm_core_thread_t *
+    tfm_spm_partition_get_thread_info(uint32_t partition_idx)
+{
+    return &g_spm_partition_db.partitions[partition_idx].runtime_data.sp_thrd;
+}
+
+/* Return the partition's init function as a thread entry point. */
+static tfm_core_thrd_entry_t
+    tfm_spm_partition_get_init_func(uint32_t partition_idx)
+{
+    return (tfm_core_thrd_entry_t)(g_spm_partition_db.partitions[partition_idx].
+                                   static_data->partition_init);
+}
+
+/* Return the partition's static scheduling priority. */
+static uint32_t tfm_spm_partition_get_priority(uint32_t partition_idx)
+{
+    return g_spm_partition_db.partitions[partition_idx].static_data->
+            partition_priority;
+}
+
+/*
+ * Check that [buffer, buffer + len) is accessible with the requested access
+ * rights for the given caller domain ('ns_caller') and privilege level.
+ * A zero 'len' always succeeds (empty buffer, base ignored); a NULL buffer
+ * and an address range that wraps around UINTPTR_MAX are rejected.
+ * Returns IPC_SUCCESS, IPC_ERROR_BAD_PARAMETERS or IPC_ERROR_MEMORY_CHECK.
+ */
+int32_t tfm_memory_check(const void *buffer, size_t len, bool ns_caller,
+                         enum tfm_memory_access_e access,
+                         uint32_t privileged)
+{
+    enum tfm_status_e err;
+
+    /* If len is zero, this indicates an empty buffer and base is ignored */
+    if (len == 0) {
+        return IPC_SUCCESS;
+    }
+
+    if (!buffer) {
+        return IPC_ERROR_BAD_PARAMETERS;
+    }
+
+    /* Reject ranges that would wrap around the address space */
+    if ((uintptr_t)buffer > (UINTPTR_MAX - len)) {
+        return IPC_ERROR_MEMORY_CHECK;
+    }
+
+    if (access == TFM_MEMORY_ACCESS_RW) {
+        err = tfm_core_has_write_access_to_region(buffer, len, ns_caller,
+                                                  privileged);
+    } else {
+        err = tfm_core_has_read_access_to_region(buffer, len, ns_caller,
+                                                 privileged);
+    }
+    if (err == TFM_SUCCESS) {
+        return IPC_SUCCESS;
+    }
+
+    return IPC_ERROR_MEMORY_CHECK;
+}
+
+/*
+ * Initialize the SPM: set up the connection handle pool, then for every
+ * partition configure platform isolation, collect assigned signals
+ * (PSA_DOORBELL plus any IRQ signals), initialize its event/service list
+ * and start its thread; afterwards bind each service to its partition and
+ * finally hand control to the scheduler.
+ *
+ * Returns the architecture link register of the non-secure entry thread,
+ * used by the caller to exit into the non-secure world.
+ */
+uint32_t tfm_spm_init(void)
+{
+    uint32_t i, j, num;
+    struct spm_partition_desc_t *partition;
+    struct tfm_core_thread_t *pth, *p_ns_entry_thread = NULL;
+    const struct tfm_spm_partition_platform_data_t **platform_data_p;
+
+    tfm_pool_init(conn_handle_pool,
+                  POOL_BUFFER_SIZE(conn_handle_pool),
+                  sizeof(struct tfm_conn_handle_t),
+                  TFM_CONN_HANDLE_MAX_NUM);
+
+    /* Init partition first for it will be used when init service */
+    for (i = 0; i < g_spm_partition_db.partition_count; i++) {
+        partition = &g_spm_partition_db.partitions[i];
+
+        /* Check if the PSA framework version matches. */
+        if (partition->static_data->psa_framework_version !=
+            PSA_FRAMEWORK_VERSION) {
+            ERROR_MSG("Warning: PSA Framework Version is not matched!");
+            continue;
+        }
+
+        platform_data_p = partition->platform_data_list;
+        if (platform_data_p != NULL) {
+            while ((*platform_data_p) != NULL) {
+                if (tfm_spm_hal_configure_default_isolation(i,
+                    *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
+                    tfm_core_panic();
+                }
+                ++platform_data_p;
+            }
+        }
+
+        if ((tfm_spm_partition_get_flags(i) & SPM_PART_FLAG_IPC) == 0) {
+            continue;
+        }
+
+        /* Add PSA_DOORBELL signal to assigned_signals */
+        partition->runtime_data.assigned_signals |= PSA_DOORBELL;
+
+        /* TODO: This can be optimized by generating the assigned signal
+         *       in code generation time.
+         */
+        for (j = 0; j < tfm_core_irq_signals_count; ++j) {
+            if (tfm_core_irq_signals[j].partition_id ==
+                partition->static_data->partition_id) {
+                partition->runtime_data.assigned_signals |=
+                                        tfm_core_irq_signals[j].signal_value;
+            }
+        }
+
+        tfm_event_init(&partition->runtime_data.signal_evnt);
+        tfm_list_init(&partition->runtime_data.service_list);
+
+        pth = tfm_spm_partition_get_thread_info(i);
+        if (!pth) {
+            tfm_core_panic();
+        }
+
+        tfm_core_thrd_init(pth,
+                           tfm_spm_partition_get_init_func(i),
+                           NULL,
+                           (uintptr_t)tfm_spm_partition_get_stack_top(i),
+                           (uintptr_t)tfm_spm_partition_get_stack_bottom(i));
+
+        pth->prior = tfm_spm_partition_get_priority(i);
+
+        if (partition->static_data->partition_id == TFM_SP_NON_SECURE_ID) {
+            p_ns_entry_thread = pth;
+            pth->param = (void *)tfm_spm_hal_get_ns_entry_point();
+        }
+
+        /* Kick off */
+        if (tfm_core_thrd_start(pth) != THRD_SUCCESS) {
+            tfm_core_panic();
+        }
+    }
+
+    /* Init Service */
+    num = sizeof(service) / sizeof(struct tfm_spm_service_t);
+    for (i = 0; i < num; i++) {
+        service[i].service_db = &service_db[i];
+        partition =
+            tfm_spm_get_partition_by_id(service[i].service_db->partition_id);
+        if (!partition) {
+            tfm_core_panic();
+        }
+        service[i].partition = partition;
+        partition->runtime_data.assigned_signals |=
+                                                service[i].service_db->signal;
+
+        tfm_list_init(&service[i].handle_list);
+        tfm_list_add_tail(&partition->runtime_data.service_list,
+                          &service[i].list);
+    }
+
+    /*
+     * All threads initialized, start the scheduler.
+     *
+     * NOTE:
+     * It is worthy to give the thread object to scheduler if the background
+     * context belongs to one of the threads. Here the background thread is the
+     * initialization thread who calls SPM SVC, which re-uses the non-secure
+     * entry thread's stack. After SPM initialization is done, this stack is
+     * cleaned up and the background context is never going to return. Tell
+     * the scheduler that the current thread is non-secure entry thread.
+     *
+     * NOTE(review): p_ns_entry_thread is assumed to be non-NULL here, i.e.
+     * a partition with TFM_SP_NON_SECURE_ID is expected to exist — confirm.
+     */
+    tfm_core_thrd_start_scheduler(p_ns_entry_thread);
+
+    return p_ns_entry_thread->arch_ctx.lr;
+}
+
+/*
+ * Scheduling action run from the PendSV handler: pick the next thread; at
+ * isolation level 2 also adjust the partition privilege mode according to
+ * the incoming partition's PSA-RoT flag; then switch context and service
+ * any pending multi-core RPC mailbox messages.
+ */
+void tfm_pendsv_do_schedule(struct tfm_arch_ctx_t *p_actx)
+{
+#if TFM_LVL == 2
+    struct spm_partition_desc_t *p_next_partition;
+    struct spm_partition_runtime_data_t *r_data;
+    uint32_t is_privileged;
+#endif
+    struct tfm_core_thread_t *pth_next = tfm_core_thrd_get_next_thread();
+    struct tfm_core_thread_t *pth_curr = tfm_core_thrd_get_curr_thread();
+
+    /* Only switch when there is a different runnable thread */
+    if (pth_next != NULL && pth_curr != pth_next) {
+#if TFM_LVL == 2
+        r_data = TFM_GET_CONTAINER_PTR(pth_next,
+                                       struct spm_partition_runtime_data_t,
+                                       sp_thrd);
+        p_next_partition = TFM_GET_CONTAINER_PTR(r_data,
+                                                 struct spm_partition_desc_t,
+                                                 runtime_data);
+
+        if (p_next_partition->static_data->partition_flags &
+            SPM_PART_FLAG_PSA_ROT) {
+            is_privileged = TFM_PARTITION_PRIVILEGED_MODE;
+        } else {
+            is_privileged = TFM_PARTITION_UNPRIVILEGED_MODE;
+        }
+
+        tfm_spm_partition_change_privilege(is_privileged);
+#endif
+
+        tfm_core_thrd_switch_context(p_actx, pth_curr, pth_next);
+    }
+
+    /*
+     * Handle pending mailbox message from NS in multi-core topology.
+     * Empty operation on single Armv8-M platform.
+     */
+    tfm_rpc_client_call_handler();
+}
+
+/*********************** SPM functions for PSA Client APIs *******************/
+
+/* SVC handler for psa_framework_version(): forward to the client call API. */
+uint32_t tfm_spm_psa_framework_version(void)
+{
+    return tfm_spm_client_psa_framework_version();
+}
+
+/* SVC handler for psa_version(): args[0] holds the SID to query. */
+uint32_t tfm_spm_psa_version(uint32_t *args, bool ns_caller)
+{
+    uint32_t sid;
+
+    TFM_CORE_ASSERT(args != NULL);
+    sid = (uint32_t)args[0];
+
+    return tfm_spm_client_psa_version(sid, ns_caller);
+}
+
+/* SVC handler for psa_connect(): args[0] = SID, args[1] = client version. */
+psa_status_t tfm_spm_psa_connect(uint32_t *args, bool ns_caller)
+{
+    uint32_t sid;
+    uint32_t version;
+
+    TFM_CORE_ASSERT(args != NULL);
+    sid = (uint32_t)args[0];
+    version = (uint32_t)args[1];
+
+    return tfm_spm_client_psa_connect(sid, version, ns_caller);
+}
+
+/*
+ * SVC handler for psa_call():
+ *   args[0] = handle, args[1] = pointer to control parameter block
+ *   (type/in_len/out_len), args[2] = invec array, args[3] = outvec array.
+ * The control block is validated against the caller's privilege level
+ * before being copied; negative request types cause a panic.
+ */
+psa_status_t tfm_spm_psa_call(uint32_t *args, bool ns_caller, uint32_t lr)
+{
+    psa_handle_t handle;
+    psa_invec *inptr;
+    psa_outvec *outptr;
+    size_t in_num, out_num;
+    struct spm_partition_desc_t *partition = NULL;
+    uint32_t privileged;
+    int32_t type;
+    struct tfm_control_parameter_t ctrl_param;
+
+    TFM_CORE_ASSERT(args != NULL);
+    handle = (psa_handle_t)args[0];
+
+    partition = tfm_spm_get_running_partition();
+    if (!partition) {
+        tfm_core_panic();
+    }
+    privileged = tfm_spm_partition_get_privileged_mode(
+        partition->static_data->partition_flags);
+
+    /*
+     * Read parameters from the arguments. It is a fatal error if the
+     * memory reference for buffer is invalid or not readable.
+     */
+    if (tfm_memory_check((const void *)args[1],
+        sizeof(struct tfm_control_parameter_t), ns_caller,
+        TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
+        tfm_core_panic();
+    }
+
+    tfm_core_util_memcpy(&ctrl_param,
+                         (const void *)args[1],
+                         sizeof(ctrl_param));
+
+    type = ctrl_param.type;
+    in_num = ctrl_param.in_len;
+    out_num = ctrl_param.out_len;
+    inptr = (psa_invec *)args[2];
+    outptr = (psa_outvec *)args[3];
+
+    /* The request type must be zero or positive. */
+    if (type < 0) {
+        tfm_core_panic();
+    }
+
+    return tfm_spm_client_psa_call(handle, type, inptr, in_num, outptr, out_num,
+                                   ns_caller, privileged);
+}
+
+/* SVC handler for psa_close(): args[0] holds the handle to close. */
+void tfm_spm_psa_close(uint32_t *args, bool ns_caller)
+{
+    psa_handle_t handle;
+
+    TFM_CORE_ASSERT(args != NULL);
+    handle = args[0];
+
+    tfm_spm_client_psa_close(handle, ns_caller);
+}
+
+/* Report the platform lifecycle state (currently a stub, see FixMe). */
+uint32_t tfm_spm_get_lifecycle_state(void)
+{
+    /*
+     * FixMe: return PSA_LIFECYCLE_UNKNOWN to the caller directly. It will be
+     * implemented in the future.
+     */
+    return PSA_LIFECYCLE_UNKNOWN;
+}
+
+/********************* SPM functions for PSA Service APIs ********************/
+
+/*
+ * SVC handler for psa_wait(): args[0] = signal mask, args[1] = timeout.
+ * Blocks the caller (PSA_BLOCK) until at least one signal in the mask is
+ * asserted; panics if the mask contains no assigned signals.
+ */
+psa_signal_t tfm_spm_psa_wait(uint32_t *args)
+{
+    psa_signal_t signal_mask;
+    uint32_t timeout;
+    struct spm_partition_desc_t *partition = NULL;
+
+    TFM_CORE_ASSERT(args != NULL);
+    signal_mask = (psa_signal_t)args[0];
+    timeout = args[1];
+
+    /*
+     * Timeout[30:0] are reserved for future use.
+     * SPM must ignore the value of RES.
+     */
+    timeout &= PSA_TIMEOUT_MASK;
+
+    partition = tfm_spm_get_running_partition();
+    if (!partition) {
+        tfm_core_panic();
+    }
+
+    /*
+     * It is a PROGRAMMER ERROR if the signal_mask does not include any assigned
+     * signals.
+     */
+    if ((partition->runtime_data.assigned_signals & signal_mask) == 0) {
+        tfm_core_panic();
+    }
+
+    /*
+     * Expected signals are included in signal wait mask, ignored signals
+     * should not be set and affect caller thread state. Save this mask for
+     * further checking while signals are ready to be set.
+     */
+    partition->runtime_data.signal_mask = signal_mask;
+
+    /*
+     * tfm_event_wait() blocks the caller thread if no signals are available.
+     * In this case, the return value of this function is temporary set into
+     * runtime context. After new signal(s) are available, the return value
+     * is updated with the available signal(s) and blocked thread gets to run.
+     */
+    if (timeout == PSA_BLOCK &&
+        (partition->runtime_data.signals & signal_mask) == 0) {
+        tfm_event_wait(&partition->runtime_data.signal_evnt);
+    }
+
+    return partition->runtime_data.signals & signal_mask;
+}
+
+/*
+ * SVC handler for psa_get(): args[0] = signal (exactly one bit set),
+ * args[1] = destination psa_msg_t buffer. Dequeues one message for the
+ * corresponding service, marks its handle active, and clears the signal
+ * once the service queue is drained.
+ */
+psa_status_t tfm_spm_psa_get(uint32_t *args)
+{
+    psa_signal_t signal;
+    psa_msg_t *msg = NULL;
+    struct tfm_spm_service_t *service = NULL;
+    struct tfm_msg_body_t *tmp_msg = NULL;
+    struct spm_partition_desc_t *partition = NULL;
+    uint32_t privileged;
+
+    TFM_CORE_ASSERT(args != NULL);
+    signal = (psa_signal_t)args[0];
+    msg = (psa_msg_t *)args[1];
+
+    /*
+     * Only one message could be retrieved every time for psa_get(). It is a
+     * fatal error if the input signal has more than a signal bit set.
+     */
+    if (!tfm_is_one_bit_set(signal)) {
+        tfm_core_panic();
+    }
+
+    partition = tfm_spm_get_running_partition();
+    if (!partition) {
+        tfm_core_panic();
+    }
+    privileged = tfm_spm_partition_get_privileged_mode(
+        partition->static_data->partition_flags);
+
+    /*
+     * Write the message to the service buffer. It is a fatal error if the
+     * input msg pointer is not a valid memory reference or not read-write.
+     */
+    if (tfm_memory_check(msg, sizeof(psa_msg_t), false, TFM_MEMORY_ACCESS_RW,
+        privileged) != IPC_SUCCESS) {
+        tfm_core_panic();
+    }
+
+    /*
+     * It is a fatal error if the caller call psa_get() when no message has
+     * been set. The caller must call this function after an RoT Service signal
+     * is returned by psa_wait().
+     */
+    if (partition->runtime_data.signals == 0) {
+        tfm_core_panic();
+    }
+
+    /*
+     * It is a fatal error if the RoT Service signal is not currently asserted.
+     */
+    if ((partition->runtime_data.signals & signal) == 0) {
+        tfm_core_panic();
+    }
+
+    /*
+     * Get RoT service by signal from partition. It is a fatal error if getting
+     * failed, which means the input signal is not correspond to an RoT service.
+     */
+    service = tfm_spm_get_service_by_signal(partition, signal);
+    if (!service) {
+        tfm_core_panic();
+    }
+
+    tmp_msg = tfm_msg_dequeue(&service->msg_queue);
+    if (!tmp_msg) {
+        return PSA_ERROR_DOES_NOT_EXIST;
+    }
+
+    ((struct tfm_conn_handle_t *)(tmp_msg->handle))->status =
+                                                     TFM_HANDLE_STATUS_ACTIVE;
+
+    tfm_core_util_memcpy(msg, &tmp_msg->msg, sizeof(psa_msg_t));
+
+    /*
+     * There may be multiple messages for this RoT Service signal, do not clear
+     * its mask until no remaining message.
+     */
+    if (tfm_msg_queue_is_empty(&service->msg_queue)) {
+        partition->runtime_data.signals &= ~signal;
+    }
+
+    return PSA_SUCCESS;
+}
+
+/*
+ * SVC handler for psa_set_rhandle(): args[0] = message handle, args[1] =
+ * reverse handle value. Stores rhandle in both the current message and the
+ * connection handle so following client calls carry it.
+ */
+void tfm_spm_psa_set_rhandle(uint32_t *args)
+{
+    psa_handle_t msg_handle;
+    void *rhandle = NULL;
+    struct tfm_msg_body_t *msg = NULL;
+
+    TFM_CORE_ASSERT(args != NULL);
+    msg_handle = (psa_handle_t)args[0];
+    rhandle = (void *)args[1];
+
+    /* It is a fatal error if message handle is invalid */
+    msg = tfm_spm_get_msg_from_handle(msg_handle);
+    if (!msg) {
+        tfm_core_panic();
+    }
+
+    msg->msg.rhandle = rhandle;
+
+    /* Store reverse handle for following client calls. */
+    tfm_spm_set_rhandle(msg->service, msg->handle, rhandle);
+}
+
+/*
+ * SVC handler for psa_read(): args[0] = message handle, args[1] = invec
+ * index, args[2] = destination buffer, args[3] = requested byte count.
+ * Copies up to the remaining invec data into the service buffer, advances
+ * the invec cursor and returns the number of bytes copied.
+ */
+size_t tfm_spm_psa_read(uint32_t *args)
+{
+    psa_handle_t msg_handle;
+    uint32_t invec_idx;
+    void *buffer = NULL;
+    size_t num_bytes;
+    size_t bytes;
+    struct tfm_msg_body_t *msg = NULL;
+    uint32_t privileged;
+    struct spm_partition_desc_t *partition = NULL;
+
+    TFM_CORE_ASSERT(args != NULL);
+    msg_handle = (psa_handle_t)args[0];
+    invec_idx = args[1];
+    buffer = (void *)args[2];
+    num_bytes = (size_t)args[3];
+
+    /* It is a fatal error if message handle is invalid */
+    msg = tfm_spm_get_msg_from_handle(msg_handle);
+    if (!msg) {
+        tfm_core_panic();
+    }
+
+    partition = msg->service->partition;
+    privileged = tfm_spm_partition_get_privileged_mode(
+        partition->static_data->partition_flags);
+
+    /*
+     * It is a fatal error if message handle does not refer to a request
+     * message
+     */
+    if (msg->msg.type < PSA_IPC_CALL) {
+        tfm_core_panic();
+    }
+
+    /*
+     * It is a fatal error if invec_idx is equal to or greater than
+     * PSA_MAX_IOVEC
+     */
+    if (invec_idx >= PSA_MAX_IOVEC) {
+        tfm_core_panic();
+    }
+
+    /* There was no remaining data in this input vector */
+    if (msg->msg.in_size[invec_idx] == 0) {
+        return 0;
+    }
+
+    /*
+     * Copy the client data to the service buffer. It is a fatal error
+     * if the memory reference for buffer is invalid or not read-write.
+     */
+    if (tfm_memory_check(buffer, num_bytes, false,
+        TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
+        tfm_core_panic();
+    }
+
+    /* Copy no more than what remains in this input vector */
+    bytes = num_bytes > msg->msg.in_size[invec_idx] ?
+                        msg->msg.in_size[invec_idx] : num_bytes;
+
+    tfm_core_util_memcpy(buffer, msg->invec[invec_idx].base, bytes);
+
+    /* There may be some remaining data */
+    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + bytes;
+    msg->msg.in_size[invec_idx] -= bytes;
+
+    return bytes;
+}
+
+/*
+ * SVC handler for psa_skip().
+ * args[0]: message handle; args[1]: input vector index; args[2]: number of
+ * bytes to skip. Returns the number of bytes actually skipped, which is
+ * clamped to the remaining size of the input vector.
+ */
+size_t tfm_spm_psa_skip(uint32_t *args)
+{
+    psa_handle_t msg_handle;
+    uint32_t invec_idx;
+    size_t num_bytes;
+    struct tfm_msg_body_t *msg = NULL;
+
+    TFM_CORE_ASSERT(args != NULL);
+    msg_handle = (psa_handle_t)args[0];
+    invec_idx = args[1];
+    num_bytes = (size_t)args[2];
+
+    /* It is a fatal error if message handle is invalid */
+    msg = tfm_spm_get_msg_from_handle(msg_handle);
+    if (!msg) {
+        tfm_core_panic();
+    }
+
+    /*
+     * It is a fatal error if message handle does not refer to a request
+     * message
+     */
+    if (msg->msg.type < PSA_IPC_CALL) {
+        tfm_core_panic();
+    }
+
+    /*
+     * It is a fatal error if invec_idx is equal to or greater than
+     * PSA_MAX_IOVEC
+     */
+    if (invec_idx >= PSA_MAX_IOVEC) {
+        tfm_core_panic();
+    }
+
+    /* There was no remaining data in this input vector */
+    if (msg->msg.in_size[invec_idx] == 0) {
+        return 0;
+    }
+
+    /*
+     * If num_bytes is greater than the remaining size of the input vector then
+     * the remaining size of the input vector is used.
+     */
+    if (num_bytes > msg->msg.in_size[invec_idx]) {
+        num_bytes = msg->msg.in_size[invec_idx];
+    }
+
+    /* There may be some remaining data; advance past what was skipped. */
+    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base +
+                                 num_bytes;
+    msg->msg.in_size[invec_idx] -= num_bytes;
+
+    return num_bytes;
+}
+
+/*
+ * SVC handler for psa_write().
+ * args[0]: message handle; args[1]: output vector index; args[2]: source
+ * buffer in the service; args[3]: number of bytes to append to the client's
+ * output vector. Panics on any contract violation.
+ */
+void tfm_spm_psa_write(uint32_t *args)
+{
+    psa_handle_t msg_handle;
+    uint32_t outvec_idx;
+    void *buffer = NULL;
+    size_t num_bytes;
+    struct tfm_msg_body_t *msg = NULL;
+    uint32_t privileged;
+    struct spm_partition_desc_t *partition = NULL;
+
+    TFM_CORE_ASSERT(args != NULL);
+    msg_handle = (psa_handle_t)args[0];
+    outvec_idx = args[1];
+    buffer = (void *)args[2];
+    num_bytes = (size_t)args[3];
+
+    /* It is a fatal error if message handle is invalid */
+    msg = tfm_spm_get_msg_from_handle(msg_handle);
+    if (!msg) {
+        tfm_core_panic();
+    }
+
+    /* The memory check below is done against the service's privilege level. */
+    partition = msg->service->partition;
+    privileged = tfm_spm_partition_get_privileged_mode(
+        partition->static_data->partition_flags);
+
+    /*
+     * It is a fatal error if message handle does not refer to a request
+     * message
+     */
+    if (msg->msg.type < PSA_IPC_CALL) {
+        tfm_core_panic();
+    }
+
+    /*
+     * It is a fatal error if outvec_idx is equal to or greater than
+     * PSA_MAX_IOVEC
+     */
+    if (outvec_idx >= PSA_MAX_IOVEC) {
+        tfm_core_panic();
+    }
+
+    /*
+     * It is a fatal error if the call attempts to write data past the end of
+     * the client output vector
+     */
+    if (num_bytes > msg->msg.out_size[outvec_idx] -
+        msg->outvec[outvec_idx].len) {
+        tfm_core_panic();
+    }
+
+    /*
+     * Copy the service buffer to client outvecs. It is a fatal error
+     * if the memory reference for buffer is invalid or not readable.
+     */
+    if (tfm_memory_check(buffer, num_bytes, false,
+        TFM_MEMORY_ACCESS_RO, privileged) != IPC_SUCCESS) {
+        tfm_core_panic();
+    }
+
+    /* Append after any bytes written by previous psa_write() calls. */
+    tfm_core_util_memcpy((char *)msg->outvec[outvec_idx].base +
+                         msg->outvec[outvec_idx].len, buffer, num_bytes);
+
+    /* Update the write number */
+    msg->outvec[outvec_idx].len += num_bytes;
+}
+
+/*
+ * Propagate the written lengths of the service-side outvecs back into the
+ * caller's psa_outvec array so the client sees how many bytes each output
+ * parameter received. Called on reply to a request message.
+ */
+static void update_caller_outvec_len(struct tfm_msg_body_t *msg)
+{
+    uint32_t i;
+
+    /*
+     * FixMe: abstract this part into dedicated functions to avoid
+     * accessing thread context in psa layer
+     */
+    /* If it is a NS request via RPC, the owner of this message is not set */
+    if (!is_tfm_rpc_msg(msg)) {
+        TFM_CORE_ASSERT(msg->ack_evnt.owner->state == THRD_STATE_BLOCK);
+    }
+
+    for (i = 0; i < PSA_MAX_IOVEC; i++) {
+        if (msg->msg.out_size[i] == 0) {
+            continue;
+        }
+
+        /* The caller's vector base must not have been changed by the SPM. */
+        TFM_CORE_ASSERT(msg->caller_outvec[i].base == msg->outvec[i].base);
+
+        msg->caller_outvec[i].len = msg->outvec[i].len;
+    }
+}
+
+/*
+ * SVC handler for psa_reply().
+ * args[0]: message handle; args[1]: status code supplied by the RoT Service.
+ * Completes a CONNECT, DISCONNECT or request message and wakes the waiting
+ * client (event wake for local callers, RPC reply for mailbox callers).
+ */
+void tfm_spm_psa_reply(uint32_t *args)
+{
+    psa_handle_t msg_handle;
+    psa_status_t status;
+    struct tfm_spm_service_t *service = NULL;
+    struct tfm_msg_body_t *msg = NULL;
+    int32_t ret = PSA_SUCCESS;
+
+    TFM_CORE_ASSERT(args != NULL);
+    msg_handle = (psa_handle_t)args[0];
+    status = (psa_status_t)args[1];
+
+    /* It is a fatal error if message handle is invalid */
+    msg = tfm_spm_get_msg_from_handle(msg_handle);
+    if (!msg) {
+        tfm_core_panic();
+    }
+
+    /*
+     * RoT Service information is needed in this function; it is stored in the
+     * message body structure. Only two parameters are passed to this function
+     * (handle and status), so this is a simple way to retrieve it.
+     */
+    service = msg->service;
+    if (!service) {
+        tfm_core_panic();
+    }
+
+    /*
+     * Three types of message are passed to this function: CONNECTION, REQUEST,
+     * DISCONNECTION. Each type needs to be processed differently.
+     */
+    switch (msg->msg.type) {
+    case PSA_IPC_CONNECT:
+        /*
+         * Reply to PSA_IPC_CONNECT message. Connect handle is returned if the
+         * input status is PSA_SUCCESS. Other return values are based on the
+         * input status.
+         */
+        if (status == PSA_SUCCESS) {
+            ret = tfm_spm_to_user_handle(msg->handle);
+        } else if (status == PSA_ERROR_CONNECTION_REFUSED) {
+            /* Refuse the client connection, indicating a permanent error. */
+            tfm_spm_free_conn_handle(service, msg->handle);
+            ret = PSA_ERROR_CONNECTION_REFUSED;
+        } else if (status == PSA_ERROR_CONNECTION_BUSY) {
+            /* Fail the client connection, indicating a transient error. */
+            ret = PSA_ERROR_CONNECTION_BUSY;
+        } else {
+            tfm_core_panic();
+        }
+        break;
+    case PSA_IPC_DISCONNECT:
+        /* Service handle is not used anymore */
+        tfm_spm_free_conn_handle(service, msg->handle);
+
+        /*
+         * If the message type is PSA_IPC_DISCONNECT, then the status code is
+         * ignored
+         */
+        break;
+    default:
+        if (msg->msg.type >= PSA_IPC_CALL) {
+            /* Reply to a request message. Return values are based on status */
+            ret = status;
+            /*
+             * The total number of bytes written to a single parameter must be
+             * reported to the client by updating the len member of the
+             * psa_outvec structure for the parameter before returning from
+             * psa_call().
+             */
+            update_caller_outvec_len(msg);
+        } else {
+            tfm_core_panic();
+        }
+    }
+
+    if (ret == PSA_ERROR_PROGRAMMER_ERROR) {
+        /*
+         * If the source of the programmer error is a Secure Partition, the SPM
+         * must panic the Secure Partition in response to a PROGRAMMER ERROR.
+         */
+        if (TFM_CLIENT_ID_IS_NS(msg->msg.client_id)) {
+            ((struct tfm_conn_handle_t *)(msg->handle))->status =
+                                                TFM_HANDLE_STATUS_CONNECT_ERROR;
+        } else {
+            tfm_core_panic();
+        }
+    } else {
+        /* The connection is free to accept the next request. */
+        ((struct tfm_conn_handle_t *)(msg->handle))->status =
+                                                TFM_HANDLE_STATUS_IDLE;
+    }
+
+    if (is_tfm_rpc_msg(msg)) {
+        tfm_rpc_client_call_reply(msg, ret);
+    } else {
+        tfm_event_wake(&msg->ack_evnt, ret);
+    }
+}
+
+/**
+ * \brief Notify the partition with the signal.
+ *
+ * \param[in] partition_id      The ID of the partition to be notified.
+ * \param[in] signal            The signal that the partition is to be notified
+ *                              with.
+ *
+ * \retval void                 Success.
+ * \retval "Does not return"    If partition_id is invalid.
+ */
+static void notify_with_signal(int32_t partition_id, psa_signal_t signal)
+{
+    struct spm_partition_desc_t *partition = NULL;
+
+    /*
+     * The value of partition_id must be greater than zero as the target of
+     * notification must be a Secure Partition, providing a Non-secure
+     * Partition ID is a fatal error.
+     */
+    if (!TFM_CLIENT_ID_IS_S(partition_id)) {
+        tfm_core_panic();
+    }
+
+    /*
+     * It is a fatal error if partition_id does not correspond to a Secure
+     * Partition.
+     */
+    partition = tfm_spm_get_partition_by_id(partition_id);
+    if (!partition) {
+        tfm_core_panic();
+    }
+
+    /* Assert the signal; psa_wait() in the target reads this word. */
+    partition->runtime_data.signals |= signal;
+
+    /*
+     * The target partition may be blocked with waiting for signals after
+     * called psa_wait(). Set the return value with the available signals
+     * before wake it up with tfm_event_signal().
+     */
+    tfm_event_wake(&partition->runtime_data.signal_evnt,
+                   partition->runtime_data.signals &
+                   partition->runtime_data.signal_mask);
+}
+
+/*
+ * SVC handler for psa_notify().
+ * args[0]: ID of the partition to notify. Asserts the target partition's
+ * PSA_DOORBELL signal.
+ */
+void tfm_spm_psa_notify(uint32_t *args)
+{
+    int32_t partition_id;
+
+    TFM_CORE_ASSERT(args != NULL);
+    partition_id = (int32_t)args[0];
+
+    notify_with_signal(partition_id, PSA_DOORBELL);
+}
+
+/**
+ * \brief Assert signal for a given IRQ line.
+ *
+ * \param[in] partition_id      The ID of the partition which handles this IRQ
+ * \param[in] signal            The signal associated with this IRQ
+ * \param[in] irq_line          The number of the IRQ line
+ *
+ * \retval void                 Success.
+ * \retval "Does not return"    Partition ID is invalid
+ */
+void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal,
+                     IRQn_Type irq_line)
+{
+    /* Keep the line masked until the partition calls psa_eoi(). */
+    tfm_spm_hal_disable_irq(irq_line);
+    notify_with_signal(partition_id, signal);
+}
+
+/*
+ * SVC handler for psa_clear(): de-assert the running partition's doorbell
+ * signal. Panics if the doorbell is not currently asserted.
+ */
+void tfm_spm_psa_clear(void)
+{
+    struct spm_partition_desc_t *partition = NULL;
+
+    partition = tfm_spm_get_running_partition();
+    if (!partition) {
+        tfm_core_panic();
+    }
+
+    /*
+     * It is a fatal error if the Secure Partition's doorbell signal is not
+     * currently asserted.
+     */
+    if ((partition->runtime_data.signals & PSA_DOORBELL) == 0) {
+        tfm_core_panic();
+    }
+    partition->runtime_data.signals &= ~PSA_DOORBELL;
+}
+
+/* SVC handler for psa_panic(): never returns to the calling partition. */
+void tfm_spm_psa_panic(void)
+{
+    /*
+     * PSA FF recommends that the SPM causes the system to restart when a secure
+     * partition panics.
+     */
+    tfm_spm_hal_system_reset();
+}
+
+/**
+ * \brief Return the IRQ line number associated with a signal
+ *
+ * \param[in]      partition_id    The ID of the partition in which we look for
+ *                                 the signal.
+ * \param[in]      signal          The signal we do the query for.
+ * \param[out]     irq_line        The irq line associated with signal
+ *
+ * \retval IPC_SUCCESS          Execution successful, irq_line contains a valid
+ *                              value.
+ * \retval IPC_ERROR_GENERIC    There was an error finding the IRQ line for the
+ *                              signal. irq_line is unchanged.
+ */
+static int32_t get_irq_line_for_signal(int32_t partition_id,
+                                       psa_signal_t signal,
+                                       IRQn_Type *irq_line)
+{
+    size_t i;
+
+    /* Linear scan of the static IRQ-signal mapping table. */
+    for (i = 0; i < tfm_core_irq_signals_count; ++i) {
+        if (tfm_core_irq_signals[i].partition_id == partition_id &&
+            tfm_core_irq_signals[i].signal_value == signal) {
+            *irq_line = tfm_core_irq_signals[i].irq_line;
+            return IPC_SUCCESS;
+        }
+    }
+    return IPC_ERROR_GENERIC;
+}
+
+/*
+ * SVC handler for psa_eoi().
+ * args[0]: a single asserted interrupt signal of the running partition.
+ * Clears the signal and re-enables the associated (previously masked) IRQ
+ * line. Panics on any contract violation.
+ */
+void tfm_spm_psa_eoi(uint32_t *args)
+{
+    psa_signal_t irq_signal;
+    IRQn_Type irq_line = (IRQn_Type) 0;
+    int32_t ret;
+    struct spm_partition_desc_t *partition = NULL;
+
+    TFM_CORE_ASSERT(args != NULL);
+    irq_signal = (psa_signal_t)args[0];
+
+    /* It is a fatal error if passed signal indicates more than one signals. */
+    if (!tfm_is_one_bit_set(irq_signal)) {
+        tfm_core_panic();
+    }
+
+    partition = tfm_spm_get_running_partition();
+    if (!partition) {
+        tfm_core_panic();
+    }
+
+    ret = get_irq_line_for_signal(partition->static_data->partition_id,
+                                  irq_signal, &irq_line);
+    /* It is a fatal error if passed signal is not an interrupt signal. */
+    if (ret != IPC_SUCCESS) {
+        tfm_core_panic();
+    }
+
+    /* It is a fatal error if passed signal is not currently asserted */
+    if ((partition->runtime_data.signals & irq_signal) == 0) {
+        tfm_core_panic();
+    }
+
+    partition->runtime_data.signals &= ~irq_signal;
+
+    /* Drop any pending occurrence, then unmask the line again. */
+    tfm_spm_hal_clear_pending_irq(irq_line);
+    tfm_spm_hal_enable_irq(irq_line);
+}
+
+/*
+ * SVC handler for enabling an IRQ line. The signal to enable is taken from r0
+ * of the stacked state context. Panics if the signal is not a single,
+ * interrupt-backed signal of the running partition.
+ */
+void tfm_spm_enable_irq(uint32_t *args)
+{
+    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args;
+    psa_signal_t irq_signal = svc_ctx->r0;
+    IRQn_Type irq_line = (IRQn_Type) 0;
+    int32_t ret;
+    struct spm_partition_desc_t *partition = NULL;
+
+    /* It is a fatal error if passed signal indicates more than one signals. */
+    if (!tfm_is_one_bit_set(irq_signal)) {
+        tfm_core_panic();
+    }
+
+    partition = tfm_spm_get_running_partition();
+    if (!partition) {
+        tfm_core_panic();
+    }
+
+    ret = get_irq_line_for_signal(partition->static_data->partition_id,
+                                  irq_signal, &irq_line);
+    /* It is a fatal error if passed signal is not an interrupt signal. */
+    if (ret != IPC_SUCCESS) {
+        tfm_core_panic();
+    }
+
+    tfm_spm_hal_enable_irq(irq_line);
+}
+
+/*
+ * SVC handler for disabling an IRQ line. The signal to disable is taken from
+ * r0 of the stacked state context. Mirror image of tfm_spm_enable_irq() with
+ * identical validation.
+ */
+void tfm_spm_disable_irq(uint32_t *args)
+{
+    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args;
+    psa_signal_t irq_signal = svc_ctx->r0;
+    IRQn_Type irq_line = (IRQn_Type) 0;
+    int32_t ret;
+    struct spm_partition_desc_t *partition = NULL;
+
+    /* It is a fatal error if passed signal indicates more than one signals. */
+    if (!tfm_is_one_bit_set(irq_signal)) {
+        tfm_core_panic();
+    }
+
+    partition = tfm_spm_get_running_partition();
+    if (!partition) {
+        tfm_core_panic();
+    }
+
+    ret = get_irq_line_for_signal(partition->static_data->partition_id,
+                                  irq_signal, &irq_line);
+    /* It is a fatal error if passed signal is not an interrupt signal. */
+    if (ret != IPC_SUCCESS) {
+        tfm_core_panic();
+    }
+
+    tfm_spm_hal_disable_irq(irq_line);
+}
+
+/*
+ * Validate the caller of an SVC:
+ * - For a non-secure caller, the running partition must be the NS agent and
+ *   the stacked exception context must sit exactly at the top of the veneer
+ *   stack (i.e. no nested/background context) — otherwise panic.
+ * - For a secure caller, the running partition must have a valid (positive)
+ *   partition ID.
+ *
+ * \param[in] p_cur_sp    Descriptor of the currently running partition.
+ * \param[in] p_ctx       Pointer to the stacked state context of the SVC.
+ * \param[in] exc_return  EXC_RETURN value of the SVC exception.
+ * \param[in] ns_caller   True if the call originated from the NS side.
+ */
+void tfm_spm_validate_caller(struct spm_partition_desc_t *p_cur_sp,
+                             uint32_t *p_ctx, uint32_t exc_return,
+                             bool ns_caller)
+{
+    uintptr_t stacked_ctx_pos;
+
+    if (ns_caller) {
+        /*
+         * The background IRQ can't be supported, since if SP is executing,
+         * the preempted context of SP can be different with the one who
+         * preempts veneer.
+         */
+        if (p_cur_sp->static_data->partition_id != TFM_SP_NON_SECURE_ID) {
+            tfm_core_panic();
+        }
+
+        /*
+         * It is non-secure caller, check if veneer stack contains
+         * multiple contexts.
+         */
+        stacked_ctx_pos = (uintptr_t)p_ctx +
+                          sizeof(struct tfm_state_context_t) +
+                          TFM_VENEER_STACK_GUARD_SIZE;
+
+        /* Account for an FP context if the exception stacked one. */
+        if (is_stack_alloc_fp_space(exc_return)) {
+#if defined (__FPU_USED) && (__FPU_USED == 1U)
+            if (FPU->FPCCR & FPU_FPCCR_TS_Msk) {
+                stacked_ctx_pos += TFM_ADDTIONAL_FP_CONTEXT_WORDS *
+                                   sizeof(uint32_t);
+            }
+#endif
+            stacked_ctx_pos += TFM_BASIC_FP_CONTEXT_WORDS * sizeof(uint32_t);
+        }
+
+        /* The single stacked context must end exactly at the stack top. */
+        if (stacked_ctx_pos != p_cur_sp->runtime_data.sp_thrd.stk_top) {
+            tfm_core_panic();
+        }
+    } else if (p_cur_sp->static_data->partition_id <= 0) {
+        tfm_core_panic();
+    }
+}
+
+/*
+ * Handler for TFM_SVC_SPM_REQUEST: dispatch SPM requests raised by secure
+ * partitions. The result code is written back into the caller's stacked r0.
+ *
+ * \param[in] svc_ctx  Stacked state context of the SVC; r0 holds the request.
+ */
+void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
+{
+    uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
+    uint32_t running_partition_flags = 0;
+    const struct spm_partition_desc_t *partition = NULL;
+
+    /* Check permissions on request type basis */
+
+    switch (svc_ctx->r0) {
+    case TFM_SPM_REQUEST_RESET_VOTE:
+        partition = tfm_spm_get_running_partition();
+        if (!partition) {
+            tfm_core_panic();
+        }
+        running_partition_flags = partition->static_data->partition_flags;
+
+        /* Currently only PSA Root of Trust services are allowed to make Reset
+         * vote request
+         */
+        if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
+            *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
+            /* Stop here so the error result is not overwritten below. */
+            break;
+        }
+
+        /* FixMe: this is a placeholder for checks to be performed before
+         * allowing execution of reset
+         */
+        *res_ptr = (uint32_t)TFM_SUCCESS;
+
+        break;
+    default:
+        *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
+    }
+}
diff --git a/secure_fw/spm/model_ipc/spm_psa_client_call.c b/secure_fw/spm/model_ipc/spm_psa_client_call.c
new file mode 100644
index 0000000..58aae56
--- /dev/null
+++ b/secure_fw/spm/model_ipc/spm_psa_client_call.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include "psa/service.h"
+#include "tfm/spm_api.h"
+#include "tfm_core_utils.h"
+#include "tfm_internal_defines.h"
+#include "tfm_memory_utils.h"
+#include "tfm_message_queue.h"
+#include "spm_psa_client_call.h"
+#include "tfm_utils.h"
+#include "tfm_wait.h"
+#include "tfm_nspm.h"
+
+/* Handler for psa_framework_version(): reports the implemented FF version. */
+uint32_t tfm_spm_client_psa_framework_version(void)
+{
+    return PSA_FRAMEWORK_VERSION;
+}
+
+/*
+ * Handler for psa_version(): returns the version of the RoT Service named by
+ * sid, or PSA_VERSION_NONE when the service does not exist or the caller is
+ * not authorized to access it.
+ */
+uint32_t tfm_spm_client_psa_version(uint32_t sid, bool ns_caller)
+{
+    struct tfm_spm_service_t *service;
+
+    /*
+     * It should return PSA_VERSION_NONE if the RoT Service is not
+     * implemented.
+     */
+    service = tfm_spm_get_service_by_sid(sid);
+    if (!service) {
+        return PSA_VERSION_NONE;
+    }
+
+    /*
+     * It should return PSA_VERSION_NONE if the caller is not authorized
+     * to access the RoT Service.
+     */
+    if (tfm_spm_check_authorization(sid, service, ns_caller) != IPC_SUCCESS) {
+        return PSA_VERSION_NONE;
+    }
+
+    return service->service_db->version;
+}
+
+/*
+ * Handler for psa_connect(): validate the request, allocate a connection
+ * handle, and queue a PSA_IPC_CONNECT message to the RoT Service. The actual
+ * connect result is delivered later via psa_reply().
+ */
+psa_status_t tfm_spm_client_psa_connect(uint32_t sid, uint32_t version,
+                                        bool ns_caller)
+{
+    struct tfm_spm_service_t *service;
+    struct tfm_msg_body_t *msg;
+    struct tfm_conn_handle_t *connect_handle;
+    int32_t client_id;
+
+    /* It is a fatal error if the RoT Service does not exist on the platform */
+    service = tfm_spm_get_service_by_sid(sid);
+    if (!service) {
+        tfm_core_panic();
+    }
+
+    if (ns_caller) {
+        client_id = tfm_nspm_get_current_client_id();
+    } else {
+        client_id = tfm_spm_partition_get_running_partition_id();
+    }
+
+    /*
+     * It is a fatal error if the caller is not authorized to access the RoT
+     * Service.
+     */
+    if (tfm_spm_check_authorization(sid, service, ns_caller) != IPC_SUCCESS) {
+        tfm_core_panic();
+    }
+
+    /*
+     * Create connection handle here since it is possible to return the error
+     * code to client when creation fails.
+     */
+    connect_handle = tfm_spm_create_conn_handle(service, client_id);
+    if (!connect_handle) {
+        return PSA_ERROR_CONNECTION_BUSY;
+    }
+
+    /*
+     * It is a fatal error if the version of the RoT Service requested is not
+     * supported on the platform.
+     */
+    if (tfm_spm_check_client_version(service, version) != IPC_SUCCESS) {
+        tfm_core_panic();
+    }
+
+    msg = tfm_spm_get_msg_buffer_from_conn_handle(connect_handle);
+    if (!msg) {
+        /*
+         * Have no enough resource to create message.
+         * NOTE(review): connect_handle is not released on this path — looks
+         * like a connection-handle leak; confirm whether it should be freed
+         * before returning.
+         */
+        return PSA_ERROR_CONNECTION_BUSY;
+    }
+
+    /* No input or output needed for connect message */
+    tfm_spm_fill_msg(msg, service, connect_handle, PSA_IPC_CONNECT,
+                     client_id, NULL, 0, NULL, 0, NULL);
+
+    /*
+     * Send message and wake up the SP who is waiting on message queue,
+     * and scheduler triggered
+     */
+    tfm_spm_send_event(service, msg);
+
+    return PSA_SUCCESS;
+}
+
+/*
+ * Handler for psa_call(): validate the handle, snapshot and validate all
+ * iovecs (guarding against TOCTOU and overlapping inputs), then queue a
+ * request message of the given type to the RoT Service.
+ */
+psa_status_t tfm_spm_client_psa_call(psa_handle_t handle, int32_t type,
+                                     const psa_invec *inptr, size_t in_num,
+                                     psa_outvec *outptr, size_t out_num,
+                                     bool ns_caller, uint32_t privileged)
+{
+    psa_invec invecs[PSA_MAX_IOVEC];
+    psa_outvec outvecs[PSA_MAX_IOVEC];
+    struct tfm_conn_handle_t *conn_handle;
+    struct tfm_spm_service_t *service;
+    struct tfm_msg_body_t *msg;
+    int i, j;
+    int32_t client_id;
+
+    /* It is a fatal error if in_len + out_len > PSA_MAX_IOVEC. */
+    if ((in_num > PSA_MAX_IOVEC) ||
+        (out_num > PSA_MAX_IOVEC) ||
+        (in_num + out_num > PSA_MAX_IOVEC)) {
+        tfm_core_panic();
+    }
+
+    if (ns_caller) {
+        client_id = tfm_nspm_get_current_client_id();
+    } else {
+        client_id = tfm_spm_partition_get_running_partition_id();
+    }
+
+    conn_handle = tfm_spm_to_handle_instance(handle);
+    /* It is a fatal error if an invalid handle was passed. */
+    if (tfm_spm_validate_conn_handle(conn_handle, client_id) != IPC_SUCCESS) {
+        tfm_core_panic();
+    }
+    service = conn_handle->service;
+    if (!service) {
+        /* FixMe: Need to implement one mechanism to resolve this failure. */
+        tfm_core_panic();
+    }
+
+    /* It is a fatal error if the connection is currently handling a request. */
+    if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
+        tfm_core_panic();
+    }
+
+    /*
+     * Return PSA_ERROR_PROGRAMMER_ERROR immediately for the connection
+     * has been terminated by the RoT Service.
+     */
+    if (conn_handle->status == TFM_HANDLE_STATUS_CONNECT_ERROR) {
+        return PSA_ERROR_PROGRAMMER_ERROR;
+    }
+
+    /*
+     * Read client invecs from the wrap input vector. It is a fatal error
+     * if the memory reference for the wrap input vector is invalid or not
+     * readable.
+     */
+    if (tfm_memory_check(inptr, in_num * sizeof(psa_invec), ns_caller,
+        TFM_MEMORY_ACCESS_RO, privileged) != IPC_SUCCESS) {
+        tfm_core_panic();
+    }
+
+    /*
+     * Read client outvecs from the wrap output vector and will update the
+     * actual length later. It is a fatal error if the memory reference for
+     * the wrap output vector is invalid or not read-write.
+     */
+    if (tfm_memory_check(outptr, out_num * sizeof(psa_outvec), ns_caller,
+        TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
+        tfm_core_panic();
+    }
+
+    tfm_core_util_memset(invecs, 0, sizeof(invecs));
+    tfm_core_util_memset(outvecs, 0, sizeof(outvecs));
+
+    /* Copy the address out to avoid TOCTOU attacks. */
+    tfm_core_util_memcpy(invecs, inptr, in_num * sizeof(psa_invec));
+    tfm_core_util_memcpy(outvecs, outptr, out_num * sizeof(psa_outvec));
+
+    /*
+     * For client input vector, it is a fatal error if the provided payload
+     * memory reference was invalid or not readable.
+     */
+    for (i = 0; i < in_num; i++) {
+        if (tfm_memory_check(invecs[i].base, invecs[i].len, ns_caller,
+            TFM_MEMORY_ACCESS_RO, privileged) != IPC_SUCCESS) {
+            tfm_core_panic();
+        }
+    }
+
+    /*
+     * Clients must never overlap input parameters because of the risk of a
+     * double-fetch inconsistency.
+     * Overflow is checked in tfm_memory_check functions.
+     */
+    for (i = 0; i + 1 < in_num; i++) {
+        for (j = i+1; j < in_num; j++) {
+            /* Panic unless vector j lies entirely before or after vector i. */
+            if (!((char *) invecs[j].base + invecs[j].len <=
+                  (char *) invecs[i].base ||
+                  (char *) invecs[j].base >=
+                  (char *) invecs[i].base + invecs[i].len)) {
+                tfm_core_panic();
+            }
+        }
+    }
+
+    /*
+     * For client output vector, it is a fatal error if the provided payload
+     * memory reference was invalid or not read-write.
+     */
+    for (i = 0; i < out_num; i++) {
+        if (tfm_memory_check(outvecs[i].base, outvecs[i].len,
+            ns_caller, TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
+            tfm_core_panic();
+        }
+    }
+
+    /*
+     * FixMe: Need to check if the message is unrecognized by the RoT
+     * Service or incorrectly formatted.
+     */
+    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);
+    if (!msg) {
+        /* FixMe: Need to implement one mechanism to resolve this failure. */
+        tfm_core_panic();
+    }
+
+    tfm_spm_fill_msg(msg, service, conn_handle, type, client_id,
+                     invecs, in_num, outvecs, out_num, outptr);
+
+    /*
+     * Send message and wake up the SP who is waiting on message queue,
+     * and scheduler triggered
+     */
+    if (tfm_spm_send_event(service, msg) != IPC_SUCCESS) {
+        /* FixMe: Need to refine failure process here. */
+        tfm_core_panic();
+    }
+    return PSA_SUCCESS;
+}
+
+/*
+ * Handler for psa_close(): validate the handle and queue a PSA_IPC_DISCONNECT
+ * message to the RoT Service. A NULL handle is silently ignored; any other
+ * contract violation panics.
+ */
+void tfm_spm_client_psa_close(psa_handle_t handle, bool ns_caller)
+{
+    struct tfm_spm_service_t *service;
+    struct tfm_msg_body_t *msg;
+    struct tfm_conn_handle_t *conn_handle;
+    int32_t client_id;
+
+    /* It will have no effect if called with the NULL handle */
+    if (handle == PSA_NULL_HANDLE) {
+        return;
+    }
+
+    if (ns_caller) {
+        client_id = tfm_nspm_get_current_client_id();
+    } else {
+        client_id = tfm_spm_partition_get_running_partition_id();
+    }
+
+    conn_handle = tfm_spm_to_handle_instance(handle);
+    /*
+     * It is a fatal error if an invalid handle was provided that is not the
+     * null handle.
+     */
+    if (tfm_spm_validate_conn_handle(conn_handle, client_id) != IPC_SUCCESS) {
+        tfm_core_panic();
+    }
+    service = conn_handle->service;
+    if (!service) {
+        /* FixMe: Need to implement one mechanism to resolve this failure. */
+        tfm_core_panic();
+    }
+
+    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);
+    if (!msg) {
+        /* FixMe: Need to implement one mechanism to resolve this failure. */
+        tfm_core_panic();
+    }
+
+    /* It is a fatal error if the connection is currently handling a request. */
+    if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
+        tfm_core_panic();
+    }
+
+    /* No input or output needed for close message */
+    tfm_spm_fill_msg(msg, service, conn_handle, PSA_IPC_DISCONNECT, client_id,
+                     NULL, 0, NULL, 0, NULL);
+
+    /*
+     * Send message and wake up the SP who is waiting on message queue,
+     * and scheduler triggered
+     */
+    tfm_spm_send_event(service, msg);
+}
diff --git a/secure_fw/spm/model_ipc/spm_psa_client_call.h b/secure_fw/spm/model_ipc/spm_psa_client_call.h
new file mode 100644
index 0000000..5146e76
--- /dev/null
+++ b/secure_fw/spm/model_ipc/spm_psa_client_call.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef __TFM_PSA_CLIENT_CALL_H__
+#define __TFM_PSA_CLIENT_CALL_H__
+
+#include <stdint.h>
+#include "psa/client.h"
+
+/* Common handlers for PSA client calls */
+
+/**
+ * \brief handler for \ref psa_framework_version.
+ *
+ * \return version The version of the PSA Framework implementation
+ * that is providing the runtime services.
+ */
+uint32_t tfm_spm_client_psa_framework_version(void);
+
+/**
+ * \brief handler for \ref psa_version.
+ *
+ * \param[in] sid RoT Service identity.
+ * \param[in] ns_caller If 'true', call from non-secure client.
+ * Otherwise from secure client.
+ *
+ * \retval PSA_VERSION_NONE The RoT Service is not implemented, or the
+ * caller is not permitted to access the service.
+ * \retval > 0 The version of the implemented RoT Service.
+ */
+uint32_t tfm_spm_client_psa_version(uint32_t sid, bool ns_caller);
+
+/**
+ * \brief handler for \ref psa_connect.
+ *
+ * \param[in] sid RoT Service identity.
+ * \param[in] version The version of the RoT Service.
+ * \param[in] ns_caller If 'true', call from non-secure client.
+ * Otherwise from secure client.
+ *
+ * \retval PSA_SUCCESS Success.
+ * \retval PSA_ERROR_CONNECTION_REFUSED The SPM or RoT Service has refused the
+ * connection.
+ * \retval PSA_ERROR_CONNECTION_BUSY The SPM or RoT Service cannot make the
+ * connection at the moment.
+ * \retval "Does not return" The RoT Service ID and version are not
+ * supported, or the caller is not permitted to
+ * access the service.
+ */
+psa_status_t tfm_spm_client_psa_connect(uint32_t sid, uint32_t version,
+ bool ns_caller);
+
+/**
+ * \brief handler for \ref psa_call.
+ *
+ * \param[in] handle Service handle to the established connection,
+ * \ref psa_handle_t
+ * \param[in] type The request type.
+ *                          Must be zero (\ref PSA_IPC_CALL) or positive.
+ * \param[in] inptr Array of input psa_invec structures.
+ * \ref psa_invec
+ * \param[in] in_num Number of input psa_invec structures.
+ * \ref psa_invec
+ * \param[in] outptr Array of output psa_outvec structures.
+ * \ref psa_outvec
+ * \param[in] out_num           Number of output psa_outvec structures.
+ * \ref psa_outvec
+ * \param[in] ns_caller If 'true', call from non-secure client.
+ * Otherwise from secure client.
+ * \param[in] privileged Privileged mode or unprivileged mode:
+ * \ref TFM_PARTITION_UNPRIVILEGED_MODE
+ * \ref TFM_PARTITION_PRIVILEGED_MODE
+ *
+ * \retval PSA_SUCCESS Success.
+ * \retval "Does not return" The call is invalid, one or more of the
+ * following are true:
+ * \arg An invalid handle was passed.
+ * \arg The connection is already handling a request.
+ * \arg An invalid memory reference was provided.
+ * \arg in_num + out_num > PSA_MAX_IOVEC.
+ * \arg The message is unrecognized by the RoT
+ * Service or incorrectly formatted.
+ */
+psa_status_t tfm_spm_client_psa_call(psa_handle_t handle, int32_t type,
+ const psa_invec *inptr, size_t in_num,
+ psa_outvec *outptr, size_t out_num,
+ bool ns_caller, uint32_t privileged);
+
+/**
+ * \brief handler for \ref psa_close.
+ *
+ * \param[in] handle Service handle to the connection to be closed,
+ * \ref psa_handle_t
+ * \param[in] ns_caller If 'true', call from non-secure client.
+ * Otherwise from secure client.
+ *
+ * \retval void Success.
+ * \retval "Does not return" The call is invalid, one or more of the
+ * following are true:
+ * \arg An invalid handle was provided that is not
+ * the null handle.
+ * \arg The connection is handling a request.
+ */
+void tfm_spm_client_psa_close(psa_handle_t handle, bool ns_caller);
+
+#endif
diff --git a/secure_fw/spm/model_ipc/tfm_core_svcalls_ipc.c b/secure_fw/spm/model_ipc/tfm_core_svcalls_ipc.c
new file mode 100644
index 0000000..2c07bf4
--- /dev/null
+++ b/secure_fw/spm/model_ipc/tfm_core_svcalls_ipc.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2017-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <string.h>
+#include "region.h"
+#include "tfm/spm_api.h"
+#include "tfm/spm_db.h"
+#include "tfm_api.h"
+#include "tfm_arch.h"
+#include "tfm_core_trustzone.h"
+#include "tfm_internal.h"
+#include "tfm_svcalls.h"
+#include "tfm_utils.h"
+#include "tfm/tfm_core_svc.h"
+
+/* The section names come from the scatter file */
+REGION_DECLARE(Image$$, TFM_UNPRIV_CODE, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_UNPRIV_CODE, $$RO$$Limit);
+
+#ifdef PLATFORM_SVC_HANDLERS
+extern int32_t platform_svc_handlers(tfm_svc_number_t svc_num,
+ uint32_t *ctx, uint32_t lr);
+#endif
+
+/*
+ * Dispatch a PSA-related SVC to the corresponding SPM handler.
+ *
+ * \param[in] svc_num  The extracted SVC number.
+ * \param[in] ctx      Pointer to the stacked state context (r0..xPSR).
+ * \param[in] lr       EXC_RETURN value of the SVC exception.
+ *
+ * \return The handler's return value (placed into the caller's r0), or
+ *         PSA_SUCCESS for void handlers.
+ */
+static int32_t SVC_Handler_IPC(tfm_svc_number_t svc_num, uint32_t *ctx,
+                               uint32_t lr)
+{
+    bool ns_caller = false;
+    struct spm_partition_desc_t *partition = NULL;
+    uint32_t veneer_base =
+        (uint32_t)&REGION_NAME(Image$$, TFM_UNPRIV_CODE, $$RO$$Base);
+    uint32_t veneer_limit =
+        (uint32_t)&REGION_NAME(Image$$, TFM_UNPRIV_CODE, $$RO$$Limit);
+
+    /*
+     * The caller security attribute detection bases on LR of state context.
+     * However, if SP calls PSA APIs based on its customized SVC, the LR may be
+     * occupied by general purpose value while calling SVC.
+     * Check if caller comes from non-secure: return address (ctx[6]) belongs
+     * to the veneer section, and the bit0 of LR (ctx[5]) is zero.
+     */
+    if (ctx[6] >= veneer_base && ctx[6] < veneer_limit &&
+        !(ctx[5] & TFM_VENEER_LR_BIT0_MASK)) {
+        ns_caller = true;
+    }
+
+    partition = tfm_spm_get_running_partition();
+    if (!partition) {
+        tfm_core_panic();
+    }
+
+    /* Panics if the caller context is inconsistent with its claimed origin. */
+    tfm_spm_validate_caller(partition, ctx, lr, ns_caller);
+
+    switch (svc_num) {
+    case TFM_SVC_PSA_FRAMEWORK_VERSION:
+        return tfm_spm_psa_framework_version();
+    case TFM_SVC_PSA_VERSION:
+        return tfm_spm_psa_version(ctx, ns_caller);
+    case TFM_SVC_PSA_CONNECT:
+        return tfm_spm_psa_connect(ctx, ns_caller);
+    case TFM_SVC_PSA_CALL:
+        return tfm_spm_psa_call(ctx, ns_caller, lr);
+    case TFM_SVC_PSA_CLOSE:
+        tfm_spm_psa_close(ctx, ns_caller);
+        break;
+    case TFM_SVC_PSA_WAIT:
+        return tfm_spm_psa_wait(ctx);
+    case TFM_SVC_PSA_GET:
+        return tfm_spm_psa_get(ctx);
+    case TFM_SVC_PSA_SET_RHANDLE:
+        tfm_spm_psa_set_rhandle(ctx);
+        break;
+    case TFM_SVC_PSA_READ:
+        return tfm_spm_psa_read(ctx);
+    case TFM_SVC_PSA_SKIP:
+        return tfm_spm_psa_skip(ctx);
+    case TFM_SVC_PSA_WRITE:
+        tfm_spm_psa_write(ctx);
+        break;
+    case TFM_SVC_PSA_REPLY:
+        tfm_spm_psa_reply(ctx);
+        break;
+    case TFM_SVC_PSA_NOTIFY:
+        tfm_spm_psa_notify(ctx);
+        break;
+    case TFM_SVC_PSA_CLEAR:
+        tfm_spm_psa_clear();
+        break;
+    case TFM_SVC_PSA_EOI:
+        tfm_spm_psa_eoi(ctx);
+        break;
+    case TFM_SVC_ENABLE_IRQ:
+        tfm_spm_enable_irq(ctx);
+        break;
+    case TFM_SVC_DISABLE_IRQ:
+        tfm_spm_disable_irq(ctx);
+        break;
+    case TFM_SVC_PSA_PANIC:
+        tfm_spm_psa_panic();
+        break;
+    case TFM_SVC_SPM_REQUEST:
+        tfm_spm_request_handler((const struct tfm_state_context_t *)ctx);
+        break;
+    case TFM_SVC_PSA_LIFECYCLE:
+        return tfm_spm_get_lifecycle_state();
+    default:
+#ifdef PLATFORM_SVC_HANDLERS
+        /* Give the platform a chance to handle its own SVC numbers. */
+        return (platform_svc_handlers(svc_num, ctx, lr));
+#else
+        ERROR_MSG("Unknown SVC number requested!");
+        return PSA_ERROR_GENERIC_ERROR;
+#endif
+    }
+    return PSA_SUCCESS;
+}
+
+/*
+ * Top-level secure SVC handler: extracts the SVC number from the SVC
+ * instruction in the caller's code, handles SPM-internal requests directly and
+ * forwards PSA requests to SVC_Handler_IPC(). Returns the (possibly updated)
+ * EXC_RETURN value.
+ */
+uint32_t tfm_core_svc_handler(uint32_t *svc_args, uint32_t exc_return)
+{
+    tfm_svc_number_t svc_number = TFM_SVC_SFN_REQUEST;
+    /*
+     * Stack contains:
+     * r0, r1, r2, r3, r12, r14 (lr), the return address and xPSR
+     * First argument (r0) is svc_args[0]
+     */
+    if (is_return_secure_stack(exc_return)) {
+        /* SVC called directly from secure context. Check instruction for
+         * svc_number: it is the immediate byte of the SVC opcode, two bytes
+         * before the stacked return address (svc_args[6]).
+         */
+        svc_number = ((tfm_svc_number_t *)svc_args[6])[-2];
+    } else {
+        /* Secure SVC executing with NS return.
+         * NS cannot directly trigger S SVC so this should not happen. This is
+         * an unrecoverable error.
+         */
+        tfm_core_panic();
+    }
+    switch (svc_number) {
+    case TFM_SVC_HANDLER_MODE:
+        tfm_arch_clear_fp_status();
+        exc_return = tfm_spm_init();
+        break;
+    case TFM_SVC_GET_BOOT_DATA:
+        tfm_core_get_boot_data_handler(svc_args);
+        break;
+    default:
+        /* All remaining numbers are PSA API requests; result goes in r0. */
+        svc_args[0] = SVC_Handler_IPC(svc_number, svc_args, exc_return);
+        break;
+    }
+
+    return exc_return;
+}
+
+/*
+ * Enter handler mode by raising the TFM_SVC_HANDLER_MODE SVC; naked so the
+ * compiler adds no prologue/epilogue around the two instructions.
+ */
+__attribute__ ((naked)) void tfm_core_handler_mode(void)
+{
+    __ASM volatile("SVC %0           \n"
+                   "BX LR            \n"
+                   : : "I" (TFM_SVC_HANDLER_MODE));
+}
+
+/* Terminal handler for access violations: spin forever (no recovery). */
+void tfm_access_violation_handler(void)
+{
+    while (1) {
+        ;
+    }
+}
diff --git a/secure_fw/spm/model_ipc/tfm_message_queue.c b/secure_fw/spm/model_ipc/tfm_message_queue.c
new file mode 100644
index 0000000..1a7790e
--- /dev/null
+++ b/secure_fw/spm/model_ipc/tfm_message_queue.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#include "tfm_internal_defines.h"
+#include "tfm_message_queue.h"
+
+/* Message queue process */
+int32_t tfm_msg_enqueue(struct tfm_msg_queue_t *queue,
+ struct tfm_msg_body_t *node)
+{
+ if (!queue || !node) {
+ return IPC_ERROR_BAD_PARAMETERS;
+ }
+
+ if (queue->size == 0) {
+ queue->head = node;
+ queue->tail = node;
+ } else {
+ queue->tail->next = node;
+ queue->tail = node;
+ }
+ queue->size++;
+ return IPC_SUCCESS;
+}
+
+struct tfm_msg_body_t *tfm_msg_dequeue(struct tfm_msg_queue_t *queue)
+{
+ struct tfm_msg_body_t *pop_node;
+
+ if (!queue) {
+ return NULL;
+ }
+
+ if (queue->size == 0) {
+ return NULL;
+ }
+
+ pop_node = queue->head;
+ queue->head = queue->head->next;
+ queue->size--;
+ return pop_node;
+}
+
+int32_t tfm_msg_queue_is_empty(struct tfm_msg_queue_t *queue)
+{
+ return queue->size == 0 ? 1 : 0;
+}
diff --git a/secure_fw/spm/model_ipc/tfm_multi_core.c b/secure_fw/spm/model_ipc/tfm_multi_core.c
new file mode 100644
index 0000000..0678509
--- /dev/null
+++ b/secure_fw/spm/model_ipc/tfm_multi_core.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "platform/include/tfm_spm_hal.h"
+#include "psa/client.h"
+#include "tfm_internal.h"
+#include "tfm_nspm.h"
+#include "tfm_spe_mailbox.h"
+#include "tfm_utils.h"
+#include "log/tfm_assert.h"
+#include "log/tfm_log.h"
+
+#define DEFAULT_NS_CLIENT_ID (-1)
+
+int32_t tfm_nspm_get_current_client_id(void)
+{
+ return DEFAULT_NS_CLIENT_ID;
+}
+
+void tfm_nspm_thread_entry(void)
+{
+#ifdef TFM_CORE_DEBUG
+ /* Boot up non-secure core */
+ LOG_MSG("Enabling non-secure core...");
+#endif
+
+ tfm_spm_hal_boot_ns_cpu(tfm_spm_hal_get_ns_VTOR());
+ tfm_spm_hal_wait_for_ns_cpu_ready();
+
+ tfm_mailbox_init();
+
+ /*
+ * TODO
+ * The infinite-loop can be replaced with low-power sleep and resume
+ * operation. It may require privileged access to platform specific
+ * hardware.
+ */
+ while (1) {
+ }
+
+
+ /* NOTREACHED */
+ TFM_ASSERT(false);
+}
+
+void tfm_psa_ipc_request_handler(const uint32_t svc_args[])
+{
+ (void)svc_args;
+
+ /* Should not receive any request from ns-callable in multi-core topology */
+ tfm_core_panic();
+}
diff --git a/secure_fw/spm/model_ipc/tfm_multi_core_mem_check.c b/secure_fw/spm/model_ipc/tfm_multi_core_mem_check.c
new file mode 100644
index 0000000..ccd93d2
--- /dev/null
+++ b/secure_fw/spm/model_ipc/tfm_multi_core_mem_check.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+
+#include "platform/include/tfm_spm_hal.h"
+#include "region_defs.h"
+#include "secure_utilities.h"
+#include "tfm/spm_api.h"
+#include "tfm/spm_db.h"
+#include "tfm_internal.h"
+#include "tfm_multi_core.h"
+#include "tfm_secure_api.h"
+#include "tfm_utils.h"
+#include "region.h"
+
+#ifndef TFM_LVL
+#error TFM_LVL is not defined!
+#endif
+
+/* Follow CMSE flag definitions */
+#define MEM_CHECK_MPU_READWRITE (1 << 0x0)
+#define MEM_CHECK_AU_NONSECURE (1 << 0x1)
+#define MEM_CHECK_MPU_UNPRIV (1 << 0x2)
+#define MEM_CHECK_MPU_READ (1 << 0x3)
+#define MEM_CHECK_MPU_NONSECURE (1 << 0x4)
+#define MEM_CHECK_NONSECURE (MEM_CHECK_AU_NONSECURE | \
+ MEM_CHECK_MPU_NONSECURE)
+
+void tfm_get_mem_region_security_attr(const void *p, size_t s,
+ struct security_attr_info_t *p_attr)
+{
+ p_attr->is_valid = true;
+
+ if (check_address_range(p, s, NS_DATA_START,
+ NS_DATA_LIMIT) == TFM_SUCCESS) {
+ p_attr->is_secure = false;
+ return;
+ }
+
+ if (check_address_range(p, s, NS_CODE_START,
+ NS_CODE_LIMIT) == TFM_SUCCESS) {
+ p_attr->is_secure = false;
+ return;
+ }
+
+ if (check_address_range(p, s, S_DATA_START, S_DATA_LIMIT) == TFM_SUCCESS) {
+ p_attr->is_secure = true;
+ return;
+ }
+
+ if (check_address_range(p, s, S_CODE_START, S_CODE_LIMIT) == TFM_SUCCESS) {
+ p_attr->is_secure = true;
+ return;
+ }
+
+ p_attr->is_valid = false;
+}
+
+#if TFM_LVL == 2
+REGION_DECLARE(Image$$, TFM_UNPRIV_CODE, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_UNPRIV_CODE, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_UNPRIV_DATA, $$RW$$Base);
+REGION_DECLARE(Image$$, TFM_UNPRIV_DATA, $$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_APP_CODE_START, $$Base);
+REGION_DECLARE(Image$$, TFM_APP_CODE_END, $$Base);
+REGION_DECLARE(Image$$, TFM_APP_RW_STACK_START, $$Base);
+REGION_DECLARE(Image$$, TFM_APP_RW_STACK_END, $$Base);
+#endif
+
+void tfm_get_secure_mem_region_attr(const void *p, size_t s,
+ struct mem_attr_info_t *p_attr)
+{
+#if TFM_LVL == 1
+ p_attr->is_mpu_enabled = false;
+ p_attr->is_valid = true;
+
+ if (check_address_range(p, s, S_DATA_START, S_DATA_LIMIT) == TFM_SUCCESS) {
+ p_attr->is_priv_rd_allow = true;
+ p_attr->is_priv_wr_allow = true;
+ p_attr->is_unpriv_rd_allow = true;
+ p_attr->is_unpriv_wr_allow = true;
+ p_attr->is_xn = true;
+ return;
+ }
+
+ if (check_address_range(p, s, S_CODE_START, S_CODE_LIMIT) == TFM_SUCCESS) {
+ p_attr->is_priv_rd_allow = true;
+ p_attr->is_priv_wr_allow = false;
+ p_attr->is_unpriv_rd_allow = true;
+ p_attr->is_unpriv_wr_allow = false;
+ p_attr->is_xn = false;
+ return;
+ }
+
+ p_attr->is_valid = false;
+#elif TFM_LVL == 2
+ uintptr_t base, limit;
+
+ p_attr->is_mpu_enabled = false;
+ p_attr->is_valid = true;
+
+ /* TFM Core unprivileged code region */
+ base = (uintptr_t)®ION_NAME(Image$$, TFM_UNPRIV_CODE, $$RO$$Base);
+ limit = (uintptr_t)®ION_NAME(Image$$, TFM_UNPRIV_CODE, $$RO$$Limit) - 1;
+ if (check_address_range(p, s, base, limit) == TFM_SUCCESS) {
+ p_attr->is_priv_rd_allow = true;
+ p_attr->is_priv_wr_allow = false;
+ p_attr->is_unpriv_rd_allow = true;
+ p_attr->is_unpriv_wr_allow = false;
+ p_attr->is_xn = false;
+ return;
+ }
+
+ /* TFM Core unprivileged data region */
+ base = (uintptr_t)®ION_NAME(Image$$, TFM_UNPRIV_DATA, $$RW$$Base);
+ limit = (uintptr_t)®ION_NAME(Image$$, TFM_UNPRIV_DATA, $$ZI$$Limit) - 1;
+ if (check_address_range(p, s, base, limit) == TFM_SUCCESS) {
+ p_attr->is_priv_rd_allow = true;
+ p_attr->is_priv_wr_allow = true;
+ p_attr->is_unpriv_rd_allow = true;
+ p_attr->is_unpriv_wr_allow = true;
+ p_attr->is_xn = true;
+ return;
+ }
+
+ /* APP RoT partition RO region */
+ base = (uintptr_t)®ION_NAME(Image$$, TFM_APP_CODE_START, $$Base);
+ limit = (uintptr_t)®ION_NAME(Image$$, TFM_APP_CODE_END, $$Base) - 1;
+ if (check_address_range(p, s, base, limit) == TFM_SUCCESS) {
+ p_attr->is_priv_rd_allow = true;
+ p_attr->is_priv_wr_allow = false;
+ p_attr->is_unpriv_rd_allow = true;
+ p_attr->is_unpriv_wr_allow = false;
+ p_attr->is_xn = false;
+ return;
+ }
+
+ /* RW, ZI and stack as one region */
+ base = (uintptr_t)®ION_NAME(Image$$, TFM_APP_RW_STACK_START, $$Base);
+ limit = (uintptr_t)®ION_NAME(Image$$, TFM_APP_RW_STACK_END, $$Base) - 1;
+ if (check_address_range(p, s, base, limit) == TFM_SUCCESS) {
+ p_attr->is_priv_rd_allow = true;
+ p_attr->is_priv_wr_allow = true;
+ p_attr->is_unpriv_rd_allow = true;
+ p_attr->is_unpriv_wr_allow = true;
+ p_attr->is_xn = true;
+ return;
+ }
+
+ /*
+ * Treat the remaining parts in secure data section and secure code section
+ * as privileged regions
+ */
+ base = (uintptr_t)S_DATA_START;
+ limit = (uintptr_t)S_DATA_LIMIT;
+ if (check_address_range(p, s, base, limit) == TFM_SUCCESS) {
+ p_attr->is_priv_rd_allow = true;
+ p_attr->is_priv_wr_allow = true;
+ p_attr->is_unpriv_rd_allow = false;
+ p_attr->is_unpriv_wr_allow = false;
+ p_attr->is_xn = true;
+ return;
+ }
+
+ base = (uintptr_t)S_CODE_START;
+ limit = (uintptr_t)S_CODE_LIMIT;
+ if (check_address_range(p, s, base, limit) == TFM_SUCCESS) {
+ p_attr->is_priv_rd_allow = true;
+ p_attr->is_priv_wr_allow = false;
+ p_attr->is_unpriv_rd_allow = false;
+ p_attr->is_unpriv_wr_allow = false;
+ p_attr->is_xn = false;
+ return;
+ }
+
+ p_attr->is_valid = false;
+#else
+#error "Cannot support current TF-M isolation level"
+#endif
+}
+
+void tfm_get_ns_mem_region_attr(const void *p, size_t s,
+ struct mem_attr_info_t *p_attr)
+{
+ p_attr->is_mpu_enabled = false;
+ p_attr->is_valid = true;
+
+ if (check_address_range(p, s, NS_DATA_START,
+ NS_DATA_LIMIT) == TFM_SUCCESS) {
+ p_attr->is_priv_rd_allow = true;
+ p_attr->is_priv_wr_allow = true;
+ p_attr->is_unpriv_rd_allow = true;
+ p_attr->is_unpriv_wr_allow = true;
+ p_attr->is_xn = true;
+ return;
+ }
+
+ if (check_address_range(p, s, NS_CODE_START,
+ NS_CODE_LIMIT) == TFM_SUCCESS) {
+ p_attr->is_priv_rd_allow = true;
+ p_attr->is_priv_wr_allow = false;
+ p_attr->is_unpriv_rd_allow = true;
+ p_attr->is_unpriv_wr_allow = false;
+ p_attr->is_xn = false;
+ return;
+ }
+
+ p_attr->is_valid = false;
+}
+
+static void security_attr_init(struct security_attr_info_t *p_attr)
+{
+ /* No check if p_attr is valid */
+
+    /*
+     * The initial values may not be a valid combination.
+     * The value in each field just guarantees the least access permission in
+     * case that the field is incorrectly set later.
+     */
+ p_attr->is_valid = false;
+ p_attr->is_secure = true;
+}
+
+static void mem_attr_init(struct mem_attr_info_t *p_attr)
+{
+ /* No check if p_attr is valid */
+
+    /*
+     * The initial values may not be a valid combination.
+     * The value in each field just guarantees the least access permission in
+     * case that the field is incorrectly set later.
+     */
+ p_attr->is_mpu_enabled = false;
+ p_attr->is_valid = false;
+ p_attr->is_xn = true;
+ p_attr->is_priv_rd_allow = false;
+ p_attr->is_priv_wr_allow = false;
+ p_attr->is_unpriv_rd_allow = false;
+ p_attr->is_unpriv_wr_allow = false;
+}
+
+/**
+ * \brief Check whether the access permission matches the security settings of
+ * the target memory region
+ *
+ * \param[in] attr The security_attr_info_t containing security settings of
+ * memory region
+ * \param[in] flags The flags indicating the access permissions.
+ *
+ * \return TFM_SUCCESS if the check passes,
+ * TFM_ERROR_GENERIC otherwise.
+ */
+static enum tfm_status_e security_attr_check(struct security_attr_info_t attr,
+ uint8_t flags)
+{
+ bool secure_access;
+
+ if (!attr.is_valid) {
+ return TFM_ERROR_GENERIC;
+ }
+
+ secure_access = flags & MEM_CHECK_NONSECURE ? false : true;
+ /*
+ * Non-secure access should not access secure memory region.
+ * Secure service should not directly access non-secure memory region.
+ */
+ if (secure_access ^ attr.is_secure) {
+ return TFM_ERROR_GENERIC;
+ }
+
+ return TFM_SUCCESS;
+}
+
+/**
+ * \brief Check whether the access permission matches the target non-secure
+ * memory region access attributes.
+ *
+ * \param[in] attr The mem_attr_info_t containing attributes of memory region
+ * \param[in] flags The flags indicating the access permissions.
+ *
+ * \return TFM_SUCCESS if the check passes,
+ * TFM_ERROR_GENERIC otherwise.
+ */
+static enum tfm_status_e ns_mem_attr_check(struct mem_attr_info_t attr,
+ uint8_t flags)
+{
+ /*
+ * Non-secure privileged/unprivileged check is skipped.
+ * Non-secure software should implement the check if it enforces the
+ * isolation between privileged and unprivileged regions.
+ */
+
+ if ((flags & MEM_CHECK_MPU_READWRITE) &&
+ (attr.is_priv_rd_allow || attr.is_unpriv_rd_allow) &&
+ (attr.is_priv_wr_allow || attr.is_unpriv_wr_allow)) {
+ return TFM_SUCCESS;
+ }
+
+ if ((flags & MEM_CHECK_MPU_READ) &&
+ (attr.is_priv_rd_allow || attr.is_unpriv_rd_allow)) {
+ return TFM_SUCCESS;
+ }
+
+ return TFM_ERROR_GENERIC;
+}
+
+/**
+ * \brief Check whether the access permission matches the target secure memory
+ * region access attributes.
+ *
+ * \param[in] attr The mem_attr_info_t containing attributes of memory region
+ * \param[in] flags The flags indicating the access permissions.
+ *
+ * \return TFM_SUCCESS if the check passes,
+ * TFM_ERROR_GENERIC otherwise.
+ */
+static enum tfm_status_e secure_mem_attr_check(struct mem_attr_info_t attr,
+ uint8_t flags)
+{
+#if TFM_LVL == 1
+ /* Privileged/unprivileged is ignored in TFM_LVL == 1 */
+
+ if ((flags & MEM_CHECK_MPU_READWRITE) &&
+ (attr.is_priv_rd_allow || attr.is_unpriv_rd_allow) &&
+ (attr.is_priv_wr_allow || attr.is_unpriv_wr_allow)) {
+ return TFM_SUCCESS;
+ }
+
+ if ((flags & MEM_CHECK_MPU_READ) &&
+ (attr.is_priv_rd_allow || attr.is_unpriv_rd_allow)) {
+ return TFM_SUCCESS;
+ }
+
+ return TFM_ERROR_GENERIC;
+#else
+ if (flags & MEM_CHECK_MPU_UNPRIV) {
+ if ((flags & MEM_CHECK_MPU_READWRITE) && attr.is_unpriv_rd_allow &&
+ attr.is_unpriv_wr_allow) {
+ return TFM_SUCCESS;
+ }
+
+ if ((flags & MEM_CHECK_MPU_READ) && attr.is_unpriv_rd_allow) {
+ return TFM_SUCCESS;
+ }
+ } else {
+ if ((flags & MEM_CHECK_MPU_READWRITE) && attr.is_priv_rd_allow &&
+ attr.is_priv_wr_allow) {
+ return TFM_SUCCESS;
+ }
+
+ if ((flags & MEM_CHECK_MPU_READ) && attr.is_priv_rd_allow) {
+ return TFM_SUCCESS;
+ }
+ }
+
+ return TFM_ERROR_GENERIC;
+#endif
+}
+
+/**
+ * \brief Check whether the access permission matches the memory attributes of
+ * the target memory region
+ *
+ * \param[in] attr The mem_attr_info_t containing memory region attributes
+ * \param[in] flags The flags indicating the access permissions.
+ *
+ * \return TFM_SUCCESS if the check passes,
+ * TFM_ERROR_GENERIC otherwise.
+ */
+static enum tfm_status_e mem_attr_check(struct mem_attr_info_t attr,
+ uint8_t flags)
+{
+ if (!attr.is_valid) {
+ return TFM_ERROR_GENERIC;
+ }
+
+ if (flags & MEM_CHECK_NONSECURE) {
+ return ns_mem_attr_check(attr, flags);
+ }
+
+ return secure_mem_attr_check(attr, flags);
+}
+
+/**
+ * \brief Check whether a memory access is allowed to access to a memory range
+ *
+ * \param[in] p The start address of the range to check
+ * \param[in] s The size of the range to check
+ * \param[in] flags The flags indicating the access permissions.
+ *
+ * \return TFM_SUCCESS if the access is allowed,
+ * TFM_ERROR_GENERIC otherwise.
+ */
+static int32_t has_access_to_region(const void *p, size_t s, uint8_t flags)
+{
+ struct security_attr_info_t security_attr;
+ struct mem_attr_info_t mem_attr;
+
+ if (!p) {
+ return (int32_t)TFM_ERROR_GENERIC;
+ }
+
+ if ((uintptr_t)p > (UINTPTR_MAX - s)) {
+ return (int32_t)TFM_ERROR_GENERIC;
+ }
+
+ /* Abort if not in Handler mode */
+ if (!__get_IPSR()) {
+ tfm_core_panic();
+ }
+
+ security_attr_init(&security_attr);
+
+ /* Retrieve security attributes of target memory region */
+ tfm_spm_hal_get_mem_security_attr(p, s, &security_attr);
+
+ if (security_attr_check(security_attr, flags) != TFM_SUCCESS) {
+ return (int32_t)TFM_ERROR_GENERIC;
+ }
+
+ mem_attr_init(&mem_attr);
+
+ if (security_attr.is_secure) {
+ /* Retrieve access attributes of secure memory region */
+ tfm_spm_hal_get_secure_access_attr(p, s, &mem_attr);
+
+#if TFM_LVL != 1
+ /* Secure MPU must be enabled in Isolation Level 2 and 3 */
+ if (!mem_attr.is_mpu_enabled) {
+ tfm_core_panic();
+ }
+#endif
+ } else {
+ /* Retrieve access attributes of non-secure memory region. */
+ tfm_spm_hal_get_ns_access_attr(p, s, &mem_attr);
+ }
+
+ return (int32_t)mem_attr_check(mem_attr, flags);
+}
+
+int32_t tfm_core_has_read_access_to_region(const void *p, size_t s,
+ bool ns_caller,
+ uint32_t privileged)
+{
+ uint8_t flags = MEM_CHECK_MPU_READ;
+
+ if (privileged == TFM_PARTITION_UNPRIVILEGED_MODE) {
+ flags |= MEM_CHECK_MPU_UNPRIV;
+ } else if (privileged != TFM_PARTITION_PRIVILEGED_MODE) {
+ return TFM_ERROR_GENERIC;
+ }
+
+ if (ns_caller) {
+ flags |= MEM_CHECK_NONSECURE;
+ }
+
+ return has_access_to_region(p, s, flags);
+}
+
+int32_t tfm_core_has_write_access_to_region(void *p, size_t s,
+ bool ns_caller,
+ uint32_t privileged)
+{
+ uint8_t flags = MEM_CHECK_MPU_READWRITE;
+
+ if (privileged == TFM_PARTITION_UNPRIVILEGED_MODE) {
+ flags |= MEM_CHECK_MPU_UNPRIV;
+ } else if (privileged != TFM_PARTITION_PRIVILEGED_MODE) {
+ return TFM_ERROR_GENERIC;
+ }
+
+ if (ns_caller) {
+ flags |= MEM_CHECK_NONSECURE;
+ }
+
+ return has_access_to_region(p, s, flags);
+}
diff --git a/secure_fw/spm/model_ipc/tfm_nspm_ipc.c b/secure_fw/spm/model_ipc/tfm_nspm_ipc.c
new file mode 100644
index 0000000..c5eb993
--- /dev/null
+++ b/secure_fw/spm/model_ipc/tfm_nspm_ipc.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdbool.h>
+#include "platform/include/tfm_spm_hal.h"
+#include "psa/error.h"
+#include "tfm_nspm.h"
+#include "tfm_utils.h"
+#include "tfm_internal.h"
+#include "log/tfm_assert.h"
+#include "log/tfm_log.h"
+
+#define DEFAULT_NS_CLIENT_ID ((int32_t)-1)
+
+typedef uint32_t TZ_ModuleId_t;
+typedef uint32_t TZ_MemoryId_t;
+
+int32_t tfm_nspm_get_current_client_id(void)
+{
+ return DEFAULT_NS_CLIENT_ID;
+}
+
+/* TF-M implementation of the CMSIS TZ RTOS thread context management API */
+
+/// Initialize secure context memory system
+/// \return execution status (1: success, 0: error)
+/* This veneer is TF-M internal, not a secure service */
+__tfm_nspm_secure_gateway_attributes__
+uint32_t TZ_InitContextSystem_S(void)
+{
+ return 1U;
+}
+
+/// Allocate context memory for calling secure software modules in TrustZone
+/// \param[in] module identifies software modules called from non-secure mode
+/// \return value != 0 id TrustZone memory slot identifier
+/// \return value 0 no memory available or internal error
+/* This veneer is TF-M internal, not a secure service */
+__tfm_nspm_secure_gateway_attributes__
+TZ_MemoryId_t TZ_AllocModuleContext_S (TZ_ModuleId_t module)
+{
+ /* add attribute 'noinline' to avoid a build error. */
+ (void)module;
+ return 1U;
+}
+
+/// Free context memory that was previously allocated with \ref TZ_AllocModuleContext_S
+/// \param[in] id TrustZone memory slot identifier
+/// \return execution status (1: success, 0: error)
+/* This veneer is TF-M internal, not a secure service */
+__tfm_nspm_secure_gateway_attributes__
+uint32_t TZ_FreeModuleContext_S (TZ_MemoryId_t id)
+{
+ (void)id;
+ return 1U;
+}
+
+/// Load secure context (called on RTOS thread context switch)
+/// \param[in] id TrustZone memory slot identifier
+/// \return execution status (1: success, 0: error)
+/* This veneer is TF-M internal, not a secure service */
+__tfm_nspm_secure_gateway_attributes__
+uint32_t TZ_LoadContext_S (TZ_MemoryId_t id)
+{
+ (void)id;
+ return 1U;
+}
+
+/// Store secure context (called on RTOS thread context switch)
+/// \param[in] id TrustZone memory slot identifier
+/// \return execution status (1: success, 0: error)
+/* This veneer is TF-M internal, not a secure service */
+__tfm_nspm_secure_gateway_attributes__
+uint32_t TZ_StoreContext_S (TZ_MemoryId_t id)
+{
+ (void)id;
+ return 1U;
+}
+
+/*
+ * 'r0' implicitly holds the address of the non-secure entry point,
+ * provided during non-secure partition initialization.
+ */
+__attribute__((naked, section("SFN")))
+void tfm_nspm_thread_entry(void)
+{
+ __ASM volatile(
+#ifndef __ICCARM__
+ ".syntax unified \n"
+#endif
+ "mov r4, r0 \n"
+ "movs r2, #1 \n" /* Clear Bit[0] for S to NS transition */
+ "bics r4, r2 \n"
+ "mov r0, r4 \n"
+ "mov r1, r4 \n"
+ "mov r2, r4 \n"
+ "mov r3, r4 \n"
+ "mov r5, r4 \n"
+ "mov r6, r4 \n"
+ "mov r7, r4 \n"
+ "mov r8, r4 \n"
+ "mov r9, r4 \n"
+ "mov r10, r4 \n"
+ "mov r11, r4 \n"
+ "mov r12, r4 \n"
+ "push {r0, r1} \n"
+ "bxns r0 \n"
+ );
+}
+
+void configure_ns_code(void)
+{
+ /* SCB_NS.VTOR points to the Non-secure vector table base address */
+ SCB_NS->VTOR = tfm_spm_hal_get_ns_VTOR();
+
+    /* Set up the main stack pointer of the non-secure code */
+ uint32_t ns_msp = tfm_spm_hal_get_ns_MSP();
+
+ __TZ_set_MSP_NS(ns_msp);
+}
diff --git a/secure_fw/spm/model_ipc/tfm_pools.c b/secure_fw/spm/model_ipc/tfm_pools.c
new file mode 100644
index 0000000..6ab56af
--- /dev/null
+++ b/secure_fw/spm/model_ipc/tfm_pools.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include "tfm_thread.h"
+#include "tfm_wait.h"
+#include "psa/client.h"
+#include "psa/service.h"
+#include "tfm_internal_defines.h"
+#include "cmsis_compiler.h"
+#include "tfm_utils.h"
+#include "tfm_list.h"
+#include "tfm_pools.h"
+#include "tfm_memory_utils.h"
+#include "tfm_core_utils.h"
+
+int32_t tfm_pool_init(struct tfm_pool_instance_t *pool, size_t poolsz,
+ size_t chunksz, size_t num)
+{
+ struct tfm_pool_chunk_t *pchunk;
+ size_t i;
+
+ if (!pool || num == 0) {
+ return IPC_ERROR_BAD_PARAMETERS;
+ }
+
+ /* Ensure buffer is large enough */
+ if (poolsz != ((chunksz + sizeof(struct tfm_pool_chunk_t)) * num +
+ sizeof(struct tfm_pool_instance_t))) {
+ return IPC_ERROR_BAD_PARAMETERS;
+ }
+
+ /* Buffer should be BSS cleared but clear it again */
+ tfm_core_util_memset(pool, 0, poolsz);
+
+ /* Chain pool chunks */
+ tfm_list_init(&pool->chunks_list);
+
+ pchunk = (struct tfm_pool_chunk_t *)pool->chunks;
+ for (i = 0; i < num; i++) {
+ pchunk->pool = pool;
+ tfm_list_add_tail(&pool->chunks_list, &pchunk->list);
+ pchunk = (struct tfm_pool_chunk_t *)&pchunk->data[chunksz];
+ }
+
+ /* Prepare instance and insert to pool list */
+ pool->chunksz = chunksz;
+ pool->chunk_count = num;
+
+ return IPC_SUCCESS;
+}
+
+void *tfm_pool_alloc(struct tfm_pool_instance_t *pool)
+{
+ struct tfm_list_node_t *node;
+ struct tfm_pool_chunk_t *pchunk;
+
+ if (!pool) {
+ return NULL;
+ }
+
+ if (tfm_list_is_empty(&pool->chunks_list)) {
+ return NULL;
+ }
+
+ node = tfm_list_first_node(&pool->chunks_list);
+ pchunk = TFM_GET_CONTAINER_PTR(node, struct tfm_pool_chunk_t, list);
+
+    /* Remove the node from the free list; it is re-added on tfm_pool_free */
+ tfm_list_del_node(node);
+
+ return &pchunk->data;
+}
+
+void tfm_pool_free(void *ptr)
+{
+ struct tfm_pool_chunk_t *pchunk;
+ struct tfm_pool_instance_t *pool;
+
+ pchunk = TFM_GET_CONTAINER_PTR(ptr, struct tfm_pool_chunk_t, data);
+ pool = (struct tfm_pool_instance_t *)pchunk->pool;
+ tfm_list_add_tail(&pool->chunks_list, &pchunk->list);
+}
+
+bool is_valid_chunk_data_in_pool(struct tfm_pool_instance_t *pool,
+ uint8_t *data)
+{
+ const uintptr_t chunks_start = (uintptr_t)(pool->chunks);
+ const size_t chunks_size = pool->chunksz + sizeof(struct tfm_pool_chunk_t);
+ const size_t chunk_count = pool->chunk_count;
+ const uintptr_t chunks_end = chunks_start + chunks_size * chunk_count;
+ uintptr_t pool_chunk_address = 0;
+
+ /* Check that the message was allocated from the pool. */
+ if ((uintptr_t)data < chunks_start || (uintptr_t)data >= chunks_end) {
+ return false;
+ }
+
+ pool_chunk_address =
+ (uint32_t)TFM_GET_CONTAINER_PTR(data, struct tfm_pool_chunk_t, data);
+
+ /* Make sure that the chunk containing the message is aligned on */
+ /* chunk boundary in the pool. */
+ if ((pool_chunk_address - chunks_start) % chunks_size != 0) {
+ return false;
+ }
+ return true;
+}
diff --git a/secure_fw/spm/model_ipc/tfm_psa_api_veneers.c b/secure_fw/spm/model_ipc/tfm_psa_api_veneers.c
new file mode 100644
index 0000000..e4ba155
--- /dev/null
+++ b/secure_fw/spm/model_ipc/tfm_psa_api_veneers.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "psa/client.h"
+#include "psa/service.h"
+#include "secure_utilities.h"
+#include "tfm_arch.h"
+#include "tfm_secure_api.h"
+#include "tfm_api.h"
+#include "tfm_svcalls.h"
+
+/* Veneer implementation */
+
+/*
+ * SVC into the core directly, before touching the stack, because:
+ * - Re-entrancy detection is based on stack information.
+ * - SVC here stores the current xPSR into the stack and restores it while
+ *   the exception returns, so there is no leakage of secure state
+ *   information and no interference between the two sides.
+ */
+
+__tfm_psa_secure_gateway_attributes__
+uint32_t tfm_psa_framework_version_veneer(void)
+{
+ __ASM volatile("SVC %0 \n"
+ "BXNS LR \n"
+ : : "I" (TFM_SVC_PSA_FRAMEWORK_VERSION));
+}
+
+__tfm_psa_secure_gateway_attributes__
+uint32_t tfm_psa_version_veneer(uint32_t sid)
+{
+ __ASM volatile("SVC %0 \n"
+ "BXNS LR \n"
+ : : "I" (TFM_SVC_PSA_VERSION));
+}
+
+__tfm_psa_secure_gateway_attributes__
+psa_handle_t tfm_psa_connect_veneer(uint32_t sid, uint32_t version)
+{
+ __ASM volatile("SVC %0 \n"
+ "BXNS LR \n"
+ : : "I" (TFM_SVC_PSA_CONNECT));
+}
+
+__tfm_psa_secure_gateway_attributes__
+psa_status_t tfm_psa_call_veneer(psa_handle_t handle,
+ const struct tfm_control_parameter_t *ctrl_param,
+ const psa_invec *in_vec,
+ psa_outvec *out_vec)
+{
+ __ASM volatile("SVC %0 \n"
+ "BXNS LR \n"
+ : : "I" (TFM_SVC_PSA_CALL));
+}
+
+__tfm_psa_secure_gateway_attributes__
+void tfm_psa_close_veneer(psa_handle_t handle)
+{
+ __ASM volatile("SVC %0 \n"
+ "BXNS LR \n"
+ : : "I" (TFM_SVC_PSA_CLOSE));
+}
diff --git a/secure_fw/spm/model_ipc/tfm_rpc.c b/secure_fw/spm/model_ipc/tfm_rpc.c
new file mode 100644
index 0000000..073e4f4
--- /dev/null
+++ b/secure_fw/spm/model_ipc/tfm_rpc.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include "tfm/spm_api.h"
+#include "spm_psa_client_call.h"
+#include "tfm_rpc.h"
+#include "tfm_utils.h"
+
+static void default_handle_req(void)
+{
+}
+
+static void default_mailbox_reply(const void *owner, int32_t ret)
+{
+ (void)owner;
+ (void)ret;
+}
+
+static const void *default_get_caller_data(int32_t client_id)
+{
+ (void)client_id;
+
+ return NULL;
+}
+
+static struct tfm_rpc_ops_t rpc_ops = {
+ .handle_req = default_handle_req,
+ .reply = default_mailbox_reply,
+ .get_caller_data = default_get_caller_data,
+};
+
+uint32_t tfm_rpc_psa_framework_version(void)
+{
+ return tfm_spm_client_psa_framework_version();
+}
+
+uint32_t tfm_rpc_psa_version(const struct client_call_params_t *params,
+ bool ns_caller)
+{
+ TFM_CORE_ASSERT(params != NULL);
+
+ return tfm_spm_client_psa_version(params->sid, ns_caller);
+}
+
+psa_status_t tfm_rpc_psa_connect(const struct client_call_params_t *params,
+ bool ns_caller)
+{
+ TFM_CORE_ASSERT(params != NULL);
+
+ return tfm_spm_client_psa_connect(params->sid, params->version, ns_caller);
+}
+
+psa_status_t tfm_rpc_psa_call(const struct client_call_params_t *params,
+ bool ns_caller)
+{
+ TFM_CORE_ASSERT(params != NULL);
+
+ return tfm_spm_client_psa_call(params->handle, params->type,
+ params->in_vec, params->in_len,
+ params->out_vec, params->out_len, ns_caller,
+ TFM_PARTITION_UNPRIVILEGED_MODE);
+}
+
+void tfm_rpc_psa_close(const struct client_call_params_t *params,
+ bool ns_caller)
+{
+ TFM_CORE_ASSERT(params != NULL);
+
+ tfm_spm_client_psa_close(params->handle, ns_caller);
+}
+
+int32_t tfm_rpc_register_ops(const struct tfm_rpc_ops_t *ops_ptr)
+{
+ if (!ops_ptr) {
+ return TFM_RPC_INVAL_PARAM;
+ }
+
+ if (!ops_ptr->handle_req || !ops_ptr->reply || !ops_ptr->get_caller_data) {
+ return TFM_RPC_INVAL_PARAM;
+ }
+
+ /* Currently, one and only one mailbox implementation is supported. */
+ if ((rpc_ops.handle_req != default_handle_req) ||
+ (rpc_ops.reply != default_mailbox_reply) || \
+ (rpc_ops.get_caller_data != default_get_caller_data)) {
+ return TFM_RPC_CONFLICT_CALLBACK;
+ }
+
+ rpc_ops.handle_req = ops_ptr->handle_req;
+ rpc_ops.reply = ops_ptr->reply;
+ rpc_ops.get_caller_data = ops_ptr->get_caller_data;
+
+ return TFM_RPC_SUCCESS;
+}
+
+void tfm_rpc_unregister_ops(void)
+{
+ rpc_ops.handle_req = default_handle_req;
+ rpc_ops.reply = default_mailbox_reply;
+ rpc_ops.get_caller_data = default_get_caller_data;
+}
+
+void tfm_rpc_client_call_handler(void)
+{
+ rpc_ops.handle_req();
+}
+
+void tfm_rpc_client_call_reply(const void *owner, int32_t ret)
+{
+ const struct tfm_msg_body_t *msg = (const struct tfm_msg_body_t *)owner;
+
+ rpc_ops.reply(msg->caller_data, ret);
+}
+
+void tfm_rpc_set_caller_data(struct tfm_msg_body_t *msg, int32_t client_id)
+{
+ msg->caller_data = rpc_ops.get_caller_data(client_id);
+}
diff --git a/secure_fw/spm/model_ipc/tfm_secure_irq_handlers_ipc.inc b/secure_fw/spm/model_ipc/tfm_secure_irq_handlers_ipc.inc
new file mode 100644
index 0000000..22f92cd
--- /dev/null
+++ b/secure_fw/spm/model_ipc/tfm_secure_irq_handlers_ipc.inc
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+/*********** WARNING: This is an auto-generated file. Do not edit! ***********/
+
+#include "secure_fw/partitions/secure_storage/psa_manifest/tfm_secure_storage.h"
+#include "secure_fw/partitions/internal_trusted_storage/psa_manifest/tfm_internal_trusted_storage.h"
+#include "secure_fw/partitions/audit_logging/psa_manifest/tfm_audit_logging.h"
+#include "secure_fw/partitions/crypto/psa_manifest/tfm_crypto.h"
+#include "secure_fw/partitions/platform/psa_manifest/tfm_platform.h"
+#include "secure_fw/partitions/initial_attestation/psa_manifest/tfm_initial_attestation.h"
+#include "test/test_services/tfm_core_test/psa_manifest/tfm_test_core.h"
+#include "test/test_services/tfm_core_test_2/psa_manifest/tfm_test_core_2.h"
+#include "test/test_services/tfm_secure_client_service/psa_manifest/tfm_test_client_service.h"
+#include "test/test_services/tfm_ipc_service/psa_manifest/tfm_ipc_service_partition.h"
+#include "test/test_services/tfm_ipc_client/psa_manifest/tfm_ipc_client_partition.h"
+#include "test/test_services/tfm_irq_test_service_1/psa_manifest/tfm_irq_test_service_1.h"
+#include "test/test_services/tfm_sst_test_service/psa_manifest/tfm_sst_test_service.h"
+#include "test/test_services/tfm_secure_client_2/psa_manifest/tfm_secure_client_2.h"
+#include "test/test_services/tfm_multi_core_test/psa_manifest/tfm_multi_core_test.h"
+#include "cmsis_compiler.h"
+
+/* Definitions of the signals of the IRQs (if any) */
+const struct tfm_core_irq_signal_data_t tfm_core_irq_signals[] = {
+#ifdef TFM_ENABLE_IRQ_TEST
+ { TFM_IRQ_TEST_1, SPM_CORE_IRQ_TEST_1_SIGNAL_TIMER_0_IRQ, TFM_TIMER0_IRQ, 64 },
+#endif /* TFM_ENABLE_IRQ_TEST */
+ {0, 0, (IRQn_Type) 0, 0} /* add dummy element to avoid non-standard empty array */
+};
+
+const size_t tfm_core_irq_signals_count = (sizeof(tfm_core_irq_signals) /
+ sizeof(*tfm_core_irq_signals)) - 1; /* adjust for the dummy element */
+
+/* Definitions of privileged IRQ handlers (if any) */
+#ifdef TFM_ENABLE_IRQ_TEST
+void TFM_TIMER0_IRQ_Handler(void)
+{
+ __disable_irq();
+ /* It is OK to call tfm_irq_handler directly from here, as we are already
+ * in handler mode, and we will not be pre-empted as we disabled interrupts
+ */
+ tfm_irq_handler(TFM_IRQ_TEST_1, SPM_CORE_IRQ_TEST_1_SIGNAL_TIMER_0_IRQ, TFM_TIMER0_IRQ);
+ __enable_irq();
+}
+
+#endif /* TFM_ENABLE_IRQ_TEST */
+
diff --git a/secure_fw/spm/model_ipc/tfm_secure_irq_handlers_ipc.inc.template b/secure_fw/spm/model_ipc/tfm_secure_irq_handlers_ipc.inc.template
new file mode 100644
index 0000000..e5a7d6b
--- /dev/null
+++ b/secure_fw/spm/model_ipc/tfm_secure_irq_handlers_ipc.inc.template
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+{{utilities.donotedit_warning}}
+
+{% for header in utilities.manifest_header_list %}
+#include "{{header}}"
+{% endfor %}
+#include "cmsis_compiler.h"
+{% macro _irq_record(partition_name, signal, line, priority) -%}
+{ {{ partition_name }}, {{ signal }}, {{ line }}, {{ priority }} },
+{%- endmacro %}
+
+/* Definitions of the signals of the IRQs (if any) */
+const struct tfm_core_irq_signal_data_t tfm_core_irq_signals[] = {
+{% for manifest in manifests %}
+ {% if manifest.manifest.irqs %}
+ {% if manifest.attr.conditional %}
+#ifdef {{manifest.attr.conditional}}
+ {% endif %}
+ {% for handler in manifest.manifest.irqs %}
+ {% set irq_data = namespace() %}
+ {% if handler.source %}
+ {% set irq_data.line = handler.source %}
+ {% else %}
+#error "Interrupt source isn't provided for 'irqs' in partition {{manifest.manifest.name}}"
+ {% endif %}
+ {% if handler.tfm_irq_priority %}
+ {% set irq_data.priority = handler.tfm_irq_priority %}
+ {% else %}
+ {% set irq_data.priority = "TFM_DEFAULT_SECURE_IRQ_PRIOTITY" %}
+ {% endif %}
+ {{ _irq_record(manifest.manifest.name, handler.signal, irq_data.line, irq_data.priority) }}
+ {% endfor %}
+ {% if manifest.attr.conditional %}
+#endif /* {{manifest.attr.conditional}} */
+ {% endif %}
+ {% endif %}
+{% endfor %}
+ {0, 0, (IRQn_Type) 0, 0} /* add dummy element to avoid non-standard empty array */
+};
+
+const size_t tfm_core_irq_signals_count = (sizeof(tfm_core_irq_signals) /
+ sizeof(*tfm_core_irq_signals)) - 1; /* adjust for the dummy element */
+
+/* Definitions of privileged IRQ handlers (if any) */
+{% for manifest in manifests %}
+ {% if manifest.manifest.irqs %}
+ {% if manifest.attr.conditional %}
+#ifdef {{manifest.attr.conditional}}
+ {% endif %}
+ {% for handler in manifest.manifest.irqs %}
+ {% if handler.source is number %}
+void irq_{{handler.source}}_Handler(void)
+ {% elif handler.source %}
+void {{handler.source}}_Handler(void)
+ {% else %}
+#error "Interrupt source isn't provided for 'irqs' in partition {{manifest.manifest.name}}"
+ {% endif %}
+{
+ __disable_irq();
+ /* It is OK to call tfm_irq_handler directly from here, as we are already
+ * in handler mode, and we will not be pre-empted as we disabled interrupts
+ */
+ {% if handler.source %}
+ tfm_irq_handler({{manifest.manifest.name}}, {{handler.signal}}, {{handler.source}});
+ {% else %}
+#error "Interrupt source isn't provided for 'irqs' in partition {{manifest.manifest.name}}"
+ {% endif %}
+ __enable_irq();
+}
+
+ {% endfor %}
+ {% if manifest.attr.conditional %}
+#endif /* {{manifest.attr.conditional}} */
+ {% endif %}
+
+ {% endif %}
+{% endfor %}
diff --git a/secure_fw/spm/model_ipc/tfm_spe_mailbox.c b/secure_fw/spm/model_ipc/tfm_spe_mailbox.c
new file mode 100644
index 0000000..baa02c7
--- /dev/null
+++ b/secure_fw/spm/model_ipc/tfm_spe_mailbox.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include "cmsis_compiler.h"
+
+#include "psa/error.h"
+#include "tfm_core_utils.h"
+#include "tfm_utils.h"
+#include "tfm_spe_mailbox.h"
+#include "tfm_rpc.h"
+
+#define NS_CALLER_FLAG (true)
+
+static struct secure_mailbox_queue_t spe_mailbox_queue;
+
+static int32_t tfm_mailbox_dispatch(uint32_t call_type,
+ const struct psa_client_params_t *params,
+ int32_t client_id, int32_t *psa_ret)
+{
+ struct client_call_params_t spm_params = {0};
+
+ TFM_CORE_ASSERT(params != NULL);
+ TFM_CORE_ASSERT(psa_ret != NULL);
+
+ (void)client_id;
+
+ switch (call_type) {
+ case MAILBOX_PSA_FRAMEWORK_VERSION:
+ *psa_ret = tfm_rpc_psa_framework_version();
+ return MAILBOX_SUCCESS;
+ case MAILBOX_PSA_VERSION:
+ spm_params.sid = params->psa_version_params.sid;
+ *psa_ret = tfm_rpc_psa_version(&spm_params, NS_CALLER_FLAG);
+ return MAILBOX_SUCCESS;
+ case MAILBOX_PSA_CONNECT:
+ spm_params.sid = params->psa_connect_params.sid;
+ spm_params.version = params->psa_connect_params.version;
+ *psa_ret = (uint32_t)tfm_rpc_psa_connect(&spm_params, NS_CALLER_FLAG);
+ return MAILBOX_SUCCESS;
+ case MAILBOX_PSA_CALL:
+ spm_params.handle = params->psa_call_params.handle;
+ spm_params.type = params->psa_call_params.type;
+ spm_params.in_vec = params->psa_call_params.in_vec;
+ spm_params.in_len = params->psa_call_params.in_len;
+ spm_params.out_vec = params->psa_call_params.out_vec;
+ spm_params.out_len = params->psa_call_params.out_len;
+ *psa_ret = (uint32_t)tfm_rpc_psa_call(&spm_params, NS_CALLER_FLAG);
+ return MAILBOX_SUCCESS;
+ case MAILBOX_PSA_CLOSE:
+ spm_params.handle = params->psa_close_params.handle;
+ tfm_rpc_psa_close(&spm_params, NS_CALLER_FLAG);
+ return MAILBOX_SUCCESS;
+ default:
+ return MAILBOX_INVAL_PARAMS;
+ }
+}
+
+__STATIC_INLINE void set_spe_queue_empty_status(uint8_t idx)
+{
+ if (idx < NUM_MAILBOX_QUEUE_SLOT) {
+ spe_mailbox_queue.empty_slots |= (1 << idx);
+ }
+}
+
+__STATIC_INLINE void clear_spe_queue_empty_status(uint8_t idx)
+{
+ if (idx < NUM_MAILBOX_QUEUE_SLOT) {
+ spe_mailbox_queue.empty_slots &= ~(1 << idx);
+ }
+}
+
+__STATIC_INLINE bool get_spe_queue_empty_status(uint8_t idx)
+{
+ if ((idx < NUM_MAILBOX_QUEUE_SLOT) &&
+ (spe_mailbox_queue.empty_slots & (1 << idx))) {
+ return true;
+ }
+
+ return false;
+}
+
+__STATIC_INLINE mailbox_queue_status_t get_nspe_queue_pend_status(
+ const struct ns_mailbox_queue_t *ns_queue)
+{
+ return ns_queue->pend_slots;
+}
+
+__STATIC_INLINE void set_nspe_queue_replied_status(
+ struct ns_mailbox_queue_t *ns_queue,
+ mailbox_queue_status_t mask)
+{
+ ns_queue->replied_slots |= mask;
+}
+
+__STATIC_INLINE void clear_nspe_queue_pend_status(
+ struct ns_mailbox_queue_t *ns_queue,
+ mailbox_queue_status_t mask)
+{
+ ns_queue->pend_slots &= ~mask;
+}
+
+__STATIC_INLINE int32_t get_spe_mailbox_msg_handle(uint8_t idx,
+ mailbox_msg_handle_t *handle)
+{
+ if ((idx >= NUM_MAILBOX_QUEUE_SLOT) || !handle) {
+ return MAILBOX_INVAL_PARAMS;
+ }
+
+ *handle = (mailbox_msg_handle_t)(idx + 1);
+
+ return MAILBOX_SUCCESS;
+}
+
+__STATIC_INLINE int32_t get_spe_mailbox_msg_idx(mailbox_msg_handle_t handle,
+ uint8_t *idx)
+{
+ if ((handle == MAILBOX_MSG_NULL_HANDLE) || !idx) {
+ return MAILBOX_INVAL_PARAMS;
+ }
+
+ *idx = (uint8_t)(handle - 1);
+
+ return MAILBOX_SUCCESS;
+}
+
+static void mailbox_clean_queue_slot(uint8_t idx)
+{
+ if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
+ return;
+ }
+
+ tfm_core_util_memset(&spe_mailbox_queue.queue[idx], 0,
+ sizeof(spe_mailbox_queue.queue[idx]));
+ set_spe_queue_empty_status(idx);
+}
+
+__STATIC_INLINE struct mailbox_reply_t *get_nspe_reply_addr(uint8_t idx)
+{
+ uint8_t ns_slot_idx;
+
+ if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
+ return NULL;
+ }
+
+ ns_slot_idx = spe_mailbox_queue.queue[idx].ns_slot_idx;
+
+ return &spe_mailbox_queue.ns_queue->queue[ns_slot_idx].reply;
+}
+
+static void mailbox_direct_reply(uint8_t idx, uint32_t result)
+{
+ struct mailbox_reply_t *reply_ptr;
+ uint32_t ret_result = result;
+
+ /* Get reply address */
+ reply_ptr = get_nspe_reply_addr(idx);
+ tfm_core_util_memcpy(&reply_ptr->return_val, &ret_result,
+ sizeof(reply_ptr->return_val));
+
+ mailbox_clean_queue_slot(idx);
+
+ /*
+ * Skip NSPE queue status update after single reply.
+ * Update NSPE queue status after all the mailbox messages are completed
+ */
+}
+
+__STATIC_INLINE int32_t check_mailbox_msg(const struct mailbox_msg_t *msg)
+{
+ /*
+ * TODO
+     * Comprehensive check of mailbox message content can be implemented here.
+ */
+ (void)msg;
+ return MAILBOX_SUCCESS;
+}
+
+int32_t tfm_mailbox_handle_msg(void)
+{
+ uint8_t idx;
+ int32_t result;
+ int32_t psa_ret = PSA_ERROR_GENERIC_ERROR;
+ mailbox_queue_status_t mask_bits, pend_slots, reply_slots = 0;
+ struct ns_mailbox_queue_t *ns_queue = spe_mailbox_queue.ns_queue;
+ struct mailbox_msg_t *msg_ptr;
+
+ TFM_CORE_ASSERT(ns_queue != NULL);
+
+ tfm_mailbox_hal_enter_critical();
+
+ /* Check if NSPE mailbox did assert a PSA client call request */
+ if (!ns_queue->pend_slots) {
+ tfm_mailbox_hal_exit_critical();
+ return MAILBOX_NO_PEND_EVENT;
+ }
+
+ pend_slots = get_nspe_queue_pend_status(ns_queue);
+
+ tfm_mailbox_hal_exit_critical();
+
+ for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
+ mask_bits = (1 << idx);
+ /* Check if current NSPE mailbox queue slot is pending for handling */
+ if (!(pend_slots & mask_bits)) {
+ continue;
+ }
+
+ /*
+ * TODO
+ * The operations are simplified here. Use the SPE mailbox queue
+ * slot with the same idx as that of the NSPE mailbox queue slot.
+ * A more general implementation should dynamically search and
+ * select an empty SPE mailbox queue slot.
+ */
+ clear_spe_queue_empty_status(idx);
+ spe_mailbox_queue.queue[idx].ns_slot_idx = idx;
+
+ msg_ptr = &spe_mailbox_queue.queue[idx].msg;
+ tfm_core_util_memcpy(msg_ptr, &ns_queue->queue[idx].msg,
+ sizeof(*msg_ptr));
+
+ if (check_mailbox_msg(msg_ptr) != MAILBOX_SUCCESS) {
+ mailbox_clean_queue_slot(idx);
+ continue;
+ }
+
+ get_spe_mailbox_msg_handle(idx,
+ &spe_mailbox_queue.queue[idx].msg_handle);
+
+ /*
+ * Set the current slot index under processing.
+ * The value is used in mailbox_get_caller_data() to identify the
+ * mailbox queue slot.
+ */
+ spe_mailbox_queue.cur_proc_slot_idx = idx;
+
+ result = tfm_mailbox_dispatch(msg_ptr->call_type, &msg_ptr->params,
+ msg_ptr->client_id, &psa_ret);
+ if (result != MAILBOX_SUCCESS) {
+ mailbox_clean_queue_slot(idx);
+ continue;
+ }
+
+ /* Clean up the current slot index under processing */
+ spe_mailbox_queue.cur_proc_slot_idx = NUM_MAILBOX_QUEUE_SLOT;
+
+ if ((msg_ptr->call_type == MAILBOX_PSA_FRAMEWORK_VERSION) ||
+ (msg_ptr->call_type == MAILBOX_PSA_VERSION)) {
+ /*
+ * Directly write the result to NSPE for psa_framework_version() and
+ * psa_version().
+ */
+ reply_slots |= (1 << idx);
+
+ mailbox_direct_reply(idx, psa_ret);
+ } else if ((msg_ptr->call_type == MAILBOX_PSA_CONNECT) ||
+ (msg_ptr->call_type == MAILBOX_PSA_CALL)) {
+ /*
+ * If it failed to deliver psa_connect() or psa_call() request to
+ * TF-M IPC SPM, the failure result should be returned immediately.
+ */
+ if (psa_ret != PSA_SUCCESS) {
+ reply_slots |= (1 << idx);
+ mailbox_direct_reply(idx, psa_ret);
+ }
+ }
+        /*
+         * Skip checking psa_close() since it neither returns immediately nor
+         * has a return value.
+         */
+ }
+
+ tfm_mailbox_hal_enter_critical();
+
+ /* Clean the NSPE mailbox pending status. */
+ clear_nspe_queue_pend_status(ns_queue, pend_slots);
+
+ /* Set the NSPE mailbox replied status */
+ set_nspe_queue_replied_status(ns_queue, reply_slots);
+
+ tfm_mailbox_hal_exit_critical();
+
+ if (reply_slots) {
+ tfm_mailbox_hal_notify_peer();
+ }
+
+ return MAILBOX_SUCCESS;
+}
+
+int32_t tfm_mailbox_reply_msg(mailbox_msg_handle_t handle, int32_t reply)
+{
+ uint8_t idx;
+ int32_t ret;
+ struct ns_mailbox_queue_t *ns_queue = spe_mailbox_queue.ns_queue;
+
+ TFM_CORE_ASSERT(ns_queue != NULL);
+
+ /*
+ * If handle == MAILBOX_MSG_NULL_HANDLE, reply to the mailbox message
+ * in the first slot.
+ * When multiple ongoing PSA client calls from NSPE are supported,
+ * additional check might be necessary to avoid spoofing the first slot.
+ */
+ if (handle == MAILBOX_MSG_NULL_HANDLE) {
+ idx = 0;
+ } else {
+ ret = get_spe_mailbox_msg_idx(handle, &idx);
+ if (ret != MAILBOX_SUCCESS) {
+ return ret;
+ }
+ }
+
+ if (get_spe_queue_empty_status(idx)) {
+ return MAILBOX_NO_PEND_EVENT;
+ }
+
+ mailbox_direct_reply(idx, (uint32_t)reply);
+
+ tfm_mailbox_hal_enter_critical();
+
+ /* Set the NSPE mailbox replied status */
+ set_nspe_queue_replied_status(ns_queue, (1 << idx));
+
+ tfm_mailbox_hal_exit_critical();
+
+ tfm_mailbox_hal_notify_peer();
+
+ return MAILBOX_SUCCESS;
+}
+
+/* RPC handle_req() callback */
+static void mailbox_handle_req(void)
+{
+ (void)tfm_mailbox_handle_msg();
+}
+
+/* RPC reply() callback */
+static void mailbox_reply(const void *owner, int32_t ret)
+{
+ mailbox_msg_handle_t handle = MAILBOX_MSG_NULL_HANDLE;
+
+ /* If the owner is specified */
+ if (owner) {
+ handle = *((mailbox_msg_handle_t *)owner);
+ }
+
+ (void)tfm_mailbox_reply_msg(handle, ret);
+}
+
+/* RPC get_caller_data() callback */
+static const void *mailbox_get_caller_data(int32_t client_id)
+{
+ uint8_t idx;
+
+ (void)client_id;
+
+ idx = spe_mailbox_queue.cur_proc_slot_idx;
+ if (idx < NUM_MAILBOX_QUEUE_SLOT) {
+ return (const void *)&spe_mailbox_queue.queue[idx].msg_handle;
+ }
+
+ return NULL;
+}
+
+/* Mailbox specific operations callback for TF-M RPC */
+static const struct tfm_rpc_ops_t mailbox_rpc_ops = {
+ .handle_req = mailbox_handle_req,
+ .reply = mailbox_reply,
+ .get_caller_data = mailbox_get_caller_data,
+};
+
+int32_t tfm_mailbox_init(void)
+{
+ int32_t ret;
+
+ tfm_core_util_memset(&spe_mailbox_queue, 0, sizeof(spe_mailbox_queue));
+
+ spe_mailbox_queue.empty_slots =
+ (mailbox_queue_status_t)((1UL << (NUM_MAILBOX_QUEUE_SLOT - 1)) - 1);
+ spe_mailbox_queue.empty_slots +=
+ (mailbox_queue_status_t)(1UL << (NUM_MAILBOX_QUEUE_SLOT - 1));
+
+ /* Register RPC callbacks */
+ ret = tfm_rpc_register_ops(&mailbox_rpc_ops);
+ if (ret != TFM_RPC_SUCCESS) {
+ return MAILBOX_CALLBACK_REG_ERROR;
+ }
+
+ /*
+ * Platform specific initialization.
+ * Initialize Inter-Processor Communication and achieve the base address of
+ * NSPE mailbox queue
+ */
+ ret = tfm_mailbox_hal_init(&spe_mailbox_queue);
+ if (ret != MAILBOX_SUCCESS) {
+ tfm_rpc_unregister_ops();
+
+ return ret;
+ }
+
+ return MAILBOX_SUCCESS;
+}
diff --git a/secure_fw/spm/model_ipc/tfm_spm_db_ipc.inc b/secure_fw/spm/model_ipc/tfm_spm_db_ipc.inc
new file mode 100644
index 0000000..3a99b2e
--- /dev/null
+++ b/secure_fw/spm/model_ipc/tfm_spm_db_ipc.inc
@@ -0,0 +1,1204 @@
+/*
+ * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+/*********** WARNING: This is an auto-generated file. Do not edit! ***********/
+
+#ifndef __TFM_SPM_DB_IPC_INC__
+#define __TFM_SPM_DB_IPC_INC__
+
+#include "tfm/spm_api.h"
+#include "psa_manifest/sid.h"
+
+/**************************************************************************/
+/** IRQ count per partition */
+/**************************************************************************/
+#ifdef TFM_PARTITION_SECURE_STORAGE
+#define TFM_PARTITION_TFM_SP_STORAGE_IRQ_COUNT 0
+#endif /* TFM_PARTITION_SECURE_STORAGE */
+
+#ifdef TFM_PARTITION_INTERNAL_TRUSTED_STORAGE
+#define TFM_PARTITION_TFM_SP_ITS_IRQ_COUNT 0
+#endif /* TFM_PARTITION_INTERNAL_TRUSTED_STORAGE */
+
+#ifdef TFM_PARTITION_AUDIT_LOG
+#define TFM_PARTITION_TFM_SP_AUDIT_LOG_IRQ_COUNT 0
+#endif /* TFM_PARTITION_AUDIT_LOG */
+
+#ifdef TFM_PARTITION_CRYPTO
+#define TFM_PARTITION_TFM_SP_CRYPTO_IRQ_COUNT 0
+#endif /* TFM_PARTITION_CRYPTO */
+
+#ifdef TFM_PARTITION_PLATFORM
+#define TFM_PARTITION_TFM_SP_PLATFORM_IRQ_COUNT 0
+#endif /* TFM_PARTITION_PLATFORM */
+
+#ifdef TFM_PARTITION_INITIAL_ATTESTATION
+#define TFM_PARTITION_TFM_SP_INITIAL_ATTESTATION_IRQ_COUNT 0
+#endif /* TFM_PARTITION_INITIAL_ATTESTATION */
+
+#ifdef TFM_PARTITION_TEST_CORE
+#define TFM_PARTITION_TFM_SP_CORE_TEST_IRQ_COUNT 0
+#endif /* TFM_PARTITION_TEST_CORE */
+
+#ifdef TFM_PARTITION_TEST_CORE
+#define TFM_PARTITION_TFM_SP_CORE_TEST_2_IRQ_COUNT 0
+#endif /* TFM_PARTITION_TEST_CORE */
+
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+#define TFM_PARTITION_TFM_SP_SECURE_TEST_PARTITION_IRQ_COUNT 0
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+#ifdef TFM_PARTITION_TEST_CORE_IPC
+#define TFM_PARTITION_TFM_SP_IPC_SERVICE_TEST_IRQ_COUNT 0
+#endif /* TFM_PARTITION_TEST_CORE_IPC */
+
+#ifdef TFM_PARTITION_TEST_CORE_IPC
+#define TFM_PARTITION_TFM_SP_IPC_CLIENT_TEST_IRQ_COUNT 0
+#endif /* TFM_PARTITION_TEST_CORE_IPC */
+
+#ifdef TFM_ENABLE_IRQ_TEST
+#define TFM_PARTITION_TFM_IRQ_TEST_1_IRQ_COUNT 1
+#endif /* TFM_ENABLE_IRQ_TEST */
+
+#ifdef TFM_PARTITION_TEST_SST
+#define TFM_PARTITION_TFM_SP_SST_TEST_IRQ_COUNT 0
+#endif /* TFM_PARTITION_TEST_SST */
+
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+#define TFM_PARTITION_TFM_SP_SECURE_CLIENT_2_IRQ_COUNT 0
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+#ifdef TFM_MULTI_CORE_TEST
+#define TFM_PARTITION_TFM_SP_MULTI_CORE_TEST_IRQ_COUNT 0
+#endif /* TFM_MULTI_CORE_TEST */
+
+/**************************************************************************/
+/** Declarations of partition init functions */
+/**************************************************************************/
+extern void tfm_nspm_thread_entry(void);
+
+#ifdef TFM_PARTITION_SECURE_STORAGE
+extern void tfm_sst_req_mngr_init(void);
+#endif /* TFM_PARTITION_SECURE_STORAGE */
+
+#ifdef TFM_PARTITION_INTERNAL_TRUSTED_STORAGE
+extern void tfm_its_req_mngr_init(void);
+#endif /* TFM_PARTITION_INTERNAL_TRUSTED_STORAGE */
+
+#ifdef TFM_PARTITION_AUDIT_LOG
+extern void audit_core_init(void);
+#endif /* TFM_PARTITION_AUDIT_LOG */
+
+#ifdef TFM_PARTITION_CRYPTO
+extern void tfm_crypto_init(void);
+#endif /* TFM_PARTITION_CRYPTO */
+
+#ifdef TFM_PARTITION_PLATFORM
+extern void platform_sp_init(void);
+#endif /* TFM_PARTITION_PLATFORM */
+
+#ifdef TFM_PARTITION_INITIAL_ATTESTATION
+extern void attest_partition_init(void);
+#endif /* TFM_PARTITION_INITIAL_ATTESTATION */
+
+#ifdef TFM_PARTITION_TEST_CORE
+extern void core_test_init(void);
+#endif /* TFM_PARTITION_TEST_CORE */
+
+#ifdef TFM_PARTITION_TEST_CORE
+extern void core_test_2_init(void);
+#endif /* TFM_PARTITION_TEST_CORE */
+
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+extern void tfm_secure_client_service_init(void);
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+#ifdef TFM_PARTITION_TEST_CORE_IPC
+extern void ipc_service_test_main(void);
+#endif /* TFM_PARTITION_TEST_CORE_IPC */
+
+#ifdef TFM_PARTITION_TEST_CORE_IPC
+extern void ipc_client_test_main(void);
+#endif /* TFM_PARTITION_TEST_CORE_IPC */
+
+#ifdef TFM_ENABLE_IRQ_TEST
+extern void tfm_irq_test_1_init(void);
+#endif /* TFM_ENABLE_IRQ_TEST */
+
+#ifdef TFM_PARTITION_TEST_SST
+extern void tfm_sst_test_init(void);
+#endif /* TFM_PARTITION_TEST_SST */
+
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+extern void tfm_secure_client_2_init(void);
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+#ifdef TFM_MULTI_CORE_TEST
+extern void multi_core_test_main(void);
+#endif /* TFM_MULTI_CORE_TEST */
+
+/**************************************************************************/
+/** Memory region declarations */
+/**************************************************************************/
+REGION_DECLARE(Image$$, ARM_LIB_STACK, $$ZI$$Base);
+REGION_DECLARE(Image$$, ARM_LIB_STACK, $$ZI$$Limit);
+
+#ifdef TFM_PARTITION_SECURE_STORAGE
+REGION_DECLARE(Image$$, TFM_SP_STORAGE_LINKER, $$Base);
+REGION_DECLARE(Image$$, TFM_SP_STORAGE_LINKER, $$Limit);
+REGION_DECLARE(Image$$, TFM_SP_STORAGE_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_SP_STORAGE_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_STORAGE_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, TFM_SP_STORAGE_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_STORAGE_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_STORAGE_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_STORAGE_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_STORAGE_LINKER, _STACK$$ZI$$Limit);
+#endif /* TFM_PARTITION_SECURE_STORAGE */
+
+#ifdef TFM_PARTITION_INTERNAL_TRUSTED_STORAGE
+REGION_DECLARE(Image$$, TFM_SP_ITS_LINKER, $$Base);
+REGION_DECLARE(Image$$, TFM_SP_ITS_LINKER, $$Limit);
+REGION_DECLARE(Image$$, TFM_SP_ITS_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_SP_ITS_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_ITS_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, TFM_SP_ITS_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_ITS_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_ITS_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_ITS_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_ITS_LINKER, _STACK$$ZI$$Limit);
+#endif /* TFM_PARTITION_INTERNAL_TRUSTED_STORAGE */
+
+#ifdef TFM_PARTITION_AUDIT_LOG
+REGION_DECLARE(Image$$, TFM_SP_AUDIT_LOG_LINKER, $$Base);
+REGION_DECLARE(Image$$, TFM_SP_AUDIT_LOG_LINKER, $$Limit);
+REGION_DECLARE(Image$$, TFM_SP_AUDIT_LOG_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_SP_AUDIT_LOG_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_AUDIT_LOG_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, TFM_SP_AUDIT_LOG_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_AUDIT_LOG_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_AUDIT_LOG_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_AUDIT_LOG_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_AUDIT_LOG_LINKER, _STACK$$ZI$$Limit);
+#endif /* TFM_PARTITION_AUDIT_LOG */
+
+#ifdef TFM_PARTITION_CRYPTO
+REGION_DECLARE(Image$$, TFM_SP_CRYPTO_LINKER, $$Base);
+REGION_DECLARE(Image$$, TFM_SP_CRYPTO_LINKER, $$Limit);
+REGION_DECLARE(Image$$, TFM_SP_CRYPTO_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_SP_CRYPTO_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_CRYPTO_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, TFM_SP_CRYPTO_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_CRYPTO_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_CRYPTO_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_CRYPTO_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_CRYPTO_LINKER, _STACK$$ZI$$Limit);
+#endif /* TFM_PARTITION_CRYPTO */
+
+#ifdef TFM_PARTITION_PLATFORM
+REGION_DECLARE(Image$$, TFM_SP_PLATFORM_LINKER, $$Base);
+REGION_DECLARE(Image$$, TFM_SP_PLATFORM_LINKER, $$Limit);
+REGION_DECLARE(Image$$, TFM_SP_PLATFORM_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_SP_PLATFORM_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_PLATFORM_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, TFM_SP_PLATFORM_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_PLATFORM_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_PLATFORM_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_PLATFORM_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_PLATFORM_LINKER, _STACK$$ZI$$Limit);
+#endif /* TFM_PARTITION_PLATFORM */
+
+#ifdef TFM_PARTITION_INITIAL_ATTESTATION
+REGION_DECLARE(Image$$, TFM_SP_INITIAL_ATTESTATION_LINKER, $$Base);
+REGION_DECLARE(Image$$, TFM_SP_INITIAL_ATTESTATION_LINKER, $$Limit);
+REGION_DECLARE(Image$$, TFM_SP_INITIAL_ATTESTATION_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_SP_INITIAL_ATTESTATION_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_INITIAL_ATTESTATION_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, TFM_SP_INITIAL_ATTESTATION_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_INITIAL_ATTESTATION_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_INITIAL_ATTESTATION_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_INITIAL_ATTESTATION_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_INITIAL_ATTESTATION_LINKER, _STACK$$ZI$$Limit);
+#endif /* TFM_PARTITION_INITIAL_ATTESTATION */
+
+#ifdef TFM_PARTITION_TEST_CORE
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_LINKER, $$Base);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_LINKER, $$Limit);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_LINKER, _STACK$$ZI$$Limit);
+#endif /* TFM_PARTITION_TEST_CORE */
+
+#ifdef TFM_PARTITION_TEST_CORE
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_2_LINKER, $$Base);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_2_LINKER, $$Limit);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_2_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_2_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_2_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_2_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_2_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_2_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_2_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_CORE_TEST_2_LINKER, _STACK$$ZI$$Limit);
+#endif /* TFM_PARTITION_TEST_CORE */
+
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+REGION_DECLARE(Image$$, TFM_SP_SECURE_TEST_PARTITION_LINKER, $$Base);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_TEST_PARTITION_LINKER, $$Limit);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_TEST_PARTITION_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_TEST_PARTITION_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_TEST_PARTITION_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_TEST_PARTITION_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_TEST_PARTITION_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_TEST_PARTITION_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_TEST_PARTITION_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_TEST_PARTITION_LINKER, _STACK$$ZI$$Limit);
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+#ifdef TFM_PARTITION_TEST_CORE_IPC
+REGION_DECLARE(Image$$, TFM_SP_IPC_SERVICE_TEST_LINKER, $$Base);
+REGION_DECLARE(Image$$, TFM_SP_IPC_SERVICE_TEST_LINKER, $$Limit);
+REGION_DECLARE(Image$$, TFM_SP_IPC_SERVICE_TEST_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_SP_IPC_SERVICE_TEST_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_IPC_SERVICE_TEST_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, TFM_SP_IPC_SERVICE_TEST_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_IPC_SERVICE_TEST_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_IPC_SERVICE_TEST_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_IPC_SERVICE_TEST_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_IPC_SERVICE_TEST_LINKER, _STACK$$ZI$$Limit);
+#endif /* TFM_PARTITION_TEST_CORE_IPC */
+
+#ifdef TFM_PARTITION_TEST_CORE_IPC
+REGION_DECLARE(Image$$, TFM_SP_IPC_CLIENT_TEST_LINKER, $$Base);
+REGION_DECLARE(Image$$, TFM_SP_IPC_CLIENT_TEST_LINKER, $$Limit);
+REGION_DECLARE(Image$$, TFM_SP_IPC_CLIENT_TEST_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_SP_IPC_CLIENT_TEST_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_IPC_CLIENT_TEST_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, TFM_SP_IPC_CLIENT_TEST_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_IPC_CLIENT_TEST_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_IPC_CLIENT_TEST_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_IPC_CLIENT_TEST_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_IPC_CLIENT_TEST_LINKER, _STACK$$ZI$$Limit);
+#endif /* TFM_PARTITION_TEST_CORE_IPC */
+
+#ifdef TFM_ENABLE_IRQ_TEST
+REGION_DECLARE(Image$$, TFM_IRQ_TEST_1_LINKER, $$Base);
+REGION_DECLARE(Image$$, TFM_IRQ_TEST_1_LINKER, $$Limit);
+REGION_DECLARE(Image$$, TFM_IRQ_TEST_1_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_IRQ_TEST_1_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_IRQ_TEST_1_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, TFM_IRQ_TEST_1_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, TFM_IRQ_TEST_1_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_IRQ_TEST_1_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_IRQ_TEST_1_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_IRQ_TEST_1_LINKER, _STACK$$ZI$$Limit);
+#endif /* TFM_ENABLE_IRQ_TEST */
+
+#ifdef TFM_PARTITION_TEST_SST
+REGION_DECLARE(Image$$, TFM_SP_SST_TEST_LINKER, $$Base);
+REGION_DECLARE(Image$$, TFM_SP_SST_TEST_LINKER, $$Limit);
+REGION_DECLARE(Image$$, TFM_SP_SST_TEST_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_SP_SST_TEST_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_SST_TEST_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, TFM_SP_SST_TEST_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_SST_TEST_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_SST_TEST_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_SST_TEST_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_SST_TEST_LINKER, _STACK$$ZI$$Limit);
+#endif /* TFM_PARTITION_TEST_SST */
+
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+REGION_DECLARE(Image$$, TFM_SP_SECURE_CLIENT_2_LINKER, $$Base);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_CLIENT_2_LINKER, $$Limit);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_CLIENT_2_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_CLIENT_2_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_CLIENT_2_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_CLIENT_2_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_CLIENT_2_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_CLIENT_2_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_CLIENT_2_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_SECURE_CLIENT_2_LINKER, _STACK$$ZI$$Limit);
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+#ifdef TFM_MULTI_CORE_TEST
+REGION_DECLARE(Image$$, TFM_SP_MULTI_CORE_TEST_LINKER, $$Base);
+REGION_DECLARE(Image$$, TFM_SP_MULTI_CORE_TEST_LINKER, $$Limit);
+REGION_DECLARE(Image$$, TFM_SP_MULTI_CORE_TEST_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, TFM_SP_MULTI_CORE_TEST_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_MULTI_CORE_TEST_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, TFM_SP_MULTI_CORE_TEST_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_MULTI_CORE_TEST_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_MULTI_CORE_TEST_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, TFM_SP_MULTI_CORE_TEST_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, TFM_SP_MULTI_CORE_TEST_LINKER, _STACK$$ZI$$Limit);
+#endif /* TFM_MULTI_CORE_TEST */
+
+/**************************************************************************/
+/** Dependencies array for Secure Partition */
+/**************************************************************************/
+#ifdef TFM_PARTITION_SECURE_STORAGE
+static int32_t dependencies_TFM_SP_STORAGE[] =
+{
+ TFM_CRYPTO_SID,
+ TFM_ITS_SET_SID,
+ TFM_ITS_GET_SID,
+ TFM_ITS_GET_INFO_SID,
+ TFM_ITS_REMOVE_SID,
+ TFM_SP_PLATFORM_NV_COUNTER_SID,
+};
+#endif /* TFM_PARTITION_SECURE_STORAGE */
+
+#ifdef TFM_PARTITION_CRYPTO
+static int32_t dependencies_TFM_SP_CRYPTO[] =
+{
+ TFM_ITS_SET_SID,
+ TFM_ITS_GET_SID,
+ TFM_ITS_GET_INFO_SID,
+ TFM_ITS_REMOVE_SID,
+};
+#endif /* TFM_PARTITION_CRYPTO */
+
+#ifdef TFM_PARTITION_INITIAL_ATTESTATION
+static int32_t dependencies_TFM_SP_INITIAL_ATTESTATION[] =
+{
+ TFM_CRYPTO_SID,
+};
+#endif /* TFM_PARTITION_INITIAL_ATTESTATION */
+
+#ifdef TFM_PARTITION_TEST_CORE
+static int32_t dependencies_TFM_SP_CORE_TEST[] =
+{
+ SPM_CORE_TEST_2_INVERT_SID,
+ SPM_CORE_TEST_2_GET_EVERY_SECOND_BYTE_SID,
+ SPM_CORE_TEST_2_SLAVE_SERVICE_SID,
+};
+#endif /* TFM_PARTITION_TEST_CORE */
+
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+static int32_t dependencies_TFM_SP_SECURE_TEST_PARTITION[] =
+{
+ TFM_SECURE_CLIENT_2_SID,
+ TFM_CRYPTO_SID,
+ TFM_SST_SET_SID,
+ TFM_SST_GET_SID,
+ TFM_SST_GET_INFO_SID,
+ TFM_SST_REMOVE_SID,
+ TFM_SST_GET_SUPPORT_SID,
+ TFM_ITS_SET_SID,
+ TFM_ITS_GET_SID,
+ TFM_ITS_GET_INFO_SID,
+ TFM_ITS_REMOVE_SID,
+ TFM_ATTEST_GET_TOKEN_SID,
+ TFM_ATTEST_GET_TOKEN_SIZE_SID,
+ TFM_ATTEST_GET_PUBLIC_KEY_SID,
+ TFM_SST_TEST_PREPARE_SID,
+ TFM_SP_PLATFORM_SYSTEM_RESET_SID,
+ TFM_SP_PLATFORM_IOCTL_SID,
+};
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+#ifdef TFM_PARTITION_TEST_CORE_IPC
+static int32_t dependencies_TFM_SP_IPC_CLIENT_TEST[] =
+{
+ IPC_SERVICE_TEST_PSA_ACCESS_APP_READ_ONLY_MEM_SID,
+ IPC_SERVICE_TEST_PSA_ACCESS_APP_MEM_SID,
+ IPC_SERVICE_TEST_BASIC_SID,
+ IPC_SERVICE_TEST_APP_ACCESS_PSA_MEM_SID,
+};
+#endif /* TFM_PARTITION_TEST_CORE_IPC */
+
+#ifdef TFM_PARTITION_TEST_SST
+/* SST test partition dependencies; entry count must equal its
+ * .dependencies_num (3) in static_data_list[].
+ */
+static int32_t dependencies_TFM_SP_SST_TEST[] =
+{
+ TFM_CRYPTO_SID,
+ TFM_ITS_GET_SID,
+ TFM_ITS_REMOVE_SID,
+};
+#endif /* TFM_PARTITION_TEST_SST */
+
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+/* Secure client 2 dependencies; entry count must equal its
+ * .dependencies_num (2) in static_data_list[].
+ */
+static int32_t dependencies_TFM_SP_SECURE_CLIENT_2[] =
+{
+ TFM_ITS_GET_SID,
+ TFM_CRYPTO_SID,
+};
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+/**************************************************************************/
+/** The static data of the partition list */
+/**************************************************************************/
+/* One entry per partition. The entry order and the #ifdef guards must stay
+ * identical to platform_data_list_list[], memory_data_list[] and
+ * partition_list[] below so the table indexes line up. The first (unguarded)
+ * entry is the non-secure internal partition.
+ */
+const struct spm_partition_static_data_t static_data_list[] =
+{
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_SP_NON_SECURE_ID,
+/* Use #ifdef, not #if: the build may define TFM_MULTI_CORE_TOPOLOGY with no
+ * value, for which "#if" is a preprocessor error; #ifdef also matches every
+ * other configuration guard in this file.
+ */
+#ifdef TFM_MULTI_CORE_TOPOLOGY
+ .partition_flags = SPM_PART_FLAG_PSA_ROT | SPM_PART_FLAG_IPC,
+#else
+ .partition_flags = SPM_PART_FLAG_APP_ROT | SPM_PART_FLAG_IPC,
+#endif /* TFM_MULTI_CORE_TOPOLOGY */
+ .partition_priority = TFM_PRIORITY_LOW,
+ .partition_init = tfm_nspm_thread_entry,
+ },
+
+#ifdef TFM_PARTITION_SECURE_STORAGE
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_SP_STORAGE,
+ .partition_flags = SPM_PART_FLAG_IPC
+ | SPM_PART_FLAG_APP_ROT
+ ,
+ .partition_priority = TFM_PRIORITY(NORMAL),
+ .partition_init = tfm_sst_req_mngr_init,
+ .dependencies_num = 6,
+ .p_dependencies = dependencies_TFM_SP_STORAGE,
+ },
+#endif /* TFM_PARTITION_SECURE_STORAGE */
+
+#ifdef TFM_PARTITION_INTERNAL_TRUSTED_STORAGE
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_SP_ITS,
+ .partition_flags = SPM_PART_FLAG_IPC
+ | SPM_PART_FLAG_PSA_ROT | SPM_PART_FLAG_APP_ROT
+ ,
+ .partition_priority = TFM_PRIORITY(NORMAL),
+ .partition_init = tfm_its_req_mngr_init,
+ .dependencies_num = 0,
+ .p_dependencies = NULL,
+ },
+#endif /* TFM_PARTITION_INTERNAL_TRUSTED_STORAGE */
+
+#ifdef TFM_PARTITION_AUDIT_LOG
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_SP_AUDIT_LOG,
+ .partition_flags = 0
+ | SPM_PART_FLAG_PSA_ROT | SPM_PART_FLAG_APP_ROT
+ ,
+ .partition_priority = TFM_PRIORITY(NORMAL),
+ .partition_init = audit_core_init,
+ .dependencies_num = 0,
+ .p_dependencies = NULL,
+ },
+#endif /* TFM_PARTITION_AUDIT_LOG */
+
+#ifdef TFM_PARTITION_CRYPTO
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_SP_CRYPTO,
+ .partition_flags = SPM_PART_FLAG_IPC
+ | SPM_PART_FLAG_PSA_ROT | SPM_PART_FLAG_APP_ROT
+ ,
+ .partition_priority = TFM_PRIORITY(NORMAL),
+ .partition_init = tfm_crypto_init,
+ .dependencies_num = 4,
+ .p_dependencies = dependencies_TFM_SP_CRYPTO,
+ },
+#endif /* TFM_PARTITION_CRYPTO */
+
+#ifdef TFM_PARTITION_PLATFORM
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_SP_PLATFORM,
+ .partition_flags = SPM_PART_FLAG_IPC
+ | SPM_PART_FLAG_PSA_ROT | SPM_PART_FLAG_APP_ROT
+ ,
+ .partition_priority = TFM_PRIORITY(NORMAL),
+ .partition_init = platform_sp_init,
+ .dependencies_num = 0,
+ .p_dependencies = NULL,
+ },
+#endif /* TFM_PARTITION_PLATFORM */
+
+#ifdef TFM_PARTITION_INITIAL_ATTESTATION
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_SP_INITIAL_ATTESTATION,
+ .partition_flags = SPM_PART_FLAG_IPC
+ | SPM_PART_FLAG_PSA_ROT | SPM_PART_FLAG_APP_ROT
+ ,
+ .partition_priority = TFM_PRIORITY(NORMAL),
+ .partition_init = attest_partition_init,
+ .dependencies_num = 1,
+ .p_dependencies = dependencies_TFM_SP_INITIAL_ATTESTATION,
+ },
+#endif /* TFM_PARTITION_INITIAL_ATTESTATION */
+
+#ifdef TFM_PARTITION_TEST_CORE
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_SP_CORE_TEST,
+ .partition_flags = SPM_PART_FLAG_IPC
+ | SPM_PART_FLAG_PSA_ROT | SPM_PART_FLAG_APP_ROT
+ ,
+ .partition_priority = TFM_PRIORITY(NORMAL),
+ .partition_init = core_test_init,
+ .dependencies_num = 3,
+ .p_dependencies = dependencies_TFM_SP_CORE_TEST,
+ },
+#endif /* TFM_PARTITION_TEST_CORE */
+
+#ifdef TFM_PARTITION_TEST_CORE
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_SP_CORE_TEST_2,
+ .partition_flags = SPM_PART_FLAG_IPC
+ | SPM_PART_FLAG_APP_ROT
+ ,
+ .partition_priority = TFM_PRIORITY(NORMAL),
+ .partition_init = core_test_2_init,
+ .dependencies_num = 0,
+ .p_dependencies = NULL,
+ },
+#endif /* TFM_PARTITION_TEST_CORE */
+
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_SP_SECURE_TEST_PARTITION,
+ .partition_flags = SPM_PART_FLAG_IPC
+ | SPM_PART_FLAG_PSA_ROT | SPM_PART_FLAG_APP_ROT
+ ,
+ .partition_priority = TFM_PRIORITY(NORMAL),
+ .partition_init = tfm_secure_client_service_init,
+ .dependencies_num = 17,
+ .p_dependencies = dependencies_TFM_SP_SECURE_TEST_PARTITION,
+ },
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+#ifdef TFM_PARTITION_TEST_CORE_IPC
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_SP_IPC_SERVICE_TEST,
+ .partition_flags = SPM_PART_FLAG_IPC
+ | SPM_PART_FLAG_PSA_ROT | SPM_PART_FLAG_APP_ROT
+ ,
+ .partition_priority = TFM_PRIORITY(HIGH),
+ .partition_init = ipc_service_test_main,
+ .dependencies_num = 0,
+ .p_dependencies = NULL,
+ },
+#endif /* TFM_PARTITION_TEST_CORE_IPC */
+
+#ifdef TFM_PARTITION_TEST_CORE_IPC
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_SP_IPC_CLIENT_TEST,
+ .partition_flags = SPM_PART_FLAG_IPC
+ | SPM_PART_FLAG_APP_ROT
+ ,
+ .partition_priority = TFM_PRIORITY(NORMAL),
+ .partition_init = ipc_client_test_main,
+ .dependencies_num = 4,
+ .p_dependencies = dependencies_TFM_SP_IPC_CLIENT_TEST,
+ },
+#endif /* TFM_PARTITION_TEST_CORE_IPC */
+
+#ifdef TFM_ENABLE_IRQ_TEST
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_IRQ_TEST_1,
+ .partition_flags = SPM_PART_FLAG_IPC
+ | SPM_PART_FLAG_APP_ROT
+ ,
+ .partition_priority = TFM_PRIORITY(NORMAL),
+ .partition_init = tfm_irq_test_1_init,
+ .dependencies_num = 0,
+ .p_dependencies = NULL,
+ },
+#endif /* TFM_ENABLE_IRQ_TEST */
+
+#ifdef TFM_PARTITION_TEST_SST
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_SP_SST_TEST,
+ .partition_flags = SPM_PART_FLAG_IPC
+ | SPM_PART_FLAG_PSA_ROT | SPM_PART_FLAG_APP_ROT
+ ,
+ .partition_priority = TFM_PRIORITY(NORMAL),
+ .partition_init = tfm_sst_test_init,
+ .dependencies_num = 3,
+ .p_dependencies = dependencies_TFM_SP_SST_TEST,
+ },
+#endif /* TFM_PARTITION_TEST_SST */
+
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_SP_SECURE_CLIENT_2,
+ .partition_flags = SPM_PART_FLAG_IPC
+ | SPM_PART_FLAG_APP_ROT
+ ,
+ .partition_priority = TFM_PRIORITY(NORMAL),
+ .partition_init = tfm_secure_client_2_init,
+ .dependencies_num = 2,
+ .p_dependencies = dependencies_TFM_SP_SECURE_CLIENT_2,
+ },
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+#ifdef TFM_MULTI_CORE_TEST
+ {
+ .psa_framework_version = 0x0100,
+ .partition_id = TFM_SP_MULTI_CORE_TEST,
+ .partition_flags = SPM_PART_FLAG_IPC
+ | SPM_PART_FLAG_APP_ROT
+ ,
+ .partition_priority = TFM_PRIORITY(NORMAL),
+ .partition_init = multi_core_test_main,
+ .dependencies_num = 0,
+ .p_dependencies = NULL,
+ },
+#endif /* TFM_MULTI_CORE_TEST */
+
+};
+
+/**************************************************************************/
+/** The platform data of the partition list */
+/**************************************************************************/
+/* NULL-terminated lists of platform peripherals assigned to a partition;
+ * referenced from platform_data_list_list[] below.
+ */
+#ifdef TFM_PARTITION_AUDIT_LOG
+const struct tfm_spm_partition_platform_data_t *
+ platform_data_list_TFM_SP_AUDIT_LOG[] =
+{
+#ifdef AUDIT_UART_REDIRECTION
+ TFM_PERIPHERAL_UART1,
+#endif /* AUDIT_UART_REDIRECTION */
+ NULL
+};
+#endif /* TFM_PARTITION_AUDIT_LOG */
+
+#ifdef TFM_PARTITION_TEST_CORE
+const struct tfm_spm_partition_platform_data_t *
+ platform_data_list_TFM_SP_CORE_TEST[] =
+{
+ TFM_PERIPHERAL_FPGA_IO,
+ NULL
+};
+#endif /* TFM_PARTITION_TEST_CORE */
+
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+const struct tfm_spm_partition_platform_data_t *
+ platform_data_list_TFM_SP_SECURE_TEST_PARTITION[] =
+{
+ TFM_PERIPHERAL_STD_UART,
+ NULL
+};
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+#ifdef TFM_ENABLE_IRQ_TEST
+const struct tfm_spm_partition_platform_data_t *
+ platform_data_list_TFM_IRQ_TEST_1[] =
+{
+ TFM_PERIPHERAL_TIMER0,
+ NULL
+};
+#endif /* TFM_ENABLE_IRQ_TEST */
+
+/* Per-partition platform (peripheral) data, indexed in the same order and
+ * under the same #ifdef guards as static_data_list[]; NULL means the
+ * partition owns no peripherals. The first entry is the non-secure internal
+ * partition.
+ */
+const struct tfm_spm_partition_platform_data_t **platform_data_list_list[] =
+{
+ NULL,
+
+#ifdef TFM_PARTITION_SECURE_STORAGE
+ NULL,
+#endif /* TFM_PARTITION_SECURE_STORAGE */
+
+#ifdef TFM_PARTITION_INTERNAL_TRUSTED_STORAGE
+ NULL,
+#endif /* TFM_PARTITION_INTERNAL_TRUSTED_STORAGE */
+
+#ifdef TFM_PARTITION_AUDIT_LOG
+ platform_data_list_TFM_SP_AUDIT_LOG,
+#endif /* TFM_PARTITION_AUDIT_LOG */
+
+#ifdef TFM_PARTITION_CRYPTO
+ NULL,
+#endif /* TFM_PARTITION_CRYPTO */
+
+#ifdef TFM_PARTITION_PLATFORM
+ NULL,
+#endif /* TFM_PARTITION_PLATFORM */
+
+#ifdef TFM_PARTITION_INITIAL_ATTESTATION
+ NULL,
+#endif /* TFM_PARTITION_INITIAL_ATTESTATION */
+
+#ifdef TFM_PARTITION_TEST_CORE
+ platform_data_list_TFM_SP_CORE_TEST,
+#endif /* TFM_PARTITION_TEST_CORE */
+
+#ifdef TFM_PARTITION_TEST_CORE
+ NULL,
+#endif /* TFM_PARTITION_TEST_CORE */
+
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+ platform_data_list_TFM_SP_SECURE_TEST_PARTITION,
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+#ifdef TFM_PARTITION_TEST_CORE_IPC
+ NULL,
+#endif /* TFM_PARTITION_TEST_CORE_IPC */
+
+#ifdef TFM_PARTITION_TEST_CORE_IPC
+ NULL,
+#endif /* TFM_PARTITION_TEST_CORE_IPC */
+
+#ifdef TFM_ENABLE_IRQ_TEST
+ platform_data_list_TFM_IRQ_TEST_1,
+#endif /* TFM_ENABLE_IRQ_TEST */
+
+#ifdef TFM_PARTITION_TEST_SST
+ NULL,
+#endif /* TFM_PARTITION_TEST_SST */
+
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+ NULL,
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+#ifdef TFM_MULTI_CORE_TEST
+ NULL,
+#endif /* TFM_MULTI_CORE_TEST */
+
+};
+
+/**************************************************************************/
+/** The memory data of the partition list */
+/**************************************************************************/
+/* Per-partition memory layout, built from linker-defined region symbols via
+ * PART_REGION_ADDR. Entry order and #ifdef guards must match
+ * static_data_list[] exactly. The first (unguarded) entry is the non-secure
+ * internal partition, for which only stack/RW addresses are recorded.
+ */
+const struct tfm_spm_partition_memory_data_t memory_data_list[] =
+{
+ {
+ .stack_bottom = PART_REGION_ADDR(ARM_LIB_STACK, $$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(ARM_LIB_STACK, $$ZI$$Limit),
+ .rw_start = PART_REGION_ADDR(ARM_LIB_STACK, $$ZI$$Base),
+ },
+#ifdef TFM_PARTITION_SECURE_STORAGE
+ {
+ .code_start = PART_REGION_ADDR(TFM_SP_STORAGE_LINKER, $$Base),
+ .code_limit = PART_REGION_ADDR(TFM_SP_STORAGE_LINKER, $$Limit),
+ .ro_start = PART_REGION_ADDR(TFM_SP_STORAGE_LINKER, $$RO$$Base),
+ .ro_limit = PART_REGION_ADDR(TFM_SP_STORAGE_LINKER, $$RO$$Limit),
+ .rw_start = PART_REGION_ADDR(TFM_SP_STORAGE_LINKER, _DATA$$RW$$Base),
+ .rw_limit = PART_REGION_ADDR(TFM_SP_STORAGE_LINKER, _DATA$$RW$$Limit),
+ .zi_start = PART_REGION_ADDR(TFM_SP_STORAGE_LINKER, _DATA$$ZI$$Base),
+ .zi_limit = PART_REGION_ADDR(TFM_SP_STORAGE_LINKER, _DATA$$ZI$$Limit),
+ .stack_bottom = PART_REGION_ADDR(TFM_SP_STORAGE_LINKER, _STACK$$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(TFM_SP_STORAGE_LINKER, _STACK$$ZI$$Limit),
+ },
+#endif /* TFM_PARTITION_SECURE_STORAGE */
+
+#ifdef TFM_PARTITION_INTERNAL_TRUSTED_STORAGE
+ {
+ .code_start = PART_REGION_ADDR(TFM_SP_ITS_LINKER, $$Base),
+ .code_limit = PART_REGION_ADDR(TFM_SP_ITS_LINKER, $$Limit),
+ .ro_start = PART_REGION_ADDR(TFM_SP_ITS_LINKER, $$RO$$Base),
+ .ro_limit = PART_REGION_ADDR(TFM_SP_ITS_LINKER, $$RO$$Limit),
+ .rw_start = PART_REGION_ADDR(TFM_SP_ITS_LINKER, _DATA$$RW$$Base),
+ .rw_limit = PART_REGION_ADDR(TFM_SP_ITS_LINKER, _DATA$$RW$$Limit),
+ .zi_start = PART_REGION_ADDR(TFM_SP_ITS_LINKER, _DATA$$ZI$$Base),
+ .zi_limit = PART_REGION_ADDR(TFM_SP_ITS_LINKER, _DATA$$ZI$$Limit),
+ .stack_bottom = PART_REGION_ADDR(TFM_SP_ITS_LINKER, _STACK$$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(TFM_SP_ITS_LINKER, _STACK$$ZI$$Limit),
+ },
+#endif /* TFM_PARTITION_INTERNAL_TRUSTED_STORAGE */
+
+#ifdef TFM_PARTITION_AUDIT_LOG
+ {
+ .code_start = PART_REGION_ADDR(TFM_SP_AUDIT_LOG_LINKER, $$Base),
+ .code_limit = PART_REGION_ADDR(TFM_SP_AUDIT_LOG_LINKER, $$Limit),
+ .ro_start = PART_REGION_ADDR(TFM_SP_AUDIT_LOG_LINKER, $$RO$$Base),
+ .ro_limit = PART_REGION_ADDR(TFM_SP_AUDIT_LOG_LINKER, $$RO$$Limit),
+ .rw_start = PART_REGION_ADDR(TFM_SP_AUDIT_LOG_LINKER, _DATA$$RW$$Base),
+ .rw_limit = PART_REGION_ADDR(TFM_SP_AUDIT_LOG_LINKER, _DATA$$RW$$Limit),
+ .zi_start = PART_REGION_ADDR(TFM_SP_AUDIT_LOG_LINKER, _DATA$$ZI$$Base),
+ .zi_limit = PART_REGION_ADDR(TFM_SP_AUDIT_LOG_LINKER, _DATA$$ZI$$Limit),
+ .stack_bottom = PART_REGION_ADDR(TFM_SP_AUDIT_LOG_LINKER, _STACK$$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(TFM_SP_AUDIT_LOG_LINKER, _STACK$$ZI$$Limit),
+ },
+#endif /* TFM_PARTITION_AUDIT_LOG */
+
+#ifdef TFM_PARTITION_CRYPTO
+ {
+ .code_start = PART_REGION_ADDR(TFM_SP_CRYPTO_LINKER, $$Base),
+ .code_limit = PART_REGION_ADDR(TFM_SP_CRYPTO_LINKER, $$Limit),
+ .ro_start = PART_REGION_ADDR(TFM_SP_CRYPTO_LINKER, $$RO$$Base),
+ .ro_limit = PART_REGION_ADDR(TFM_SP_CRYPTO_LINKER, $$RO$$Limit),
+ .rw_start = PART_REGION_ADDR(TFM_SP_CRYPTO_LINKER, _DATA$$RW$$Base),
+ .rw_limit = PART_REGION_ADDR(TFM_SP_CRYPTO_LINKER, _DATA$$RW$$Limit),
+ .zi_start = PART_REGION_ADDR(TFM_SP_CRYPTO_LINKER, _DATA$$ZI$$Base),
+ .zi_limit = PART_REGION_ADDR(TFM_SP_CRYPTO_LINKER, _DATA$$ZI$$Limit),
+ .stack_bottom = PART_REGION_ADDR(TFM_SP_CRYPTO_LINKER, _STACK$$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(TFM_SP_CRYPTO_LINKER, _STACK$$ZI$$Limit),
+ },
+#endif /* TFM_PARTITION_CRYPTO */
+
+#ifdef TFM_PARTITION_PLATFORM
+ {
+ .code_start = PART_REGION_ADDR(TFM_SP_PLATFORM_LINKER, $$Base),
+ .code_limit = PART_REGION_ADDR(TFM_SP_PLATFORM_LINKER, $$Limit),
+ .ro_start = PART_REGION_ADDR(TFM_SP_PLATFORM_LINKER, $$RO$$Base),
+ .ro_limit = PART_REGION_ADDR(TFM_SP_PLATFORM_LINKER, $$RO$$Limit),
+ .rw_start = PART_REGION_ADDR(TFM_SP_PLATFORM_LINKER, _DATA$$RW$$Base),
+ .rw_limit = PART_REGION_ADDR(TFM_SP_PLATFORM_LINKER, _DATA$$RW$$Limit),
+ .zi_start = PART_REGION_ADDR(TFM_SP_PLATFORM_LINKER, _DATA$$ZI$$Base),
+ .zi_limit = PART_REGION_ADDR(TFM_SP_PLATFORM_LINKER, _DATA$$ZI$$Limit),
+ .stack_bottom = PART_REGION_ADDR(TFM_SP_PLATFORM_LINKER, _STACK$$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(TFM_SP_PLATFORM_LINKER, _STACK$$ZI$$Limit),
+ },
+#endif /* TFM_PARTITION_PLATFORM */
+
+#ifdef TFM_PARTITION_INITIAL_ATTESTATION
+ {
+ .code_start = PART_REGION_ADDR(TFM_SP_INITIAL_ATTESTATION_LINKER, $$Base),
+ .code_limit = PART_REGION_ADDR(TFM_SP_INITIAL_ATTESTATION_LINKER, $$Limit),
+ .ro_start = PART_REGION_ADDR(TFM_SP_INITIAL_ATTESTATION_LINKER, $$RO$$Base),
+ .ro_limit = PART_REGION_ADDR(TFM_SP_INITIAL_ATTESTATION_LINKER, $$RO$$Limit),
+ .rw_start = PART_REGION_ADDR(TFM_SP_INITIAL_ATTESTATION_LINKER, _DATA$$RW$$Base),
+ .rw_limit = PART_REGION_ADDR(TFM_SP_INITIAL_ATTESTATION_LINKER, _DATA$$RW$$Limit),
+ .zi_start = PART_REGION_ADDR(TFM_SP_INITIAL_ATTESTATION_LINKER, _DATA$$ZI$$Base),
+ .zi_limit = PART_REGION_ADDR(TFM_SP_INITIAL_ATTESTATION_LINKER, _DATA$$ZI$$Limit),
+ .stack_bottom = PART_REGION_ADDR(TFM_SP_INITIAL_ATTESTATION_LINKER, _STACK$$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(TFM_SP_INITIAL_ATTESTATION_LINKER, _STACK$$ZI$$Limit),
+ },
+#endif /* TFM_PARTITION_INITIAL_ATTESTATION */
+
+#ifdef TFM_PARTITION_TEST_CORE
+ {
+ .code_start = PART_REGION_ADDR(TFM_SP_CORE_TEST_LINKER, $$Base),
+ .code_limit = PART_REGION_ADDR(TFM_SP_CORE_TEST_LINKER, $$Limit),
+ .ro_start = PART_REGION_ADDR(TFM_SP_CORE_TEST_LINKER, $$RO$$Base),
+ .ro_limit = PART_REGION_ADDR(TFM_SP_CORE_TEST_LINKER, $$RO$$Limit),
+ .rw_start = PART_REGION_ADDR(TFM_SP_CORE_TEST_LINKER, _DATA$$RW$$Base),
+ .rw_limit = PART_REGION_ADDR(TFM_SP_CORE_TEST_LINKER, _DATA$$RW$$Limit),
+ .zi_start = PART_REGION_ADDR(TFM_SP_CORE_TEST_LINKER, _DATA$$ZI$$Base),
+ .zi_limit = PART_REGION_ADDR(TFM_SP_CORE_TEST_LINKER, _DATA$$ZI$$Limit),
+ .stack_bottom = PART_REGION_ADDR(TFM_SP_CORE_TEST_LINKER, _STACK$$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(TFM_SP_CORE_TEST_LINKER, _STACK$$ZI$$Limit),
+ },
+#endif /* TFM_PARTITION_TEST_CORE */
+
+#ifdef TFM_PARTITION_TEST_CORE
+ {
+ .code_start = PART_REGION_ADDR(TFM_SP_CORE_TEST_2_LINKER, $$Base),
+ .code_limit = PART_REGION_ADDR(TFM_SP_CORE_TEST_2_LINKER, $$Limit),
+ .ro_start = PART_REGION_ADDR(TFM_SP_CORE_TEST_2_LINKER, $$RO$$Base),
+ .ro_limit = PART_REGION_ADDR(TFM_SP_CORE_TEST_2_LINKER, $$RO$$Limit),
+ .rw_start = PART_REGION_ADDR(TFM_SP_CORE_TEST_2_LINKER, _DATA$$RW$$Base),
+ .rw_limit = PART_REGION_ADDR(TFM_SP_CORE_TEST_2_LINKER, _DATA$$RW$$Limit),
+ .zi_start = PART_REGION_ADDR(TFM_SP_CORE_TEST_2_LINKER, _DATA$$ZI$$Base),
+ .zi_limit = PART_REGION_ADDR(TFM_SP_CORE_TEST_2_LINKER, _DATA$$ZI$$Limit),
+ .stack_bottom = PART_REGION_ADDR(TFM_SP_CORE_TEST_2_LINKER, _STACK$$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(TFM_SP_CORE_TEST_2_LINKER, _STACK$$ZI$$Limit),
+ },
+#endif /* TFM_PARTITION_TEST_CORE */
+
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+ {
+ .code_start = PART_REGION_ADDR(TFM_SP_SECURE_TEST_PARTITION_LINKER, $$Base),
+ .code_limit = PART_REGION_ADDR(TFM_SP_SECURE_TEST_PARTITION_LINKER, $$Limit),
+ .ro_start = PART_REGION_ADDR(TFM_SP_SECURE_TEST_PARTITION_LINKER, $$RO$$Base),
+ .ro_limit = PART_REGION_ADDR(TFM_SP_SECURE_TEST_PARTITION_LINKER, $$RO$$Limit),
+ .rw_start = PART_REGION_ADDR(TFM_SP_SECURE_TEST_PARTITION_LINKER, _DATA$$RW$$Base),
+ .rw_limit = PART_REGION_ADDR(TFM_SP_SECURE_TEST_PARTITION_LINKER, _DATA$$RW$$Limit),
+ .zi_start = PART_REGION_ADDR(TFM_SP_SECURE_TEST_PARTITION_LINKER, _DATA$$ZI$$Base),
+ .zi_limit = PART_REGION_ADDR(TFM_SP_SECURE_TEST_PARTITION_LINKER, _DATA$$ZI$$Limit),
+ .stack_bottom = PART_REGION_ADDR(TFM_SP_SECURE_TEST_PARTITION_LINKER, _STACK$$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(TFM_SP_SECURE_TEST_PARTITION_LINKER, _STACK$$ZI$$Limit),
+ },
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+#ifdef TFM_PARTITION_TEST_CORE_IPC
+ {
+ .code_start = PART_REGION_ADDR(TFM_SP_IPC_SERVICE_TEST_LINKER, $$Base),
+ .code_limit = PART_REGION_ADDR(TFM_SP_IPC_SERVICE_TEST_LINKER, $$Limit),
+ .ro_start = PART_REGION_ADDR(TFM_SP_IPC_SERVICE_TEST_LINKER, $$RO$$Base),
+ .ro_limit = PART_REGION_ADDR(TFM_SP_IPC_SERVICE_TEST_LINKER, $$RO$$Limit),
+ .rw_start = PART_REGION_ADDR(TFM_SP_IPC_SERVICE_TEST_LINKER, _DATA$$RW$$Base),
+ .rw_limit = PART_REGION_ADDR(TFM_SP_IPC_SERVICE_TEST_LINKER, _DATA$$RW$$Limit),
+ .zi_start = PART_REGION_ADDR(TFM_SP_IPC_SERVICE_TEST_LINKER, _DATA$$ZI$$Base),
+ .zi_limit = PART_REGION_ADDR(TFM_SP_IPC_SERVICE_TEST_LINKER, _DATA$$ZI$$Limit),
+ .stack_bottom = PART_REGION_ADDR(TFM_SP_IPC_SERVICE_TEST_LINKER, _STACK$$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(TFM_SP_IPC_SERVICE_TEST_LINKER, _STACK$$ZI$$Limit),
+ },
+#endif /* TFM_PARTITION_TEST_CORE_IPC */
+
+#ifdef TFM_PARTITION_TEST_CORE_IPC
+ {
+ .code_start = PART_REGION_ADDR(TFM_SP_IPC_CLIENT_TEST_LINKER, $$Base),
+ .code_limit = PART_REGION_ADDR(TFM_SP_IPC_CLIENT_TEST_LINKER, $$Limit),
+ .ro_start = PART_REGION_ADDR(TFM_SP_IPC_CLIENT_TEST_LINKER, $$RO$$Base),
+ .ro_limit = PART_REGION_ADDR(TFM_SP_IPC_CLIENT_TEST_LINKER, $$RO$$Limit),
+ .rw_start = PART_REGION_ADDR(TFM_SP_IPC_CLIENT_TEST_LINKER, _DATA$$RW$$Base),
+ .rw_limit = PART_REGION_ADDR(TFM_SP_IPC_CLIENT_TEST_LINKER, _DATA$$RW$$Limit),
+ .zi_start = PART_REGION_ADDR(TFM_SP_IPC_CLIENT_TEST_LINKER, _DATA$$ZI$$Base),
+ .zi_limit = PART_REGION_ADDR(TFM_SP_IPC_CLIENT_TEST_LINKER, _DATA$$ZI$$Limit),
+ .stack_bottom = PART_REGION_ADDR(TFM_SP_IPC_CLIENT_TEST_LINKER, _STACK$$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(TFM_SP_IPC_CLIENT_TEST_LINKER, _STACK$$ZI$$Limit),
+ },
+#endif /* TFM_PARTITION_TEST_CORE_IPC */
+
+#ifdef TFM_ENABLE_IRQ_TEST
+ {
+ .code_start = PART_REGION_ADDR(TFM_IRQ_TEST_1_LINKER, $$Base),
+ .code_limit = PART_REGION_ADDR(TFM_IRQ_TEST_1_LINKER, $$Limit),
+ .ro_start = PART_REGION_ADDR(TFM_IRQ_TEST_1_LINKER, $$RO$$Base),
+ .ro_limit = PART_REGION_ADDR(TFM_IRQ_TEST_1_LINKER, $$RO$$Limit),
+ .rw_start = PART_REGION_ADDR(TFM_IRQ_TEST_1_LINKER, _DATA$$RW$$Base),
+ .rw_limit = PART_REGION_ADDR(TFM_IRQ_TEST_1_LINKER, _DATA$$RW$$Limit),
+ .zi_start = PART_REGION_ADDR(TFM_IRQ_TEST_1_LINKER, _DATA$$ZI$$Base),
+ .zi_limit = PART_REGION_ADDR(TFM_IRQ_TEST_1_LINKER, _DATA$$ZI$$Limit),
+ .stack_bottom = PART_REGION_ADDR(TFM_IRQ_TEST_1_LINKER, _STACK$$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(TFM_IRQ_TEST_1_LINKER, _STACK$$ZI$$Limit),
+ },
+#endif /* TFM_ENABLE_IRQ_TEST */
+
+#ifdef TFM_PARTITION_TEST_SST
+ {
+ .code_start = PART_REGION_ADDR(TFM_SP_SST_TEST_LINKER, $$Base),
+ .code_limit = PART_REGION_ADDR(TFM_SP_SST_TEST_LINKER, $$Limit),
+ .ro_start = PART_REGION_ADDR(TFM_SP_SST_TEST_LINKER, $$RO$$Base),
+ .ro_limit = PART_REGION_ADDR(TFM_SP_SST_TEST_LINKER, $$RO$$Limit),
+ .rw_start = PART_REGION_ADDR(TFM_SP_SST_TEST_LINKER, _DATA$$RW$$Base),
+ .rw_limit = PART_REGION_ADDR(TFM_SP_SST_TEST_LINKER, _DATA$$RW$$Limit),
+ .zi_start = PART_REGION_ADDR(TFM_SP_SST_TEST_LINKER, _DATA$$ZI$$Base),
+ .zi_limit = PART_REGION_ADDR(TFM_SP_SST_TEST_LINKER, _DATA$$ZI$$Limit),
+ .stack_bottom = PART_REGION_ADDR(TFM_SP_SST_TEST_LINKER, _STACK$$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(TFM_SP_SST_TEST_LINKER, _STACK$$ZI$$Limit),
+ },
+#endif /* TFM_PARTITION_TEST_SST */
+
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+ {
+ .code_start = PART_REGION_ADDR(TFM_SP_SECURE_CLIENT_2_LINKER, $$Base),
+ .code_limit = PART_REGION_ADDR(TFM_SP_SECURE_CLIENT_2_LINKER, $$Limit),
+ .ro_start = PART_REGION_ADDR(TFM_SP_SECURE_CLIENT_2_LINKER, $$RO$$Base),
+ .ro_limit = PART_REGION_ADDR(TFM_SP_SECURE_CLIENT_2_LINKER, $$RO$$Limit),
+ .rw_start = PART_REGION_ADDR(TFM_SP_SECURE_CLIENT_2_LINKER, _DATA$$RW$$Base),
+ .rw_limit = PART_REGION_ADDR(TFM_SP_SECURE_CLIENT_2_LINKER, _DATA$$RW$$Limit),
+ .zi_start = PART_REGION_ADDR(TFM_SP_SECURE_CLIENT_2_LINKER, _DATA$$ZI$$Base),
+ .zi_limit = PART_REGION_ADDR(TFM_SP_SECURE_CLIENT_2_LINKER, _DATA$$ZI$$Limit),
+ .stack_bottom = PART_REGION_ADDR(TFM_SP_SECURE_CLIENT_2_LINKER, _STACK$$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(TFM_SP_SECURE_CLIENT_2_LINKER, _STACK$$ZI$$Limit),
+ },
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+#ifdef TFM_MULTI_CORE_TEST
+ {
+ .code_start = PART_REGION_ADDR(TFM_SP_MULTI_CORE_TEST_LINKER, $$Base),
+ .code_limit = PART_REGION_ADDR(TFM_SP_MULTI_CORE_TEST_LINKER, $$Limit),
+ .ro_start = PART_REGION_ADDR(TFM_SP_MULTI_CORE_TEST_LINKER, $$RO$$Base),
+ .ro_limit = PART_REGION_ADDR(TFM_SP_MULTI_CORE_TEST_LINKER, $$RO$$Limit),
+ .rw_start = PART_REGION_ADDR(TFM_SP_MULTI_CORE_TEST_LINKER, _DATA$$RW$$Base),
+ .rw_limit = PART_REGION_ADDR(TFM_SP_MULTI_CORE_TEST_LINKER, _DATA$$RW$$Limit),
+ .zi_start = PART_REGION_ADDR(TFM_SP_MULTI_CORE_TEST_LINKER, _DATA$$ZI$$Base),
+ .zi_limit = PART_REGION_ADDR(TFM_SP_MULTI_CORE_TEST_LINKER, _DATA$$ZI$$Limit),
+ .stack_bottom = PART_REGION_ADDR(TFM_SP_MULTI_CORE_TEST_LINKER, _STACK$$ZI$$Base),
+ .stack_top = PART_REGION_ADDR(TFM_SP_MULTI_CORE_TEST_LINKER, _STACK$$ZI$$Limit),
+ },
+#endif /* TFM_MULTI_CORE_TEST */
+
+};
+
+/**************************************************************************/
+/** The partition list for the DB */
+/**************************************************************************/
+/* One DB record per partition, in the same order and under the same #ifdef
+ * guards as static_data_list[]. All records start zeroed/NULL here;
+ * presumably the SPM links .static_data/.platform_data_list to the tables
+ * above when it initializes the DB -- confirm in tfm/spm_api.
+ */
+static struct spm_partition_desc_t partition_list [] =
+{
+ {{0}}, /* placeholder for Non-secure internal partition */
+
+ /* -----------------------------------------------------------------------*/
+ /* - Partition DB record for TFM_SP_STORAGE */
+ /* -----------------------------------------------------------------------*/
+#ifdef TFM_PARTITION_SECURE_STORAGE
+ {
+ /* Runtime data */
+ .runtime_data = {0},
+ .static_data = NULL,
+ .platform_data_list = NULL,
+ },
+#endif /* TFM_PARTITION_SECURE_STORAGE */
+
+ /* -----------------------------------------------------------------------*/
+ /* - Partition DB record for TFM_SP_ITS */
+ /* -----------------------------------------------------------------------*/
+#ifdef TFM_PARTITION_INTERNAL_TRUSTED_STORAGE
+ {
+ /* Runtime data */
+ .runtime_data = {0},
+ .static_data = NULL,
+ .platform_data_list = NULL,
+ },
+#endif /* TFM_PARTITION_INTERNAL_TRUSTED_STORAGE */
+
+ /* -----------------------------------------------------------------------*/
+ /* - Partition DB record for TFM_SP_AUDIT_LOG */
+ /* -----------------------------------------------------------------------*/
+#ifdef TFM_PARTITION_AUDIT_LOG
+ {
+ /* Runtime data */
+ .runtime_data = {0},
+ .static_data = NULL,
+ .platform_data_list = NULL,
+ },
+#endif /* TFM_PARTITION_AUDIT_LOG */
+
+ /* -----------------------------------------------------------------------*/
+ /* - Partition DB record for TFM_SP_CRYPTO */
+ /* -----------------------------------------------------------------------*/
+#ifdef TFM_PARTITION_CRYPTO
+ {
+ /* Runtime data */
+ .runtime_data = {0},
+ .static_data = NULL,
+ .platform_data_list = NULL,
+ },
+#endif /* TFM_PARTITION_CRYPTO */
+
+ /* -----------------------------------------------------------------------*/
+ /* - Partition DB record for TFM_SP_PLATFORM */
+ /* -----------------------------------------------------------------------*/
+#ifdef TFM_PARTITION_PLATFORM
+ {
+ /* Runtime data */
+ .runtime_data = {0},
+ .static_data = NULL,
+ .platform_data_list = NULL,
+ },
+#endif /* TFM_PARTITION_PLATFORM */
+
+ /* -----------------------------------------------------------------------*/
+ /* - Partition DB record for TFM_SP_INITIAL_ATTESTATION */
+ /* -----------------------------------------------------------------------*/
+#ifdef TFM_PARTITION_INITIAL_ATTESTATION
+ {
+ /* Runtime data */
+ .runtime_data = {0},
+ .static_data = NULL,
+ .platform_data_list = NULL,
+ },
+#endif /* TFM_PARTITION_INITIAL_ATTESTATION */
+
+ /* -----------------------------------------------------------------------*/
+ /* - Partition DB record for TFM_SP_CORE_TEST */
+ /* -----------------------------------------------------------------------*/
+#ifdef TFM_PARTITION_TEST_CORE
+ {
+ /* Runtime data */
+ .runtime_data = {0},
+ .static_data = NULL,
+ .platform_data_list = NULL,
+ },
+#endif /* TFM_PARTITION_TEST_CORE */
+
+ /* -----------------------------------------------------------------------*/
+ /* - Partition DB record for TFM_SP_CORE_TEST_2 */
+ /* -----------------------------------------------------------------------*/
+#ifdef TFM_PARTITION_TEST_CORE
+ {
+ /* Runtime data */
+ .runtime_data = {0},
+ .static_data = NULL,
+ .platform_data_list = NULL,
+ },
+#endif /* TFM_PARTITION_TEST_CORE */
+
+ /* -----------------------------------------------------------------------*/
+ /* - Partition DB record for TFM_SP_SECURE_TEST_PARTITION */
+ /* -----------------------------------------------------------------------*/
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+ {
+ /* Runtime data */
+ .runtime_data = {0},
+ .static_data = NULL,
+ .platform_data_list = NULL,
+ },
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+ /* -----------------------------------------------------------------------*/
+ /* - Partition DB record for TFM_SP_IPC_SERVICE_TEST */
+ /* -----------------------------------------------------------------------*/
+#ifdef TFM_PARTITION_TEST_CORE_IPC
+ {
+ /* Runtime data */
+ .runtime_data = {0},
+ .static_data = NULL,
+ .platform_data_list = NULL,
+ },
+#endif /* TFM_PARTITION_TEST_CORE_IPC */
+
+ /* -----------------------------------------------------------------------*/
+ /* - Partition DB record for TFM_SP_IPC_CLIENT_TEST */
+ /* -----------------------------------------------------------------------*/
+#ifdef TFM_PARTITION_TEST_CORE_IPC
+ {
+ /* Runtime data */
+ .runtime_data = {0},
+ .static_data = NULL,
+ .platform_data_list = NULL,
+ },
+#endif /* TFM_PARTITION_TEST_CORE_IPC */
+
+ /* -----------------------------------------------------------------------*/
+ /* - Partition DB record for TFM_IRQ_TEST_1 */
+ /* -----------------------------------------------------------------------*/
+#ifdef TFM_ENABLE_IRQ_TEST
+ {
+ /* Runtime data */
+ .runtime_data = {0},
+ .static_data = NULL,
+ .platform_data_list = NULL,
+ },
+#endif /* TFM_ENABLE_IRQ_TEST */
+
+ /* -----------------------------------------------------------------------*/
+ /* - Partition DB record for TFM_SP_SST_TEST */
+ /* -----------------------------------------------------------------------*/
+#ifdef TFM_PARTITION_TEST_SST
+ {
+ /* Runtime data */
+ .runtime_data = {0},
+ .static_data = NULL,
+ .platform_data_list = NULL,
+ },
+#endif /* TFM_PARTITION_TEST_SST */
+
+ /* -----------------------------------------------------------------------*/
+ /* - Partition DB record for TFM_SP_SECURE_CLIENT_2 */
+ /* -----------------------------------------------------------------------*/
+#ifdef TFM_PARTITION_TEST_SECURE_SERVICES
+ {
+ /* Runtime data */
+ .runtime_data = {0},
+ .static_data = NULL,
+ .platform_data_list = NULL,
+ },
+#endif /* TFM_PARTITION_TEST_SECURE_SERVICES */
+
+ /* -----------------------------------------------------------------------*/
+ /* - Partition DB record for TFM_SP_MULTI_CORE_TEST */
+ /* -----------------------------------------------------------------------*/
+#ifdef TFM_MULTI_CORE_TEST
+ {
+ /* Runtime data */
+ .runtime_data = {0},
+ .static_data = NULL,
+ .platform_data_list = NULL,
+ },
+#endif /* TFM_MULTI_CORE_TEST */
+
+};
+
+/* Top-level SPM partition database handle. partition_count is derived from
+ * sizeof(partition_list) so it automatically tracks the #ifdef-selected set
+ * of partitions. is_init is 0 here; presumably set once the SPM finishes DB
+ * initialization -- confirm in tfm/spm_api.
+ */
+struct spm_partition_db_t g_spm_partition_db = {
+ .is_init = 0,
+ .partition_count = sizeof(partition_list) / sizeof(partition_list[0]),
+ .partitions = partition_list,
+};
+
+#endif /* __TFM_SPM_DB_IPC_INC__ */
\ No newline at end of file
diff --git a/secure_fw/spm/model_ipc/tfm_spm_db_ipc.inc.template b/secure_fw/spm/model_ipc/tfm_spm_db_ipc.inc.template
new file mode 100644
index 0000000..4fcd0fb
--- /dev/null
+++ b/secure_fw/spm/model_ipc/tfm_spm_db_ipc.inc.template
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+{{utilities.donotedit_warning}}
+
+#ifndef __TFM_SPM_DB_IPC_INC__
+#define __TFM_SPM_DB_IPC_INC__
+
+#include "tfm/spm_api.h"
+#include "psa_manifest/sid.h"
+
+{# Produce a build error if heap_size is presented in the manifest, because of the dynamic memory allocation is not supported now. #}
+{% for manifest in manifests %}
+ {% if manifest.manifest.heap_size %}
+#error "Please do not add 'heap_size' for partition '{{manifest.manifest.name}}', the dynamic memory allocation is not supported now!"
+ {% endif %}
+{% endfor %}
+/**************************************************************************/
+/** IRQ count per partition */
+/**************************************************************************/
+{% for manifest in manifests %}
+ {% if manifest.attr.conditional %}
+#ifdef {{manifest.attr.conditional}}
+ {% endif %}
+ {% if manifest.manifest.irqs %}
+#define TFM_PARTITION_{{manifest.manifest.name}}_IRQ_COUNT {{manifest.manifest.irqs | length() }}
+ {% else %}
+#define TFM_PARTITION_{{manifest.manifest.name}}_IRQ_COUNT 0
+ {% endif %}
+ {% if manifest.attr.conditional %}
+#endif /* {{manifest.attr.conditional}} */
+ {% endif %}
+
+{% endfor %}
+/**************************************************************************/
+/** Declarations of partition init functions */
+/**************************************************************************/
+extern void tfm_nspm_thread_entry(void);
+
+{% for manifest in manifests %}
+ {% if manifest.attr.conditional %}
+#ifdef {{manifest.attr.conditional}}
+ {% endif %}
+extern void {{manifest.manifest.entry_point}}(void);
+ {% if manifest.attr.conditional %}
+#endif /* {{manifest.attr.conditional}} */
+ {% endif %}
+
+{% endfor %}
+/**************************************************************************/
+/** Memory region declarations */
+/**************************************************************************/
+REGION_DECLARE(Image$$, ARM_LIB_STACK, $$ZI$$Base);
+REGION_DECLARE(Image$$, ARM_LIB_STACK, $$ZI$$Limit);
+
+{% for manifest in manifests %}
+ {% if manifest.attr.conditional %}
+#ifdef {{manifest.attr.conditional}}
+ {% endif %}
+REGION_DECLARE(Image$$, {{manifest.manifest.name}}_LINKER, $$Base);
+REGION_DECLARE(Image$$, {{manifest.manifest.name}}_LINKER, $$Limit);
+REGION_DECLARE(Image$$, {{manifest.manifest.name}}_LINKER, $$RO$$Base);
+REGION_DECLARE(Image$$, {{manifest.manifest.name}}_LINKER, $$RO$$Limit);
+REGION_DECLARE(Image$$, {{manifest.manifest.name}}_LINKER, _DATA$$RW$$Base);
+REGION_DECLARE(Image$$, {{manifest.manifest.name}}_LINKER, _DATA$$RW$$Limit);
+REGION_DECLARE(Image$$, {{manifest.manifest.name}}_LINKER, _DATA$$ZI$$Base);
+REGION_DECLARE(Image$$, {{manifest.manifest.name}}_LINKER, _DATA$$ZI$$Limit);
+REGION_DECLARE(Image$$, {{manifest.manifest.name}}_LINKER, _STACK$$ZI$$Base);
+REGION_DECLARE(Image$$, {{manifest.manifest.name}}_LINKER, _STACK$$ZI$$Limit);
+ {% if manifest.attr.conditional %}
+#endif /* {{manifest.attr.conditional}} */
+ {% endif %}
+
+{% endfor %}
+/**************************************************************************/
+/** Dependencies array for Secure Partition */
+/**************************************************************************/
+{% for manifest in manifests %}
+ {% if manifest.manifest.dependencies %}
+ {% if manifest.attr.conditional %}
+#ifdef {{manifest.attr.conditional}}
+ {% endif %}
+static int32_t dependencies_{{manifest.manifest.name}}[] =
+{
+ {% for dependence in manifest.manifest.dependencies %}
+ {% for service in manifest.manifest.services %}
+ {% if dependence == service.name %}
+#error "Please DO NOT include SP's own RoT Service '{{dependence}}', which will cause a deadlock!"
+ {% endif %}
+ {% endfor %}
+ {{dependence}}_SID,
+ {% endfor %}
+};
+ {% if manifest.attr.conditional %}
+#endif /* {{manifest.attr.conditional}} */
+ {% endif %}
+
+ {% endif %}
+{% endfor %}
+/**************************************************************************/
+/** The static data of the partition list */
+/**************************************************************************/
+/*
+ * Static (build-time) configuration of every partition. Index 0 is the
+ * internal non-secure client partition; the generated entries that follow
+ * are index-aligned with the other *_list arrays in this file.
+ */
+const struct spm_partition_static_data_t static_data_list[] =
+{
+    {
+        .psa_framework_version = 0x0100,
+        .partition_id = TFM_SP_NON_SECURE_ID,
+/*
+ * NOTE(review): every other conditional in this file uses #ifdef.
+ * TFM_MULTI_CORE_TOPOLOGY is a flag macro expected to be defined without a
+ * value, which '#if' cannot evaluate (and it evaluates to 0 when the macro
+ * is undefined), so it must be tested with #ifdef.
+ */
+#ifdef TFM_MULTI_CORE_TOPOLOGY
+        .partition_flags = SPM_PART_FLAG_PSA_ROT | SPM_PART_FLAG_IPC,
+#else
+        .partition_flags = SPM_PART_FLAG_APP_ROT | SPM_PART_FLAG_IPC,
+#endif
+        .partition_priority = TFM_PRIORITY_LOW,
+        .partition_init = tfm_nspm_thread_entry,
+    },
+
+{% for manifest in manifests %}
+    {% if manifest.attr.conditional %}
+#ifdef {{manifest.attr.conditional}}
+    {% endif %}
+    {{'{'}}
+    {% if manifest.manifest.psa_framework_version == 1.0 %}
+        .psa_framework_version = 0x0100,
+    {% else %}
+        .psa_framework_version = 0,
+    {% endif %}
+        .partition_id = {{manifest.manifest.name}},
+    {% if manifest.attr.tfm_partition_ipc %}
+        .partition_flags = SPM_PART_FLAG_IPC
+    {% else %}
+        .partition_flags = 0
+    {% endif %}
+    {% if manifest.manifest.type == "APPLICATION-ROT" %}
+                         | SPM_PART_FLAG_APP_ROT
+    {% elif manifest.manifest.type == "PSA-ROT" %}
+                         | SPM_PART_FLAG_PSA_ROT | SPM_PART_FLAG_APP_ROT
+    {% else %}
+#error "Unsupported type '{{manifest.manifest.type}}' for partition '{{manifest.manifest.name}}'!"
+    {% endif %}
+                         ,
+        .partition_priority = TFM_PRIORITY({{manifest.manifest.priority}}),
+        .partition_init = {{manifest.manifest.entry_point}},
+        .dependencies_num = {{manifest.manifest.dependencies | length()}},
+    {% if manifest.manifest.dependencies %}
+        .p_dependencies = dependencies_{{manifest.manifest.name}},
+    {% else %}
+        .p_dependencies = NULL,
+    {% endif %}
+    {{'},'}}
+    {% if manifest.attr.conditional %}
+#endif /* {{manifest.attr.conditional}} */
+    {% endif %}
+
+{% endfor %}
+};
+
+/**************************************************************************/
+/** The platform data of the partition list */
+/**************************************************************************/
+{% for manifest in manifests %}
+ {% if manifest.manifest.mmio_regions %}
+ {% if manifest.attr.conditional %}
+#ifdef {{manifest.attr.conditional}}
+ {% endif %}
+const struct tfm_spm_partition_platform_data_t *
+ platform_data_list_{{manifest.manifest.name}}[] =
+{
+ {% for region in manifest.manifest.mmio_regions %}
+ {% if region.conditional %}
+#ifdef {{region.conditional}}
+ {% endif %}
+ {{region.name}},
+ {% if region.conditional %}
+#endif /* {{region.conditional}} */
+ {% endif %}
+ {% endfor %}
+ NULL
+};
+ {% if manifest.attr.conditional %}
+#endif /* {{manifest.attr.conditional}} */
+ {% endif %}
+
+ {% endif %}
+{% endfor %}
+{# One entry per partition, index-aligned with static_data_list: either the
+   partition's MMIO platform-data array generated above, or NULL when the
+   manifest declares no mmio_regions. The leading NULL belongs to the
+   non-secure placeholder partition at index 0. #}
+const struct tfm_spm_partition_platform_data_t **platform_data_list_list[] =
+{
+    NULL,
+
+{% for manifest in manifests %}
+    {% if manifest.attr.conditional %}
+#ifdef {{manifest.attr.conditional}}
+    {% endif %}
+    {% if manifest.manifest.mmio_regions %}
+    platform_data_list_{{manifest.manifest.name}},
+    {% else %}{# if manifest.manifest.mmio_regions #}
+    NULL,
+    {% endif %}{# if manifest.manifest.mmio_regions #}
+    {% if manifest.attr.conditional %}
+#endif /* {{manifest.attr.conditional}} */
+    {% endif %}
+
+{% endfor %}
+};
+
+/**************************************************************************/
+/** The memory data of the partition list */
+/**************************************************************************/
+{# Linker-derived memory layout per partition, index-aligned with
+   static_data_list. Entry 0 describes the non-secure placeholder partition,
+   mapped onto the ARM_LIB_STACK region. #}
+const struct tfm_spm_partition_memory_data_t memory_data_list[] =
+{
+    {
+        .stack_bottom = PART_REGION_ADDR(ARM_LIB_STACK, $$ZI$$Base),
+        .stack_top = PART_REGION_ADDR(ARM_LIB_STACK, $$ZI$$Limit),
+        /* NOTE(review): rw_start aliases the stack base here -- confirm the
+         * NS placeholder really has no separate RW data region. */
+        .rw_start = PART_REGION_ADDR(ARM_LIB_STACK, $$ZI$$Base),
+    },
+{% for manifest in manifests %}
+    {% if manifest.attr.conditional %}
+#ifdef {{manifest.attr.conditional}}
+    {% endif %}
+    {{'{'}}
+        .code_start = PART_REGION_ADDR({{manifest.manifest.name}}_LINKER, $$Base),
+        .code_limit = PART_REGION_ADDR({{manifest.manifest.name}}_LINKER, $$Limit),
+        .ro_start = PART_REGION_ADDR({{manifest.manifest.name}}_LINKER, $$RO$$Base),
+        .ro_limit = PART_REGION_ADDR({{manifest.manifest.name}}_LINKER, $$RO$$Limit),
+        .rw_start = PART_REGION_ADDR({{manifest.manifest.name}}_LINKER, _DATA$$RW$$Base),
+        .rw_limit = PART_REGION_ADDR({{manifest.manifest.name}}_LINKER, _DATA$$RW$$Limit),
+        .zi_start = PART_REGION_ADDR({{manifest.manifest.name}}_LINKER, _DATA$$ZI$$Base),
+        .zi_limit = PART_REGION_ADDR({{manifest.manifest.name}}_LINKER, _DATA$$ZI$$Limit),
+        .stack_bottom = PART_REGION_ADDR({{manifest.manifest.name}}_LINKER, _STACK$$ZI$$Base),
+        .stack_top = PART_REGION_ADDR({{manifest.manifest.name}}_LINKER, _STACK$$ZI$$Limit),
+    {{'},'}}
+    {% if manifest.attr.conditional %}
+#endif /* {{manifest.attr.conditional}} */
+    {% endif %}
+
+{% endfor %}
+};
+
+/**************************************************************************/
+/** The partition list for the DB */
+/**************************************************************************/
+{# One mutable descriptor per partition, index-aligned with the const arrays
+   above. static_data/platform_data_list are left NULL here; presumably they
+   are wired to the matching *_list entries during SPM init -- confirm.
+   The quoted '{{0}}' expression renders a literal {{0}} aggregate
+   initializer for the placeholder record at index 0. #}
+static struct spm_partition_desc_t partition_list [] =
+{
+    {{'{{0}}'}}, /* placeholder for Non-secure internal partition */
+
+{% for manifest in manifests %}
+    /* -----------------------------------------------------------------------*/
+    /* - Partition DB record for {{manifest.manifest.name}} */
+    /* -----------------------------------------------------------------------*/
+    {% if manifest.attr.conditional %}
+#ifdef {{manifest.attr.conditional}}
+    {% endif %}
+    {{'{'}}
+        /* Runtime data */
+        .runtime_data = {0},
+        .static_data = NULL,
+        .platform_data_list = NULL,
+    {{'},'}}
+    {% if manifest.attr.conditional %}
+#endif /* {{manifest.attr.conditional}} */
+    {% endif %}
+
+{% endfor %}
+};
+
+/*
+ * Top-level partition database consumed by the SPM. 'partition_count' is
+ * derived from the generated partition_list above; 'is_init' starts cleared
+ * (presumably set once SPM initialization completes -- confirm in spm_api).
+ */
+struct spm_partition_db_t g_spm_partition_db = {
+    .is_init = 0,
+    .partition_count = sizeof(partition_list) / sizeof(partition_list[0]),
+    .partitions = partition_list,
+};
+
+#endif /* __TFM_SPM_DB_IPC_INC__ */
\ No newline at end of file
diff --git a/secure_fw/spm/model_ipc/tfm_thread.c b/secure_fw/spm/model_ipc/tfm_thread.c
new file mode 100644
index 0000000..1d66e25
--- /dev/null
+++ b/secure_fw/spm/model_ipc/tfm_thread.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#include <inttypes.h>
+#include "tfm_arch.h"
+#include "tfm_thread.h"
+#include "tfm_utils.h"
+#include "tfm_memory_utils.h"
+#include "tfm/tfm_core_svc.h"
+#include "tfm/spm_api.h"
+#include "tfm_core_utils.h"
+
+/* Force ZERO in case ZI(bss) clear is missing */
+static struct tfm_core_thread_t *p_thrd_head = NULL; /* all started threads, sorted by priority */
+static struct tfm_core_thread_t *p_runn_head = NULL; /* cached search head for RUNNING lookups */
+static struct tfm_core_thread_t *p_curr_thrd = NULL; /* thread currently owning the CPU */
+
+/* Define Macro to fetch global to support future expansion (PERCPU e.g.) */
+#define LIST_HEAD p_thrd_head
+#define RUNN_HEAD p_runn_head
+#define CURR_THRD p_curr_thrd
+
+/*
+ * Walk the list forward from 'pth' and return the first thread found in
+ * RUNNING state, or NULL when no such thread exists.
+ */
+static struct tfm_core_thread_t *find_next_running_thread(
+                                                struct tfm_core_thread_t *pth)
+{
+    struct tfm_core_thread_t *p_node;
+
+    for (p_node = pth; p_node; p_node = p_node->next) {
+        if (p_node->state == THRD_STATE_RUNNING) {
+            break;
+        }
+    }
+
+    return p_node;
+}
+
+/*
+ * Return the next thread the scheduler should run, or NULL when no thread
+ * is in RUNNING state.
+ */
+struct tfm_core_thread_t *tfm_core_thrd_get_next_thread(void)
+{
+    /*
+     * Searching starts from RUNN_HEAD, the cached hint refreshed by
+     * tfm_core_thrd_set_state(). The first RUNNING thread found has the
+     * highest priority since threads are kept sorted by priority.
+     */
+    return find_next_running_thread(RUNN_HEAD);
+}
+
+/*
+ * Return the thread currently owning the CPU (NULL until
+ * tfm_core_thrd_start_scheduler() has run).
+ */
+struct tfm_core_thread_t *tfm_core_thrd_get_curr_thread(void)
+{
+    return CURR_THRD;
+}
+
+/*
+ * Link 'node' into the singly linked list at '*head', keeping the list
+ * sorted by ascending 'prior' value (per the list's design the head holds
+ * the highest priority). The node is placed in front of the first entry
+ * whose priority value is not smaller than its own, so a newly inserted
+ * node precedes existing nodes of equal priority value.
+ */
+static void insert_by_prior(struct tfm_core_thread_t **head,
+                            struct tfm_core_thread_t *node)
+{
+    struct tfm_core_thread_t **pp_next = head;
+
+    /* Advance past every entry that outranks (strictly precedes) 'node' */
+    while (*pp_next && ((*pp_next)->prior < node->prior)) {
+        pp_next = &(*pp_next)->next;
+    }
+
+    node->next = *pp_next;
+    *pp_next = node;
+}
+
+/*
+ * Refresh '*runn', the cached starting point used by
+ * find_next_running_thread(), after 'node' changed state. If 'node' is
+ * RUNNING and beats the cached candidate (smaller 'prior' value, or no
+ * candidate yet) it becomes the new search head; otherwise the cache falls
+ * back to the full list head so no RUNNING thread can ever be skipped.
+ */
+static void update_running_head(struct tfm_core_thread_t **runn,
+                                struct tfm_core_thread_t *node)
+{
+    if ((node->state == THRD_STATE_RUNNING) &&
+        (*runn == NULL || (node->prior < (*runn)->prior))) {
+        *runn = node;
+    } else {
+        /* Conservative fallback: restart future searches from the list head */
+        *runn = LIST_HEAD;
+    }
+}
+
+/*
+ * Fill in the caller-visible members of a thread object ahead of
+ * tfm_core_thrd_start(). Context members are set only -- no validation is
+ * performed here; the values are checked when the thread is started.
+ */
+void tfm_core_thrd_init(struct tfm_core_thread_t *pth,
+                        tfm_core_thrd_entry_t pfn, void *param,
+                        uintptr_t stk_top, uintptr_t stk_btm)
+{
+    /* Entry point, argument and stack boundaries come from the caller */
+    pth->pfn = pfn;
+    pth->param = param;
+    pth->stk_top = stk_top;
+    pth->stk_btm = stk_btm;
+    /* Every new thread begins at medium priority, in CREATING state */
+    pth->prior = THRD_PRIOR_MEDIUM;
+    pth->state = THRD_STATE_CREATING;
+}
+
+/*
+ * Make an initialized thread schedulable: validate its settings, prepare
+ * the architecture context for thread mode, queue it by priority and mark
+ * it RUNNING. Returns THRD_SUCCESS, or THRD_ERR_INVALID_PARAM when the
+ * thread is not in CREATING state or lacks an entry point or stack bounds.
+ */
+uint32_t tfm_core_thrd_start(struct tfm_core_thread_t *pth)
+{
+    /* Only a freshly initialized (CREATING) thread may be started */
+    if (pth->state != THRD_STATE_CREATING) {
+        return THRD_ERR_INVALID_PARAM;
+    }
+
+    /* An entry point and both stack boundaries must be configured */
+    if ((pth->pfn == NULL) || (pth->stk_btm == 0) || (pth->stk_top == 0)) {
+        return THRD_ERR_INVALID_PARAM;
+    }
+
+    /* Thread management runs in handler mode; set context for thread mode. */
+    tfm_arch_init_context(&pth->arch_ctx, pth->param, (uintptr_t)pth->pfn,
+                          pth->stk_btm, pth->stk_top);
+
+    /* Queue the thread by priority, then mark it RUNNING */
+    insert_by_prior(&LIST_HEAD, pth);
+    tfm_core_thrd_set_state(pth, THRD_STATE_RUNNING);
+
+    return THRD_SUCCESS;
+}
+
+/*
+ * Move 'pth' into 'new_state' and refresh the cached running-thread search
+ * head. 'new_state' must be a valid THRD_STATE_* value (below
+ * THRD_STATE_INVALID); violations trip the assert.
+ */
+void tfm_core_thrd_set_state(struct tfm_core_thread_t *pth, uint32_t new_state)
+{
+    TFM_CORE_ASSERT(pth != NULL && new_state < THRD_STATE_INVALID);
+
+    pth->state = new_state;
+    update_running_head(&RUNN_HEAD, pth);
+}
+
+/*
+ * Request a reschedule by pending PendSV. Scheduling won't happen
+ * immediately but after the current exception returns.
+ */
+void tfm_core_thrd_activate_schedule(void)
+{
+    tfm_arch_trigger_pendsv();
+}
+
+/*
+ * Kick off scheduling with 'pth' as the initial current thread.
+ */
+void tfm_core_thrd_start_scheduler(struct tfm_core_thread_t *pth)
+{
+    /*
+     * There is no selected thread before scheduler start, assign the caller
+     * provided thread as the current thread. Update the hardware PSP/PSPLIM
+     * with the value in thread context to ensure they are identical.
+     * This function can be called only ONCE; further calling triggers assert
+     * (CURR_THRD must still be NULL, and 'pth' must carry a prepared stack
+     * pointer from tfm_core_thrd_start()).
+     */
+    TFM_CORE_ASSERT(CURR_THRD == NULL);
+    TFM_CORE_ASSERT(pth != NULL);
+    TFM_CORE_ASSERT(pth->arch_ctx.sp != 0);
+
+    tfm_arch_update_ctx(&pth->arch_ctx);
+
+    CURR_THRD = pth;
+
+    tfm_core_thrd_activate_schedule();
+}
+
+/*
+ * Swap thread contexts during a scheduling exception. 'p_actx' is the live
+ * architecture context being saved/restored; 'prev' is the outgoing thread
+ * and 'next' the incoming one.
+ */
+void tfm_core_thrd_switch_context(struct tfm_arch_ctx_t *p_actx,
+                                  struct tfm_core_thread_t *prev,
+                                  struct tfm_core_thread_t *next)
+{
+    TFM_CORE_ASSERT(prev != NULL);
+    TFM_CORE_ASSERT(next != NULL);
+
+    /*
+     * First, update latest context into the current thread context.
+     * Then, update background context with next thread's context.
+     * The order matters: 'prev' must be saved before 'p_actx' is overwritten.
+     */
+    tfm_core_util_memcpy(&prev->arch_ctx, p_actx, sizeof(*p_actx));
+    tfm_core_util_memcpy(p_actx, &next->arch_ctx, sizeof(next->arch_ctx));
+
+    /* Update current thread indicator */
+    CURR_THRD = next;
+}
diff --git a/secure_fw/spm/model_ipc/tfm_wait.c b/secure_fw/spm/model_ipc/tfm_wait.c
new file mode 100644
index 0000000..efb7be6
--- /dev/null
+++ b/secure_fw/spm/model_ipc/tfm_wait.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#include "tfm_thread.h"
+#include "tfm_utils.h"
+#include "tfm_wait.h"
+
+/*
+ * Block the current thread on 'pevnt': record it as the event owner, move
+ * it to BLOCK state and request a reschedule. The event must carry the
+ * TFM_EVENT_MAGIC marker (i.e. be a properly initialized event object).
+ */
+void tfm_event_wait(struct tfm_event_t *pevnt)
+{
+    TFM_CORE_ASSERT(pevnt && pevnt->magic == TFM_EVENT_MAGIC);
+
+    pevnt->owner = tfm_core_thrd_get_curr_thread();
+    tfm_core_thrd_set_state(pevnt->owner, THRD_STATE_BLOCK);
+    tfm_core_thrd_activate_schedule();
+}
+
+/*
+ * Wake the thread blocked on 'pevnt', if any: mark it RUNNING, store
+ * 'retval' for it via tfm_core_thrd_set_retval(), clear the event ownership
+ * and request a reschedule. Waking an event with no BLOCK-state owner is a
+ * deliberate no-op.
+ */
+void tfm_event_wake(struct tfm_event_t *pevnt, uint32_t retval)
+{
+    TFM_CORE_ASSERT(pevnt && pevnt->magic == TFM_EVENT_MAGIC);
+
+    if (pevnt->owner && pevnt->owner->state == THRD_STATE_BLOCK) {
+        tfm_core_thrd_set_state(pevnt->owner, THRD_STATE_RUNNING);
+        tfm_core_thrd_set_retval(pevnt->owner, retval);
+        pevnt->owner = NULL;
+        tfm_core_thrd_activate_schedule();
+    }
+}