aboutsummaryrefslogtreecommitdiff
path: root/interface
diff options
context:
space:
mode:
authorDavid Hu <david.hu@arm.com>2020-11-26 16:47:46 +0800
committerDavid Hu <david.hu@arm.com>2021-02-05 03:32:50 +0000
commitd597345eb4915d116e1344a8b963363b75f78e17 (patch)
tree6219ec6255f6c42933fd3820850b56474bad72b7 /interface
parent62f05aabbd98c18b490a2381f72425b01c8470d0 (diff)
downloadtrusted-firmware-m-d597345eb4915d116e1344a8b963363b75f78e17.tar.gz
Dualcpu: Add a new NS mailbox working model with a dedicated thread
Support a new NS mailbox working model. When TFM_MULTI_CORE_NS_OS_MAILBOX_THREAD is selected, NS OS should allocate a dedicated NS mailbox thread to receive requests from application threads and send mailbox messages to SPE. The new working model consists of the following features: - Define a request structure to collect parameters of application threads. - Pass requests from application threads to the NS mailbox thread via RTOS message queue. - Assign application thread specific woken flag to enable threads to check woken status without SVC. - Remove the semaphores. When TFM_MULTI_CORE_NS_OS_MAILBOX_THREAD is disabled, the original NS mailbox working model will be selected. Also extract common parts from NS mailbox models. Change-Id: I8f2601c21ad112b10315748b13e5b09cd1f58b29 Signed-off-by: David Hu <david.hu@arm.com>
Diffstat (limited to 'interface')
-rw-r--r--interface/include/multi_core/tfm_mailbox.h26
-rw-r--r--interface/include/multi_core/tfm_ns_mailbox.h131
-rw-r--r--interface/src/multi_core/tfm_ns_mailbox.c143
-rw-r--r--interface/src/multi_core/tfm_ns_mailbox_rtos_api.c53
-rw-r--r--interface/src/multi_core/tfm_ns_mailbox_test.c78
-rw-r--r--interface/src/multi_core/tfm_ns_mailbox_thread.c353
6 files changed, 650 insertions, 134 deletions
diff --git a/interface/include/multi_core/tfm_mailbox.h b/interface/include/multi_core/tfm_mailbox.h
index 5824543bc..74527a23b 100644
--- a/interface/include/multi_core/tfm_mailbox.h
+++ b/interface/include/multi_core/tfm_mailbox.h
@@ -93,20 +93,28 @@ struct mailbox_msg_t {
* to hold the PSA client call return result from SPE
*/
struct mailbox_reply_t {
- int32_t return_val;
+ int32_t return_val;
+ const void *owner; /* Handle of owner task. */
+ int32_t *reply; /* Address of reply value belonging
+ * to owner task.
+ */
+#ifdef TFM_MULTI_CORE_NS_OS_MAILBOX_THREAD
+ uint8_t *woken_flag; /* Indicate that owner task has been
+ * or should be woken up, after the
+ * reply is received.
+ */
+#else
+ bool is_woken; /* Indicate that owner task has been
+ * or should be woken up, after the
+ * reply is received.
+ */
+#endif
};
/* A single slot structure in NSPE mailbox queue */
struct ns_mailbox_slot_t {
struct mailbox_msg_t msg;
struct mailbox_reply_t reply;
- const void *owner; /* Handle of the owner task of this
- * slot
- */
- bool is_woken; /* Indicate that owner task has been
- * or should be woken up, after the
- * replied is received.
- */
};
typedef uint32_t mailbox_queue_status_t;
@@ -135,6 +143,8 @@ struct ns_mailbox_queue_t {
* NS thread requests a mailbox
* queue slot.
*/
+
+ bool is_full; /* Queue is full */
#endif
};
diff --git a/interface/include/multi_core/tfm_ns_mailbox.h b/interface/include/multi_core/tfm_ns_mailbox.h
index 69d0b2999..7ba15bfcf 100644
--- a/interface/include/multi_core/tfm_ns_mailbox.h
+++ b/interface/include/multi_core/tfm_ns_mailbox.h
@@ -12,6 +12,8 @@
#include <stdbool.h>
#include <stdint.h>
+
+#include "cmsis_compiler.h"
#include "tfm_mailbox.h"
#ifdef __cplusplus
@@ -88,6 +90,20 @@ static inline int32_t tfm_ns_mailbox_wake_reply_owner_isr(void)
}
#endif
+#ifdef TFM_MULTI_CORE_NS_OS_MAILBOX_THREAD
+/**
+ * \brief Handling PSA client calls in a dedicated NS mailbox thread.
+ * This function constructs NS mailbox messages, transmits them to SPE
+ * mailbox and returns the results to NS PSA client.
+ *
+ * \param[in] args The pointer to the structure of PSA client call
+ * parameters.
+ */
+void tfm_ns_mailbox_thread_runner(void *args);
+#else /* TFM_MULTI_CORE_NS_OS_MAILBOX_THREAD */
+#define tfm_ns_mailbox_thread_runner(args) do {} while (0)
+#endif /* TFM_MULTI_CORE_NS_OS_MAILBOX_THREAD */
+
/**
* \brief Platform specific NSPE mailbox initialization.
* Invoked by \ref tfm_ns_mailbox_init().
@@ -202,6 +218,47 @@ void tfm_ns_mailbox_os_wait_reply(void);
* \param[in] task_handle The handle to the task to be woken up.
*/
void tfm_ns_mailbox_os_wake_task_isr(const void *task_handle);
+
+/**
+ * \brief Create and initialize a message queue
+ *
+ * \param[in] msg_size The maximum message size in bytes
+ * \param[in] msg_count The maximum number of messages in queue
+ *
+ * \return Returns handle of the message queue created, or NULL in case of error
+ */
+void *tfm_ns_mailbox_os_mq_create(size_t msg_size, uint8_t msg_count);
+
+/**
+ * \brief Send a request via message queue
+ *
+ * \param[in] mq_handle The handle of message queue
+ * \param[in] msg_ptr The pointer to the message to be sent
+ *
+ * \note The message size must be the same as the value set in
+ * \ref tfm_ns_mailbox_os_mq_create.
+ *
+ * \return \ref MAILBOX_SUCCESS if the message is successfully sent, or
+ * other return code in case of error
+ */
+int32_t tfm_ns_mailbox_os_mq_send(void *mq_handle, const void *msg_ptr);
+
+/**
+ * \brief Receive a request from message queue
+ *
+ * \param[in] mq_handle The handle of message queue
+ * \param[in] msg_ptr The pointer to buffer for message to be received
+ *
+ * \return \ref MAILBOX_SUCCESS if the message is successfully received, or
+ * other return code in case of error
+ *
+ * \note The message size is the same as the value set in
+ * \ref tfm_ns_mailbox_os_mq_create.
+ *
+ * \note The function should be blocked until a message is received from message
+ * queue, unless a fatal error occurs.
+ */
+int32_t tfm_ns_mailbox_os_mq_receive(void *mq_handle, void *msg_ptr);
#else /* TFM_MULTI_CORE_NS_OS */
#define tfm_ns_mailbox_os_wait_reply() do {} while (0)
@@ -233,8 +290,28 @@ static inline const void *tfm_ns_mailbox_os_get_task_handle(void)
* \brief Initialize the statistics module in TF-M NSPE mailbox.
*
* \note This function is only available when multi-core tests are enabled.
+ *
+ * \param[in] ns_queue The NSPE mailbox queue to be tracked.
+ */
+void tfm_ns_mailbox_tx_stats_init(struct ns_mailbox_queue_t *ns_queue);
+
+/**
+ * \brief Re-initialize the statistics module in TF-M NSPE mailbox.
+ * Clean up statistics data.
+ *
+ * \note This function is only available when multi-core tests are enabled.
+ *
+ * \return \ref MAILBOX_SUCCESS if the operation succeeded, or other return code
+ * in case of error
+ */
+int32_t tfm_ns_mailbox_tx_stats_reinit(void);
+
+/**
+ * \brief Update the statistics result of NSPE mailbox message transmission.
+ *
+ * \note This function is only available when multi-core tests are enabled.
*/
-void tfm_ns_mailbox_tx_stats_init(void);
+void tfm_ns_mailbox_tx_stats_update(void);
/**
* \brief Calculate the average number of used NS mailbox queue slots each time
@@ -251,6 +328,58 @@ void tfm_ns_mailbox_tx_stats_init(void);
void tfm_ns_mailbox_stats_avg_slot(struct ns_mailbox_stats_res_t *stats_res);
#endif
+#ifdef TFM_MULTI_CORE_NS_OS
+/*
+ * When NSPE mailbox only covers a single non-secure core, spinlock only
+ * requires to disable IRQ.
+ */
+static inline void ns_mailbox_spin_lock(void)
+{
+ __disable_irq();
+}
+
+/*
+ * It is assumed that IRQ is always enabled when spinlock is acquired.
+ * Otherwise, the waiting thread won't be woken up.
+ */
+static inline void ns_mailbox_spin_unlock(void)
+{
+ __enable_irq();
+}
+#else /* TFM_MULTI_CORE_NS_OS */
+/*
+ * Local spinlock is implemented as a dummy one when integrating with NS bare
+ * metal environment since interrupt is not required in NS mailbox.
+ */
+#define ns_mailbox_spin_lock() do {} while (0)
+
+#define ns_mailbox_spin_unlock() do {} while (0)
+#endif /* TFM_MULTI_CORE_NS_OS */
+
+/* The following inline functions configure non-secure mailbox queue status */
+static inline void clear_queue_slot_empty(struct ns_mailbox_queue_t *queue_ptr,
+ uint8_t idx)
+{
+ if (idx < NUM_MAILBOX_QUEUE_SLOT) {
+ queue_ptr->empty_slots &= ~(1UL << idx);
+ }
+}
+
+static inline void set_queue_slot_pend(struct ns_mailbox_queue_t *queue_ptr,
+ uint8_t idx)
+{
+ if (idx < NUM_MAILBOX_QUEUE_SLOT) {
+ queue_ptr->pend_slots |= (1UL << idx);
+ }
+}
+
+static inline void clear_queue_slot_all_replied(
+ struct ns_mailbox_queue_t *queue_ptr,
+ mailbox_queue_status_t status)
+{
+ queue_ptr->replied_slots &= ~status;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/interface/src/multi_core/tfm_ns_mailbox.c b/interface/src/multi_core/tfm_ns_mailbox.c
index 90836ff5e..922e5fed9 100644
--- a/interface/src/multi_core/tfm_ns_mailbox.c
+++ b/interface/src/multi_core/tfm_ns_mailbox.c
@@ -7,7 +7,6 @@
#include <string.h>
-#include "cmsis_compiler.h"
#include "tfm_ns_mailbox.h"
/* The pointer to NSPE mailbox queue */
@@ -15,98 +14,51 @@ static struct ns_mailbox_queue_t *mailbox_queue_ptr = NULL;
static int32_t mailbox_wait_reply(uint8_t idx);
-static inline void clear_queue_slot_empty(uint8_t idx)
-{
- if (idx < NUM_MAILBOX_QUEUE_SLOT) {
- mailbox_queue_ptr->empty_slots &= ~(1 << idx);
- }
-}
-
static inline void set_queue_slot_empty(uint8_t idx)
{
if (idx < NUM_MAILBOX_QUEUE_SLOT) {
- mailbox_queue_ptr->empty_slots |= (1 << idx);
- }
-}
-
-static inline void set_queue_slot_pend(uint8_t idx)
-{
- if (idx < NUM_MAILBOX_QUEUE_SLOT) {
- mailbox_queue_ptr->pend_slots |= (1 << idx);
+ mailbox_queue_ptr->empty_slots |= (1UL << idx);
}
}
-static inline void clear_queue_slot_replied(uint8_t idx)
+static inline void set_queue_slot_woken(uint8_t idx)
{
if (idx < NUM_MAILBOX_QUEUE_SLOT) {
- mailbox_queue_ptr->replied_slots &= ~(1 << idx);
+ mailbox_queue_ptr->queue[idx].reply.is_woken = true;
}
}
-static inline void clear_queue_slot_all_replied(mailbox_queue_status_t status)
-{
- mailbox_queue_ptr->replied_slots &= ~status;
-}
-
-static inline bool is_queue_slot_replied(uint8_t idx)
+static inline bool is_queue_slot_woken(uint8_t idx)
{
if (idx < NUM_MAILBOX_QUEUE_SLOT) {
- return mailbox_queue_ptr->replied_slots & (1UL << idx);
+ return mailbox_queue_ptr->queue[idx].reply.is_woken;
}
return false;
}
-static inline void set_queue_slot_woken(uint8_t idx)
+static inline void clear_queue_slot_woken(uint8_t idx)
{
if (idx < NUM_MAILBOX_QUEUE_SLOT) {
- mailbox_queue_ptr->queue[idx].is_woken = true;
+ mailbox_queue_ptr->queue[idx].reply.is_woken = false;
}
}
-static inline bool is_queue_slot_woken(uint8_t idx)
+static inline void clear_queue_slot_replied(uint8_t idx)
{
if (idx < NUM_MAILBOX_QUEUE_SLOT) {
- return mailbox_queue_ptr->queue[idx].is_woken;
+ mailbox_queue_ptr->replied_slots &= ~(1UL << idx);
}
-
- return false;
}
-static inline void clear_queue_slot_woken(uint8_t idx)
+static inline bool is_queue_slot_replied(uint8_t idx)
{
if (idx < NUM_MAILBOX_QUEUE_SLOT) {
- mailbox_queue_ptr->queue[idx].is_woken = false;
+ return mailbox_queue_ptr->replied_slots & (1UL << idx);
}
-}
-#ifdef TFM_MULTI_CORE_NS_OS
-/*
- * When NSPE mailbox only covers a single non-secure core, spinlock only
- * requires to disable IRQ.
- */
-static inline void ns_mailbox_spin_lock(void)
-{
- __disable_irq();
-}
-
-/*
- * It is assumed that IRQ is always enabled when spinlock is acquired.
- * Otherwise, the waiting thread won't be woken up.
- */
-static inline void ns_mailbox_spin_unlock(void)
-{
- __enable_irq();
+ return false;
}
-#else /* TFM_MULTI_CORE_NS_OS */
-/*
- * Local spinlock is implemented as a dummy one when integrating with NS bare
- * metal environment since interrupt is not required in NS mailbox.
- */
-#define ns_mailbox_spin_lock() do {} while (0)
-
-#define ns_mailbox_spin_unlock() do {} while (0)
-#endif /* TFM_MULTI_CORE_NS_OS */
static uint8_t acquire_empty_slot(struct ns_mailbox_queue_t *queue)
{
@@ -129,7 +81,7 @@ static uint8_t acquire_empty_slot(struct ns_mailbox_queue_t *queue)
}
ns_mailbox_spin_lock();
- clear_queue_slot_empty(idx);
+ clear_queue_slot_empty(queue, idx);
ns_mailbox_spin_unlock();
return idx;
@@ -138,65 +90,9 @@ static uint8_t acquire_empty_slot(struct ns_mailbox_queue_t *queue)
static void set_msg_owner(uint8_t idx, const void *owner)
{
if (idx < NUM_MAILBOX_QUEUE_SLOT) {
- mailbox_queue_ptr->queue[idx].owner = owner;
- }
-}
-
-#ifdef TFM_MULTI_CORE_TEST
-void tfm_ns_mailbox_tx_stats_init(void)
-{
- if (!mailbox_queue_ptr) {
- return;
- }
-
- mailbox_queue_ptr->nr_tx = 0;
- mailbox_queue_ptr->nr_used_slots = 0;
-}
-
-static void mailbox_tx_stats_update(struct ns_mailbox_queue_t *ns_queue)
-{
- mailbox_queue_status_t empty_status;
- uint8_t idx, nr_empty = 0;
-
- if (!ns_queue) {
- return;
- }
-
- ns_mailbox_spin_lock();
- /* Count the number of used slots when this tx arrives */
- empty_status = ns_queue->empty_slots;
- ns_mailbox_spin_unlock();
-
- if (empty_status) {
- for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
- if (empty_status & (0x1UL << idx)) {
- nr_empty++;
- }
- }
- }
-
- ns_mailbox_spin_lock();
- ns_queue->nr_used_slots += (NUM_MAILBOX_QUEUE_SLOT - nr_empty);
- ns_queue->nr_tx++;
- ns_mailbox_spin_unlock();
-}
-
-void tfm_ns_mailbox_stats_avg_slot(struct ns_mailbox_stats_res_t *stats_res)
-{
- uint32_t nr_used_slots, nr_tx;
-
- if (!mailbox_queue_ptr || !stats_res) {
- return;
+ mailbox_queue_ptr->queue[idx].reply.owner = owner;
}
-
- nr_used_slots = mailbox_queue_ptr->nr_used_slots;
- nr_tx = mailbox_queue_ptr->nr_tx;
-
- stats_res->avg_nr_slots = nr_used_slots / nr_tx;
- nr_used_slots %= nr_tx;
- stats_res->avg_nr_slots_tenths = nr_used_slots * 10 / nr_tx;
}
-#endif
static int32_t mailbox_tx_client_req(uint32_t call_type,
const struct psa_client_params_t *params,
@@ -213,7 +109,7 @@ static int32_t mailbox_tx_client_req(uint32_t call_type,
}
#ifdef TFM_MULTI_CORE_TEST
- mailbox_tx_stats_update(mailbox_queue_ptr);
+ tfm_ns_mailbox_tx_stats_update();
#endif
/* Fill the mailbox message */
@@ -231,7 +127,7 @@ static int32_t mailbox_tx_client_req(uint32_t call_type,
set_msg_owner(idx, task_handle);
tfm_ns_mailbox_hal_enter_critical();
- set_queue_slot_pend(idx);
+ set_queue_slot_pend(mailbox_queue_ptr, idx);
tfm_ns_mailbox_hal_exit_critical();
tfm_ns_mailbox_hal_notify_peer();
@@ -315,7 +211,7 @@ int32_t tfm_ns_mailbox_wake_reply_owner_isr(void)
tfm_ns_mailbox_hal_enter_critical_isr();
replied_status = mailbox_queue_ptr->replied_slots;
- clear_queue_slot_all_replied(replied_status);
+ clear_queue_slot_all_replied(mailbox_queue_ptr, replied_status);
tfm_ns_mailbox_hal_exit_critical_isr();
if (!replied_status) {
@@ -336,7 +232,8 @@ int32_t tfm_ns_mailbox_wake_reply_owner_isr(void)
set_queue_slot_woken(idx);
tfm_ns_mailbox_hal_exit_critical_isr();
- tfm_ns_mailbox_os_wake_task_isr(mailbox_queue_ptr->queue[idx].owner);
+ tfm_ns_mailbox_os_wake_task_isr(
+ mailbox_queue_ptr->queue[idx].reply.owner);
replied_status &= ~(0x1UL << idx);
if (!replied_status) {
@@ -439,7 +336,7 @@ int32_t tfm_ns_mailbox_init(struct ns_mailbox_queue_t *queue)
ret = tfm_ns_mailbox_os_lock_init();
#ifdef TFM_MULTI_CORE_TEST
- tfm_ns_mailbox_tx_stats_init();
+ tfm_ns_mailbox_tx_stats_init(queue);
#endif
return ret;
diff --git a/interface/src/multi_core/tfm_ns_mailbox_rtos_api.c b/interface/src/multi_core/tfm_ns_mailbox_rtos_api.c
index 1ac1b4fba..629e108c8 100644
--- a/interface/src/multi_core/tfm_ns_mailbox_rtos_api.c
+++ b/interface/src/multi_core/tfm_ns_mailbox_rtos_api.c
@@ -11,13 +11,15 @@
* It can be replaced by RTOS specific implementation.
*/
+#ifdef TFM_MULTI_CORE_NS_OS_MAILBOX_THREAD
+#include "os_wrapper/msg_queue.h"
+#else
#include "os_wrapper/semaphore.h"
+#endif
#include "os_wrapper/thread.h"
#include "tfm_ns_mailbox.h"
-#define MAX_SEMAPHORE_COUNT NUM_MAILBOX_QUEUE_SLOT
-
/*
* Thread flag to manage wait/wake mechanism in mailbox.
* Thread flag can be RTOS specific.
@@ -26,7 +28,11 @@
*/
#define MAILBOX_THREAD_FLAG 0x5FCA0000
+#ifndef TFM_MULTI_CORE_NS_OS_MAILBOX_THREAD
+#define MAX_SEMAPHORE_COUNT NUM_MAILBOX_QUEUE_SLOT
+
static void *ns_lock_handle = NULL;
+#endif
const void *tfm_ns_mailbox_os_get_task_handle(void)
{
@@ -43,6 +49,48 @@ void tfm_ns_mailbox_os_wake_task_isr(const void *task_handle)
os_wrapper_thread_set_flag_isr((void *)task_handle, MAILBOX_THREAD_FLAG);
}
+#ifdef TFM_MULTI_CORE_NS_OS_MAILBOX_THREAD
+void *tfm_ns_mailbox_os_mq_create(size_t msg_size, uint8_t msg_count)
+{
+ return os_wrapper_msg_queue_create(msg_size, msg_count);
+}
+
+int32_t tfm_ns_mailbox_os_mq_send(void *mq_handle, const void *msg_ptr)
+{
+ int32_t ret;
+
+ if (!mq_handle || !msg_ptr) {
+ return MAILBOX_INVAL_PARAMS;
+ }
+
+ while (1) {
+ ret = os_wrapper_msg_queue_send(mq_handle, msg_ptr);
+ if (ret == OS_WRAPPER_SUCCESS) {
+ return MAILBOX_SUCCESS;
+ }
+ }
+
+ return MAILBOX_GENERIC_ERROR;
+}
+
+int32_t tfm_ns_mailbox_os_mq_receive(void *mq_handle, void *msg_ptr)
+{
+ int32_t ret;
+
+ if (!mq_handle || !msg_ptr) {
+ return MAILBOX_INVAL_PARAMS;
+ }
+
+ while (1) {
+ ret = os_wrapper_msg_queue_receive(mq_handle, msg_ptr);
+ if (ret == OS_WRAPPER_SUCCESS) {
+ return MAILBOX_SUCCESS;
+ }
+ }
+
+ return MAILBOX_GENERIC_ERROR;
+}
+#else /* TFM_MULTI_CORE_NS_OS_MAILBOX_THREAD */
int32_t tfm_ns_mailbox_os_lock_init(void)
{
ns_lock_handle = os_wrapper_semaphore_create(MAX_SEMAPHORE_COUNT,
@@ -65,3 +113,4 @@ int32_t tfm_ns_mailbox_os_lock_release(void)
{
return os_wrapper_semaphore_release(ns_lock_handle);
}
+#endif /* TFM_MULTI_CORE_NS_OS_MAILBOX_THREAD */
diff --git a/interface/src/multi_core/tfm_ns_mailbox_test.c b/interface/src/multi_core/tfm_ns_mailbox_test.c
new file mode 100644
index 000000000..e12cdc956
--- /dev/null
+++ b/interface/src/multi_core/tfm_ns_mailbox_test.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include "tfm_ns_mailbox.h"
+
+static struct ns_mailbox_queue_t *stats_queue_ptr = NULL;
+
+void tfm_ns_mailbox_tx_stats_init(struct ns_mailbox_queue_t *ns_queue)
+{
+ if (!ns_queue) {
+ return;
+ }
+
+ ns_queue->nr_tx = 0;
+ ns_queue->nr_used_slots = 0;
+
+ stats_queue_ptr = ns_queue;
+}
+
+int32_t tfm_ns_mailbox_tx_stats_reinit(void)
+{
+ if (!stats_queue_ptr) {
+ return MAILBOX_INVAL_PARAMS;
+ }
+
+ stats_queue_ptr->nr_tx = 0;
+ stats_queue_ptr->nr_used_slots = 0;
+
+ return MAILBOX_SUCCESS;
+}
+
+void tfm_ns_mailbox_tx_stats_update(void)
+{
+ mailbox_queue_status_t empty_status;
+ uint8_t idx, nr_empty = 0;
+
+ if (!stats_queue_ptr) {
+ return;
+ }
+
+ ns_mailbox_spin_lock();
+ /* Count the number of used slots when this tx arrives */
+ empty_status = stats_queue_ptr->empty_slots;
+ ns_mailbox_spin_unlock();
+
+ if (empty_status) {
+ for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
+ if (empty_status & (0x1UL << idx)) {
+ nr_empty++;
+ }
+ }
+ }
+
+ ns_mailbox_spin_lock();
+ stats_queue_ptr->nr_used_slots += (NUM_MAILBOX_QUEUE_SLOT - nr_empty);
+ stats_queue_ptr->nr_tx++;
+ ns_mailbox_spin_unlock();
+}
+
+void tfm_ns_mailbox_stats_avg_slot(struct ns_mailbox_stats_res_t *stats_res)
+{
+ uint32_t nr_used_slots, nr_tx;
+
+ if (!stats_queue_ptr || !stats_res) {
+ return;
+ }
+
+ nr_used_slots = stats_queue_ptr->nr_used_slots;
+ nr_tx = stats_queue_ptr->nr_tx;
+
+ stats_res->avg_nr_slots = nr_used_slots / nr_tx;
+ nr_used_slots %= nr_tx;
+ stats_res->avg_nr_slots_tenths = nr_used_slots * 10 / nr_tx;
+}
diff --git a/interface/src/multi_core/tfm_ns_mailbox_thread.c b/interface/src/multi_core/tfm_ns_mailbox_thread.c
new file mode 100644
index 000000000..6a77b1b69
--- /dev/null
+++ b/interface/src/multi_core/tfm_ns_mailbox_thread.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <string.h>
+
+#include "tfm_ns_mailbox.h"
+
+/* Thread woken up flag */
+#define NOT_WOKEN 0x0
+#define WOKEN_UP 0x5C
+
+/*
+ * The request contains the parameters which application threads share with
+ * NS mailbox thread.
+ */
+struct ns_mailbox_req_t {
+ uint32_t call_type; /* PSA client call type */
+ const struct psa_client_params_t *params_ptr; /* Pointer to PSA client call
+ * parameters.
+ */
+ int32_t client_id; /* Optional client ID of the
+ * non-secure caller.
+ * It is required to identify
+ * the non-secure task when
+ * NSPE OS enforces non-secure
+ * task isolation
+ */
+ const void *owner; /* Handle of owner task. */
+ int32_t *reply; /* Address of reply value
+ * belonging to owner task.
+ */
+
+ uint8_t *woken_flag; /* Indicate that owner task
+ * has been or should be woken
+ * up, after the reply is
+ * received.
+ */
+};
+
+/* Message queue handle */
+static void *msgq_handle = NULL;
+
+/* The handle of the dedicated NS mailbox thread. */
+static const void *ns_mailbox_thread_handle = NULL;
+
+/* The pointer to NSPE mailbox queue */
+static struct ns_mailbox_queue_t *mailbox_queue_ptr = NULL;
+
+static inline void set_queue_slot_all_empty(mailbox_queue_status_t completed)
+{
+ mailbox_queue_ptr->empty_slots |= completed;
+}
+
+static inline void set_queue_slot_woken(uint8_t idx)
+{
+ if (idx < NUM_MAILBOX_QUEUE_SLOT) {
+ *mailbox_queue_ptr->queue[idx].reply.woken_flag = WOKEN_UP;
+ }
+}
+
+static uint8_t acquire_empty_slot(struct ns_mailbox_queue_t *queue)
+{
+ uint8_t idx;
+ mailbox_queue_status_t status;
+
+ while (1) {
+ ns_mailbox_spin_lock();
+ status = queue->empty_slots;
+ ns_mailbox_spin_unlock();
+
+ if (status) {
+ break;
+ }
+
+ /* No empty slot */
+ queue->is_full = true;
+ /* DSB to make sure the thread sleeps after the flag is set */
+ __DSB();
+
+ /* Wait for an empty slot released by a completed mailbox message */
+ tfm_ns_mailbox_os_wait_reply();
+ queue->is_full = false;
+ }
+
+ for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
+ if (status & (1 << idx)) {
+ break;
+ }
+ }
+
+ ns_mailbox_spin_lock();
+ clear_queue_slot_empty(queue, idx);
+ ns_mailbox_spin_unlock();
+
+ return idx;
+}
+
+static int32_t mailbox_tx_client_call_msg(const struct ns_mailbox_req_t *req,
+ uint8_t *slot_idx)
+{
+ struct mailbox_msg_t *msg_ptr;
+ struct mailbox_reply_t *reply_ptr;
+ uint8_t idx = NUM_MAILBOX_QUEUE_SLOT;
+
+ idx = acquire_empty_slot(mailbox_queue_ptr);
+ if (idx == NUM_MAILBOX_QUEUE_SLOT) {
+ return MAILBOX_QUEUE_FULL;
+ }
+
+#ifdef TFM_MULTI_CORE_TEST
+ tfm_ns_mailbox_tx_stats_update();
+#endif
+
+ /* Fill the mailbox message */
+ msg_ptr = &mailbox_queue_ptr->queue[idx].msg;
+ msg_ptr->call_type = req->call_type;
+ memcpy(&msg_ptr->params, req->params_ptr, sizeof(msg_ptr->params));
+ msg_ptr->client_id = req->client_id;
+
+ /* Prepare the reply structure */
+ reply_ptr = &mailbox_queue_ptr->queue[idx].reply;
+ reply_ptr->owner = req->owner;
+ reply_ptr->reply = req->reply;
+ reply_ptr->woken_flag = req->woken_flag;
+
+ /*
+ * Memory check can be added here to prevent a malicious application
+ * from providing addresses of other applications or privileged area.
+ */
+
+ tfm_ns_mailbox_hal_enter_critical();
+ set_queue_slot_pend(mailbox_queue_ptr, idx);
+ tfm_ns_mailbox_hal_exit_critical();
+
+ tfm_ns_mailbox_hal_notify_peer();
+
+ if (slot_idx) {
+ *slot_idx = idx;
+ }
+
+ return MAILBOX_SUCCESS;
+}
+
+static inline void ns_mailbox_set_reply_isr(uint8_t idx)
+{
+ int32_t *reply_ptr = mailbox_queue_ptr->queue[idx].reply.reply;
+
+ if (reply_ptr) {
+ *reply_ptr = mailbox_queue_ptr->queue[idx].reply.return_val;
+ }
+}
+
+static int32_t mailbox_wait_reply(const struct ns_mailbox_req_t *req)
+{
+ while (1) {
+ /*
+ * Check the completed flag to make sure that the current thread is
+ * woken up by reply event, rather than other events.
+ */
+ if (*req->woken_flag == WOKEN_UP) {
+ break;
+ }
+
+ /* Woken up from sleep */
+ tfm_ns_mailbox_os_wait_reply();
+ }
+
+ return MAILBOX_SUCCESS;
+}
+
+int32_t tfm_ns_mailbox_client_call(uint32_t call_type,
+ const struct psa_client_params_t *params,
+ int32_t client_id,
+ int32_t *reply)
+{
+ struct ns_mailbox_req_t req;
+ uint8_t woken_flag = NOT_WOKEN;
+ int32_t ret;
+
+ if (!mailbox_queue_ptr) {
+ return MAILBOX_INIT_ERROR;
+ }
+
+ if (!params || !reply) {
+ return MAILBOX_INVAL_PARAMS;
+ }
+
+ req.call_type = call_type;
+ req.params_ptr = params;
+ req.reply = reply;
+ req.woken_flag = &woken_flag;
+ req.owner = tfm_ns_mailbox_os_get_task_handle();
+ req.client_id = client_id;
+
+ ret = tfm_ns_mailbox_os_mq_send(msgq_handle, &req);
+ if (ret != MAILBOX_SUCCESS) {
+ return ret;
+ }
+
+ ret = mailbox_wait_reply(&req);
+
+ return ret;
+}
+
+void tfm_ns_mailbox_thread_runner(void *args)
+{
+ struct ns_mailbox_req_t req;
+ int32_t ret;
+
+ (void)args;
+
+ ns_mailbox_thread_handle = tfm_ns_mailbox_os_get_task_handle();
+
+ while (1) {
+ ret = tfm_ns_mailbox_os_mq_receive(msgq_handle, &req);
+ if (ret != MAILBOX_SUCCESS) {
+ continue;
+ }
+
+ /*
+ * Invalid client address. However, the pointer was already
+ * checked previously and therefore just simply ignore this
+ * client call request.
+ */
+ if (!req.params_ptr || !req.reply || !req.woken_flag) {
+ continue;
+ }
+
+ mailbox_tx_client_call_msg(&req, NULL);
+ }
+}
+
+int32_t tfm_ns_mailbox_wake_reply_owner_isr(void)
+{
+ uint8_t idx;
+ const void *task_handle;
+ mailbox_queue_status_t replied_status, complete_slots = 0x0;
+
+ if (!mailbox_queue_ptr) {
+ return MAILBOX_INIT_ERROR;
+ }
+
+ tfm_ns_mailbox_hal_enter_critical_isr();
+ replied_status = mailbox_queue_ptr->replied_slots;
+ clear_queue_slot_all_replied(mailbox_queue_ptr, replied_status);
+ tfm_ns_mailbox_hal_exit_critical_isr();
+
+ if (!replied_status) {
+ return MAILBOX_NO_PEND_EVENT;
+ }
+
+ for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
+ /*
+ * The reply has already received from SPE mailbox but
+ * the wake-up signal is not sent yet.
+ */
+ if (!(replied_status & (0x1UL << idx))) {
+ continue;
+ }
+
+ /*
+ * Write back the return result.
+ * When TFM_MULTI_CORE_NS_OS_MAILBOX_THREAD is enabled, a reply is
+ * returned inside ns_mailbox_set_reply_isr().
+ * When TFM_MULTI_CORE_NS_OS_MAILBOX_THREAD is disabled, a reply is
+ * returned inside mailbox_rx_client_reply(). ns_mailbox_set_reply_isr()
+ * is defined as dummy function.
+ */
+ ns_mailbox_set_reply_isr(idx);
+
+ /* Wake up the owner of this mailbox message */
+ set_queue_slot_woken(idx);
+
+ task_handle = mailbox_queue_ptr->queue[idx].reply.owner;
+ if (task_handle) {
+ tfm_ns_mailbox_os_wake_task_isr(task_handle);
+ }
+
+ complete_slots |= (1UL << idx);
+
+ replied_status &= ~(0x1UL << idx);
+ if (!replied_status) {
+ break;
+ }
+ }
+
+ set_queue_slot_all_empty(complete_slots);
+
+ /*
+ * Wake up the NS mailbox thread in case it is waiting for
+ * empty slots.
+ */
+ if (mailbox_queue_ptr->is_full) {
+ if (ns_mailbox_thread_handle) {
+ tfm_ns_mailbox_os_wake_task_isr(ns_mailbox_thread_handle);
+ }
+ }
+
+ return MAILBOX_SUCCESS;
+}
+
+static inline int32_t mailbox_req_queue_init(uint8_t queue_depth)
+{
+ msgq_handle = tfm_ns_mailbox_os_mq_create(sizeof(struct ns_mailbox_req_t),
+ queue_depth);
+ if (!msgq_handle) {
+ return MAILBOX_GENERIC_ERROR;
+ }
+
+ return MAILBOX_SUCCESS;
+}
+
+int32_t tfm_ns_mailbox_init(struct ns_mailbox_queue_t *queue)
+{
+ int32_t ret;
+
+ if (!queue) {
+ return MAILBOX_INVAL_PARAMS;
+ }
+
+ /*
+ * Further verification of mailbox queue address may be required according
+ * to non-secure memory assignment.
+ */
+
+ memset(queue, 0, sizeof(*queue));
+
+ /* Initialize empty bitmask */
+ queue->empty_slots =
+ (mailbox_queue_status_t)((1UL << (NUM_MAILBOX_QUEUE_SLOT - 1)) - 1);
+ queue->empty_slots +=
+ (mailbox_queue_status_t)(1UL << (NUM_MAILBOX_QUEUE_SLOT - 1));
+
+ mailbox_queue_ptr = queue;
+
+ /* Platform specific initialization. */
+ ret = tfm_ns_mailbox_hal_init(queue);
+ if (ret != MAILBOX_SUCCESS) {
+ return ret;
+ }
+
+ ret = mailbox_req_queue_init(NUM_MAILBOX_QUEUE_SLOT);
+
+#ifdef TFM_MULTI_CORE_TEST
+ tfm_ns_mailbox_tx_stats_init(queue);
+#endif
+
+ return ret;
+}