SPM: Combine the same routines in cross call and SVCall
Both cross calls and SVC calls require these steps:
- Enter the call
- Check the stack pointer and switch to the SPM stack if the
caller is not the NS agent.
- Lock scheduler
- Verify the return value
- Exit the call
- Unlock scheduler
- Trigger PendSV if necessary
This patch renames the two cross-call functions and reuses them
for SVCall handling.
Signed-off-by: Jianliang Shen <jianliang.shen@arm.com>
Change-Id: I7bb924f55fdcbb8c27fa775920a0a69c932df56b
diff --git a/secure_fw/spm/CMakeLists.txt b/secure_fw/spm/CMakeLists.txt
index 83e2e6d..d22e89c 100755
--- a/secure_fw/spm/CMakeLists.txt
+++ b/secure_fw/spm/CMakeLists.txt
@@ -47,7 +47,6 @@
core/arch/tfm_arch.c
core/main.c
core/spm_ipc.c
- $<$<BOOL:${CONFIG_TFM_SPM_BACKEND_IPC}>:core/spm_cross_call.c>
core/rom_loader.c
core/psa_api.c
core/psa_call_api.c
diff --git a/secure_fw/spm/core/arch/tfm_arch_v6m_v7m.c b/secure_fw/spm/core/arch/tfm_arch_v6m_v7m.c
index 85fc224..a27826b 100644
--- a/secure_fw/spm/core/arch/tfm_arch_v6m_v7m.c
+++ b/secure_fw/spm/core/arch/tfm_arch_v6m_v7m.c
@@ -34,8 +34,8 @@
#if CONFIG_TFM_SPM_BACKEND_IPC == 1
#pragma required = ipc_schedule
-#pragma required = cross_call_entering_c
-#pragma required = cross_call_exiting_c
+#pragma required = backend_abi_entering_spm
+#pragma required = backend_abi_leaving_spm
#endif /* CONFIG_TFM_SPM_BACKEND_IPC == 1*/
@@ -53,7 +53,7 @@
" push {r0-r5} \n"
" cpsid i \n"
" isb \n"
- " bl cross_call_entering_c \n" /* r0: new SP */
+ " bl backend_abi_entering_spm \n" /* r0: new SP */
" cmp r0, #0 \n"
" beq v6v7_branch_to_target \n"
" mov r6, sp \n"
@@ -67,7 +67,7 @@
" blx r4 \n"
" cpsid i \n"
" isb \n"
- " bl cross_call_exiting_c \n"
+ " bl backend_abi_leaving_spm \n"
" mov sp, r6 \n" /* switch stack */
" cpsie i \n"
" isb \n"
diff --git a/secure_fw/spm/core/arch/tfm_arch_v8m_base.c b/secure_fw/spm/core/arch/tfm_arch_v8m_base.c
index e9127cf..0db4168 100644
--- a/secure_fw/spm/core/arch/tfm_arch_v8m_base.c
+++ b/secure_fw/spm/core/arch/tfm_arch_v8m_base.c
@@ -35,8 +35,8 @@
#if CONFIG_TFM_SPM_BACKEND_IPC == 1
#pragma required = ipc_schedule
-#pragma required = cross_call_entering_c
-#pragma required = cross_call_exiting_c
+#pragma required = backend_abi_entering_spm
+#pragma required = backend_abi_leaving_spm
#endif /* CONFIG_TFM_SPM_BACKEND_IPC == 1 */
@@ -54,7 +54,7 @@
" push {r0-r5} \n"
" cpsid i \n"
" isb \n"
- " bl cross_call_entering_c \n" /* r0: new SP, r1: new PSPLIM */
+ " bl backend_abi_entering_spm \n" /* r0: new SP, r1: new PSPLIM */
" mrs r6, psplim \n"
" mov r7, sp \n"
" cmp r0, #0 \n"
@@ -76,7 +76,7 @@
" blx r5 \n"
" cpsid i \n"
" isb \n"
- " bl cross_call_exiting_c \n"
+ " bl backend_abi_leaving_spm \n"
" movs r2, #0 \n" /* Back to caller new stack */
" msr psplim, r2 \n"
" mov sp, r7 \n"
diff --git a/secure_fw/spm/core/arch/tfm_arch_v8m_main.c b/secure_fw/spm/core/arch/tfm_arch_v8m_main.c
index 0bec25f..5b54bbe 100644
--- a/secure_fw/spm/core/arch/tfm_arch_v8m_main.c
+++ b/secure_fw/spm/core/arch/tfm_arch_v8m_main.c
@@ -38,8 +38,8 @@
#if CONFIG_TFM_SPM_BACKEND_IPC == 1
#pragma required = ipc_schedule
-#pragma required = cross_call_entering_c
-#pragma required = cross_call_exiting_c
+#pragma required = backend_abi_entering_spm
+#pragma required = backend_abi_leaving_spm
#endif /* CONFIG_TFM_SPM_BACKEND_IPC == 1*/
@@ -55,7 +55,7 @@
" push {r0-r4, r12} \n"
" cpsid i \n"
" isb \n"
- " bl cross_call_entering_c \n" /* r0: new SP, r1: new PSPLIM */
+ " bl backend_abi_entering_spm\n" /* r0: new SP, r1: new PSPLIM */
" mrs r5, psplim \n"
" mov r6, sp \n"
" cmp r0, #0 \n"
@@ -76,7 +76,7 @@
" blx r12 \n"
" cpsid i \n"
" isb \n"
- " bl cross_call_exiting_c \n"
+ " bl backend_abi_leaving_spm \n"
" mov r2, #0 \n" /* Back to caller new stack */
" msr psplim, r2 \n"
" mov sp, r6 \n"
diff --git a/secure_fw/spm/core/backend_ipc.c b/secure_fw/spm/core/backend_ipc.c
index e133c81..9915283 100644
--- a/secure_fw/spm/core/backend_ipc.c
+++ b/secure_fw/spm/core/backend_ipc.c
@@ -14,6 +14,7 @@
#include "critical_section.h"
#include "compiler_ext_defs.h"
#include "config_spm.h"
+#include "ffm/psa_api.h"
#include "runtime_defs.h"
#include "stack_watermark.h"
#include "spm.h"
@@ -380,6 +381,55 @@
return ret;
}
+uint64_t backend_abi_entering_spm(void)
+{
+ struct partition_t *caller = GET_CURRENT_COMPONENT();
+ uint32_t sp = 0;
+ uint32_t sp_limit = 0;
+ AAPCS_DUAL_U32_T spm_stack_info;
+
+#if TFM_ISOLATION_LEVEL == 1
+ /* PSA APIs must be called from Thread mode */
+ if (__get_active_exc_num() != EXC_NUM_THREAD_MODE) {
+ tfm_core_panic();
+ }
+#endif
+
+ /*
+ * Check if caller stack is within SPM stack. If not, then stack needs to
+ * switch. Otherwise, return zeros.
+ */
+ if ((caller->ctx_ctrl.sp <= SPM_THREAD_CONTEXT->sp_limit) ||
+ (caller->ctx_ctrl.sp > SPM_THREAD_CONTEXT->sp_base)) {
+ sp = SPM_THREAD_CONTEXT->sp;
+ sp_limit = SPM_THREAD_CONTEXT->sp_limit;
+ }
+
+ AAPCS_DUAL_U32_SET(spm_stack_info, sp, sp_limit);
+
+ arch_acquire_sched_lock();
+
+ return AAPCS_DUAL_U32_AS_U64(spm_stack_info);
+}
+
+uint32_t backend_abi_leaving_spm(uint32_t result)
+{
+ uint32_t sched_attempted;
+
+ spm_handle_programmer_errors(result);
+
+ /* Release scheduler lock and check the record of schedule attempt. */
+ sched_attempted = arch_release_sched_lock();
+
+ /* Interrupt is masked, PendSV will not happen immediately. */
+ if (result == STATUS_NEED_SCHEDULE ||
+ sched_attempted == SCHEDULER_ATTEMPTED) {
+ tfm_arch_trigger_pendsv();
+ }
+
+ return result;
+}
+
uint64_t ipc_schedule(void)
{
fih_int fih_rc = FIH_FAILURE;
diff --git a/secure_fw/spm/core/spm.h b/secure_fw/spm/core/spm.h
index a0a7ed2..9ee77f7 100644
--- a/secure_fw/spm/core/spm.h
+++ b/secure_fw/spm/core/spm.h
@@ -318,21 +318,4 @@
void update_caller_outvec_len(struct connection_t *handle);
-#if CONFIG_TFM_SPM_BACKEND_IPC == 1
-
-/*
- * Executes with interrupt unmasked.Check the necessity of switching to SPM
- * stack and lock scheduler. Return value is the pair of SPM SP and PSPLIM if
- * necessary. Otherwise, zeros.
- */
-uint64_t cross_call_entering_c(void);
-
-/*
- * Executes with interrupt masked.
- * Check return value from backend and trigger scheduler in PendSV if necessary.
- */
-psa_status_t cross_call_exiting_c(psa_status_t status);
-
-#endif
-
#endif /* __SPM_H__ */
diff --git a/secure_fw/spm/core/spm_cross_call.c b/secure_fw/spm/core/spm_cross_call.c
deleted file mode 100644
index f2ee868..0000000
--- a/secure_fw/spm/core/spm_cross_call.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- *
- */
-
-#include <stdint.h>
-#include "config_impl.h"
-#include "compiler_ext_defs.h"
-#include "current.h"
-#include "spm.h"
-#include "tfm_arch.h"
-#include "ffm/backend.h"
-#include "ffm/psa_api.h"
-#include "internal_status_code.h"
-#include "aapcs_local.h"
-
-__used
-uint64_t cross_call_entering_c(void)
-{
- struct partition_t *caller = GET_CURRENT_COMPONENT();
- uint32_t sp = 0, sp_limit = 0;
- AAPCS_DUAL_U32_T spm_stack_info;
-
- /* PSA APIs must be called from Thread mode */
- if (__get_active_exc_num() != EXC_NUM_THREAD_MODE) {
- tfm_core_panic();
- }
-
- /*
- * Check if caller stack is within SPM stack. If not, then stack needs to
- * switch.
- */
- if ((caller->ctx_ctrl.sp <= SPM_THREAD_CONTEXT->sp_limit) ||
- (caller->ctx_ctrl.sp > SPM_THREAD_CONTEXT->sp_base)) {
- sp = SPM_THREAD_CONTEXT->sp;
- sp_limit = SPM_THREAD_CONTEXT->sp_limit;
- }
-
- AAPCS_DUAL_U32_SET(spm_stack_info, sp, sp_limit);
-
- arch_acquire_sched_lock();
-
- return AAPCS_DUAL_U32_AS_U64(spm_stack_info);
-}
-
-__used
-psa_status_t cross_call_exiting_c(psa_status_t status)
-{
- uint32_t sched_attempted;
-
- spm_handle_programmer_errors(status);
-
- /* Release scheduler lock and check the record of schedule attempt. */
- sched_attempted = arch_release_sched_lock();
-
- /* Interrupt is masked, PendSV will not happen immediately. */
- if (status == STATUS_NEED_SCHEDULE ||
- sched_attempted == SCHEDULER_ATTEMPTED) {
- tfm_arch_trigger_pendsv();
- }
-
- return status;
-}
diff --git a/secure_fw/spm/core/tfm_svcalls.c b/secure_fw/spm/core/tfm_svcalls.c
index 63a5b59..63cc443 100644
--- a/secure_fw/spm/core/tfm_svcalls.c
+++ b/secure_fw/spm/core/tfm_svcalls.c
@@ -6,6 +6,8 @@
*/
#include <string.h>
+#include <stdint.h>
+#include "aapcs_local.h"
#include "config_spm.h"
#include "interrupt.h"
#include "internal_status_code.h"
@@ -74,24 +76,15 @@
static uint32_t thread_mode_spm_return(psa_status_t result)
{
- uint32_t recorded_attempts;
struct tfm_state_context_t *p_tctx = (struct tfm_state_context_t *)saved_psp;
+ backend_abi_leaving_spm(result);
+
ARCH_STATE_CTX_SET_R0(p_tctx, result);
tfm_arch_set_psplim(saved_psp_limit);
__set_PSP(saved_psp);
- spm_handle_programmer_errors(result);
-
- /* Release scheduler lock and check the record of schedule attempt. */
- recorded_attempts = arch_release_sched_lock();
-
- if ((result == STATUS_NEED_SCHEDULE) ||
- (recorded_attempts != SCHEDULER_UNLOCKED)) {
- tfm_arch_trigger_pendsv();
- }
-
return saved_exc_return;
}
@@ -101,21 +94,18 @@
uint32_t sp = __get_PSP();
uint32_t sp_limit = tfm_arch_get_psplim();
struct full_context_t *p_tctx = NULL;
- struct partition_t *caller = GET_CURRENT_COMPONENT();
+ AAPCS_DUAL_U32_T sp_info;
saved_psp = sp;
saved_psp_limit = sp_limit;
- /* Check if caller stack is within SPM stack. If not, then stack needs to switch. */
- if ((caller->ctx_ctrl.sp <= SPM_THREAD_CONTEXT->sp_limit) ||
- (caller->ctx_ctrl.sp > SPM_THREAD_CONTEXT->sp_base)) {
- sp = SPM_THREAD_CONTEXT->sp;
- sp_limit = SPM_THREAD_CONTEXT->sp_limit;
- }
+ sp_info.u64_val = backend_abi_entering_spm();
- /* Build PSA API function context */
- p_ctx_ctrl->sp = sp;
- p_ctx_ctrl->sp_limit = sp_limit;
+ /* SPM SP is saved in R0 */
+ if (sp_info.u32_regs.r0 != 0) {
+ sp = sp_info.u32_regs.r0;
+ sp_limit = sp_info.u32_regs.r1;
+ }
p_tctx = (struct full_context_t *)(sp);
@@ -134,6 +124,7 @@
/* Assign stack and return code to the context control instance. */
p_ctx_ctrl->sp = (uint32_t)(p_tctx);
+ p_ctx_ctrl->sp_limit = sp_limit;
p_ctx_ctrl->exc_ret = (uint32_t)(EXC_RETURN_THREAD_PSP);
}
@@ -170,9 +161,6 @@
/* svc_func can be executed in privileged Thread mode */
__set_CONTROL_nPRIV(0);
- /* Lock scheduler during Thread mode SPM execution */
- arch_acquire_sched_lock();
-
ctx[0] = PSA_SUCCESS;
return EXC_RETURN_THREAD_PSP;
diff --git a/secure_fw/spm/include/ffm/backend_ipc.h b/secure_fw/spm/include/ffm/backend_ipc.h
index 0756143..541de33 100644
--- a/secure_fw/spm/include/ffm/backend_ipc.h
+++ b/secure_fw/spm/include/ffm/backend_ipc.h
@@ -8,7 +8,26 @@
#ifndef __BACKEND_IPC_H__
#define __BACKEND_IPC_H__
+#include <stdint.h>
+
/* Calculate the service setting. In IPC it is the signal set. */
#define BACKEND_SERVICE_SET(set, p_service) ((set) |= (p_service)->signal)
+/*
+ * Actions done before entering SPM.
+ *
+ * Executes with interrupt unmasked. Check the necessity of switching to SPM
+ * stack and lock scheduler. Return value is the pair of SPM SP and PSPLIM if
+ * necessary. Otherwise, zeros.
+ */
+uint64_t backend_abi_entering_spm(void);
+
+/*
+ * Actions done after leaving SPM and before entering other components.
+ *
+ * Executes with interrupt masked.
+ * Check return value from backend and trigger scheduler in PendSV if necessary.
+ */
+uint32_t backend_abi_leaving_spm(uint32_t result);
+
#endif /* __BACKEND_IPC_H__ */