blob: 673b9803b2b8b26a5231ece9c35904420f170b2a [file] [log] [blame]
Mingyang Sundeae45d2021-09-06 15:31:07 +08001/*
shejia01a0ea10c2022-06-27 13:56:00 +08002 * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
Chris Brandb4c2b002022-07-21 12:54:00 -07003 * Copyright (c) 2021-2022 Cypress Semiconductor Corporation (an Infineon
4 * company) or an affiliate of Cypress Semiconductor Corporation. All rights
5 * reserved.
Mingyang Sundeae45d2021-09-06 15:31:07 +08006 *
7 * SPDX-License-Identifier: BSD-3-Clause
8 *
9 */
10
11#include <stdint.h>
Sherry Zhang049733e2022-04-20 21:37:51 +080012#include "aapcs_local.h"
Mingyang Sun620c8562021-11-10 11:44:58 +080013#include "critical_section.h"
Ken Liue07c3b72021-10-14 16:19:13 +080014#include "compiler_ext_defs.h"
Xinyu Zhangcdbe3622022-10-31 14:34:25 +080015#include "config_spm.h"
Summer Qin596f5552022-01-27 18:04:06 +080016#include "runtime_defs.h"
Chris Brand30106ba2022-01-13 13:48:50 -080017#include "ffm/stack_watermark.h"
Mingyang Sundeae45d2021-09-06 15:31:07 +080018#include "spm_ipc.h"
Sherry Zhang049733e2022-04-20 21:37:51 +080019#include "tfm_hal_memory_symbols.h"
Ken Liu62bae592021-10-19 22:15:43 +080020#include "tfm_hal_isolation.h"
Kevin Pengb288c522021-09-26 16:18:23 +080021#include "tfm_hal_platform.h"
Mingyang Sundeae45d2021-09-06 15:31:07 +080022#include "tfm_rpc.h"
Mingyang Sundeae45d2021-09-06 15:31:07 +080023#include "ffm/backend.h"
Ken Liu62bae592021-10-19 22:15:43 +080024#include "utilities.h"
Mingyang Sundeae45d2021-09-06 15:31:07 +080025#include "load/partition_defs.h"
26#include "load/service_defs.h"
27#include "load/spm_load_api.h"
28#include "psa/error.h"
29
/* Declare the global component list */
struct partition_head_t partition_listhead;

#if CONFIG_TFM_PSA_API_CROSS_CALL == 1
/* Instance for SPM_THREAD_CONTEXT */

#ifdef CONFIG_TFM_USE_TRUSTZONE
/*
 * When TrustZone is used, the SPM thread context is provided at runtime
 * (presumably by the ns_agent_tz partition during init — see
 * backend_init_comp_assuredly(), which assigns SPM_THREAD_CONTEXT).
 */
struct context_ctrl_t *p_spm_thread_context;
#else
/* If ns_agent_tz isn't used, we need to provide a stack for SPM to use */
static uint8_t spm_thread_stack[CONFIG_TFM_SPM_THREAD_STACK_SIZE] __aligned(8);
ARCH_CLAIM_CTXCTRL_INSTANCE(spm_thread_context,
                            spm_thread_stack,
                            sizeof(spm_thread_stack));

/* Statically bound to the locally claimed context instance above. */
struct context_ctrl_t *p_spm_thread_context = &spm_thread_context;
#endif

#endif

/* Indicator point to the partition meta */
uintptr_t *partition_meta_indicator_pos;

/* Scheduler lock flag, defined by the scheduler module (see ipc_schedule()). */
extern uint32_t scheduler_lock;
54
Summer Qin596f5552022-01-27 18:04:06 +080055static void prv_process_metadata(struct partition_t *p_pt)
56{
Kevin Peng43160d52022-02-11 13:35:56 +080057 const struct partition_load_info_t *p_pt_ldi;
58 const struct service_load_info_t *p_srv_ldi;
59 struct context_ctrl_t *ctx_ctrl;
60 struct runtime_metadata_t *p_rt_meta;
61 service_fn_t *p_sfn_table;
62 uint32_t allocate_size;
Summer Qin596f5552022-01-27 18:04:06 +080063
Kevin Peng43160d52022-02-11 13:35:56 +080064 p_pt_ldi = p_pt->p_ldinf;
Chris Brand1fb796d2022-10-18 16:54:25 -070065 p_srv_ldi = LOAD_INFO_SERVICE(p_pt_ldi);
Kevin Peng43160d52022-02-11 13:35:56 +080066 ctx_ctrl = &p_pt->ctx_ctrl;
67
68 /* common runtime metadata */
69 allocate_size = sizeof(*p_rt_meta);
70
71 if (!IS_PARTITION_IPC_MODEL(p_pt_ldi)) {
72 /* SFN specific metadata - SFN function table */
73 allocate_size += sizeof(service_fn_t) * p_pt_ldi->nservices;
Summer Qin596f5552022-01-27 18:04:06 +080074 }
75
Kevin Peng43160d52022-02-11 13:35:56 +080076 ARCH_CTXCTRL_ALLOCATE_STACK(ctx_ctrl, allocate_size);
77 p_rt_meta = (struct runtime_metadata_t *)
78 ARCH_CTXCTRL_ALLOCATED_PTR(ctx_ctrl);
79
80 p_rt_meta->entry = p_pt_ldi->entry;
81 p_rt_meta->n_sfn = 0;
82 p_sfn_table = p_rt_meta->sfn_table;
83
84 if (!IS_PARTITION_IPC_MODEL(p_pt_ldi)) {
85 /* SFN table. The signal bit of the service is the same index of SFN. */
86 for (int i = 0; i < p_pt_ldi->nservices; i++) {
87 p_sfn_table[i] = (service_fn_t)p_srv_ldi[i].sfn;
88 }
89
90 p_rt_meta->n_sfn = p_pt_ldi->nservices;
91 }
92
93 p_pt->p_metadata = (void *)p_rt_meta;
Summer Qin596f5552022-01-27 18:04:06 +080094}
95
/*
 * Send message and wake up the SP who is waiting on message queue, block the
 * current thread and trigger scheduler.
 *
 * Returns PSA_ERROR_PROGRAMMER_ERROR on invalid service/handle inputs,
 * PSA_SUCCESS otherwise.
 */
psa_status_t backend_messaging(struct service_t *service,
                               struct conn_handle_t *handle)
{
    struct partition_t *p_owner = NULL;
    psa_signal_t signal = 0;
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;

    if (!handle || !service || !service->p_ldinf || !service->partition) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    p_owner = service->partition;
    signal = service->p_ldinf->signal;

    /*
     * Everything from queueing the handle to updating thread states must be
     * atomic with respect to interrupts that also assert signals.
     */
    CRITICAL_SECTION_ENTER(cs_assert);

    UNI_LIST_INSERT_AFTER(p_owner, handle, p_handles);

    /* Messages put. Update signals */
    p_owner->signals_asserted |= signal;

    if (p_owner->signals_waiting & signal) {
        /* Only a blocked owner needs waking; a runnable one will pick the
         * signal up on its own.
         */
        if (p_owner->thrd.state == THRD_STATE_BLOCK) {
            thrd_set_state(&p_owner->thrd, THRD_STATE_RUNNABLE);
            /* The owner's pending psa_wait() returns the asserted subset. */
            tfm_arch_set_context_ret_code(p_owner->thrd.p_context_ctrl,
                            (p_owner->signals_asserted & p_owner->signals_waiting));
        }
        p_owner->signals_waiting &= ~signal;
    }

    /*
     * If it is a NS request via RPC, it is unnecessary to block current
     * thread.
     */

    if (!is_tfm_rpc_msg(handle)) {
        /* Block the client until backend_replying() delivers the reply.
         * TFM_IPC_REPLY_SIGNAL marks the client as awaiting a reply.
         */
        thrd_set_state(&handle->p_client->thrd, THRD_STATE_BLOCK);
        handle->p_client->signals_asserted |= TFM_IPC_REPLY_SIGNAL;
    }
    CRITICAL_SECTION_LEAVE(cs_assert);

    handle->status = TFM_HANDLE_STATUS_ACTIVE;

    return PSA_SUCCESS;
}
145
Ken Liu995a9742022-05-18 19:28:30 +0800146psa_status_t backend_replying(struct conn_handle_t *handle, int32_t status)
Ken Liu802a3702021-10-15 12:09:56 +0800147{
shejia01a0ea10c2022-06-27 13:56:00 +0800148 struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
149
150 CRITICAL_SECTION_ENTER(cs_assert);
151
Mingyang Suna09adda2022-02-16 18:11:33 +0800152 if (is_tfm_rpc_msg(handle)) {
153 tfm_rpc_client_call_reply(handle, status);
Ken Liu802a3702021-10-15 12:09:56 +0800154 } else {
shejia01a0ea10c2022-06-27 13:56:00 +0800155 if (handle->p_client->signals_asserted & TFM_IPC_REPLY_SIGNAL) {
156 thrd_set_state(&handle->p_client->thrd, THRD_STATE_RUNNABLE);
157 tfm_arch_set_context_ret_code(handle->p_client->thrd.p_context_ctrl,
158 status);
159 handle->p_client->signals_asserted &= ~TFM_IPC_REPLY_SIGNAL;
160 }
Ken Liu802a3702021-10-15 12:09:56 +0800161 }
shejia01a0ea10c2022-06-27 13:56:00 +0800162 CRITICAL_SECTION_LEAVE(cs_assert);
Ken Liuf39d8eb2021-10-07 12:55:33 +0800163
164 /*
165 * 'psa_reply' exists in IPC model only and returns 'void'. Return
166 * 'PSA_SUCCESS' here always since SPM does not forward the status
167 * to the caller.
168 */
169 return PSA_SUCCESS;
Ken Liu802a3702021-10-15 12:09:56 +0800170}
171
Summer Qin596f5552022-01-27 18:04:06 +0800172extern void sprt_main(void);
173
/* Parameters are treated as assuredly */
/*
 * Initialize one partition component: allowed signals, connection-handle
 * list, stack/context control, runtime metadata, and its thread — then
 * start the thread at the shared runtime entry 'sprt_main'.
 *
 * Ordering matters: the context control must be initialized before the
 * stack is watermarked and before metadata is allocated on that stack.
 */
void backend_init_comp_assuredly(struct partition_t *p_pt,
                                 uint32_t service_setting)
{
    const struct partition_load_info_t *p_pldi = p_pt->p_ldinf;

#if CONFIG_TFM_DOORBELL_API == 1
    p_pt->signals_allowed |= PSA_DOORBELL;
#endif /* CONFIG_TFM_DOORBELL_API == 1 */

    /* Service signals assigned to this partition by the loader. */
    p_pt->signals_allowed |= service_setting;

    /* NOTE: 'UNI_LISI_INIT_NODE' is the macro's actual (project) spelling. */
    UNI_LISI_INIT_NODE(p_pt, p_handles);

    ARCH_CTXCTRL_INIT(&p_pt->ctx_ctrl,
                      LOAD_ALLOCED_STACK_ADDR(p_pldi),
                      p_pldi->stack_size);

    /* Fill the stack with the watermark pattern before any use. */
    watermark_stack(p_pt);

    /* Allocates metadata on the freshly initialized stack. */
    prv_process_metadata(p_pt);

    THRD_INIT(&p_pt->thrd, &p_pt->ctx_ctrl,
              TO_THREAD_PRIORITY(PARTITION_PRIORITY(p_pldi->flags)));

#if (CONFIG_TFM_PSA_API_CROSS_CALL == 1) && defined(CONFIG_TFM_USE_TRUSTZONE)
    if (IS_PARTITION_NS_AGENT(p_pldi)) {
        /* Get the context from ns_agent_tz */
        /* pid == 0 appears to identify ns_agent_tz — TODO confirm. */
        if (p_pldi->pid == 0) {
            SPM_THREAD_CONTEXT = &p_pt->ctx_ctrl;
        }
    }
#endif

    thrd_start(&p_pt->thrd,
               POSITION_TO_ENTRY(sprt_main, thrd_fn_t),
               THRD_GENERAL_EXIT);
}
212
/*
 * Hand control to the thread scheduler and activate the isolation boundary
 * of the first scheduled partition.
 *
 * Returns the CONTROL value to be applied for the first thread; does not
 * return on boundary-activation failure (panics).
 */
uint32_t backend_system_run(void)
{
    uint32_t control;
    struct partition_t *p_cur_pt;
    /* FIH (fault-injection-hardened) result, preset to failure. */
    fih_int fih_rc = FIH_FAILURE;

#if CONFIG_TFM_PSA_API_CROSS_CALL == 1
    /* The SPM thread context must have been set up before scheduling. */
    SPM_ASSERT(SPM_THREAD_CONTEXT);
#endif

    /* Publish the metadata indicator location for the runtime to read. */
    partition_meta_indicator_pos = (uintptr_t *)hal_mem_sp_meta_start;
    control = thrd_start_scheduler(&CURRENT_THREAD);

    /* Recover the owning partition from the scheduled thread's context. */
    p_cur_pt = TO_CONTAINER(CURRENT_THREAD->p_context_ctrl,
                            struct partition_t, ctx_ctrl);

    FIH_CALL(tfm_hal_activate_boundary, fih_rc, p_cur_pt->p_ldinf, p_cur_pt->boundary);
    if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
        tfm_core_panic();
    }

    return control;
}
236
Ken Liu995a9742022-05-18 19:28:30 +0800237psa_signal_t backend_wait(struct partition_t *p_pt, psa_signal_t signal_mask)
Kevin Pengdef92de2021-11-10 16:14:48 +0800238{
Mingyang Sun5c9529f2022-03-15 17:51:56 +0800239 struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
240 psa_signal_t ret_signal;
Kevin Pengdef92de2021-11-10 16:14:48 +0800241
Mingyang Sun5c9529f2022-03-15 17:51:56 +0800242 /*
Ken Liu995a9742022-05-18 19:28:30 +0800243 * 'backend_wait()' sets the waiting signal mask for partition, and
Mingyang Sun5c9529f2022-03-15 17:51:56 +0800244 * blocks the partition thread state to wait for signals.
245 * These changes should be inside the ciritical section to avoid
246 * 'signal_waiting' or the thread state to be changed by interrupts
247 * while this function is reading or writing values.
248 */
249 CRITICAL_SECTION_ENTER(cs_assert);
250
251 ret_signal = p_pt->signals_asserted & signal_mask;
252 if (ret_signal == 0) {
253 p_pt->signals_waiting = signal_mask;
shejia01a0ea10c2022-06-27 13:56:00 +0800254 thrd_set_state(&p_pt->thrd, THRD_STATE_BLOCK);
Mingyang Sun5c9529f2022-03-15 17:51:56 +0800255 }
256 CRITICAL_SECTION_LEAVE(cs_assert);
257
258 return ret_signal;
Kevin Pengdef92de2021-11-10 16:14:48 +0800259}
260
Ken Liu995a9742022-05-18 19:28:30 +0800261void backend_wake_up(struct partition_t *p_pt)
Kevin Pengdef92de2021-11-10 16:14:48 +0800262{
shejia01a0ea10c2022-06-27 13:56:00 +0800263 if (p_pt->thrd.state == THRD_STATE_BLOCK) {
264 thrd_set_state(&p_pt->thrd, THRD_STATE_RUNNABLE);
265 tfm_arch_set_context_ret_code(p_pt->thrd.p_context_ctrl,
266 (p_pt->signals_asserted & p_pt->signals_waiting));
267 }
Mingyang Sun09328bd2022-03-25 11:55:13 +0800268 p_pt->signals_waiting = 0;
Kevin Pengdef92de2021-11-10 16:14:48 +0800269}
270
/*
 * Pick the next thread to run and return the pair (current context ctrl,
 * next context ctrl) packed as a 64-bit AAPCS value (two 32-bit registers),
 * for the assembly context-switch code to consume.
 *
 * If the scheduler is locked, there is no candidate, or the candidate
 * belongs to the same partition, both halves refer to the current context
 * and no switch occurs.
 */
uint64_t ipc_schedule(void)
{
    /* FIH (fault-injection-hardened) result, preset to failure. */
    fih_int fih_rc = FIH_FAILURE;
    AAPCS_DUAL_U32_T ctx_ctrls;
    struct partition_t *p_part_curr, *p_part_next;
    struct context_ctrl_t *p_curr_ctx;
    struct thread_t *pth_next = thrd_next();
    struct critical_section_t cs = CRITICAL_SECTION_STATIC_INIT;

    p_curr_ctx = (struct context_ctrl_t *)(CURRENT_THREAD->p_context_ctrl);

    /* Default: "switch" from current to current (i.e. no switch). */
    AAPCS_DUAL_U32_SET(ctx_ctrls, (uint32_t)p_curr_ctx, (uint32_t)p_curr_ctx);

    p_part_curr = GET_CURRENT_COMPONENT();
    p_part_next = GET_THRD_OWNER(pth_next);

    if (scheduler_lock != SCHEDULER_LOCKED && pth_next != NULL &&
        p_part_curr != p_part_next) {
        /* Check if there is enough room on stack to save more context */
        if ((p_curr_ctx->sp_limit +
                sizeof(struct tfm_additional_context_t)) > __get_PSP()) {
            tfm_core_panic();
        }

        CRITICAL_SECTION_ENTER(cs);
        /*
         * If required, let the platform update boundary based on its
         * implementation. Change privilege, MPU or other configurations.
         */
        if (tfm_hal_boundary_need_switch(p_part_curr->boundary,
                                         p_part_next->boundary)) {
            FIH_CALL(tfm_hal_activate_boundary, fih_rc,
                     p_part_next->p_ldinf, p_part_next->boundary);
            if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
                tfm_core_panic();
            }
        }
        /* Drop lazily-stacked FP state before leaving the partition. */
        ARCH_FLUSH_FP_CONTEXT();

        /* First half now points at the incoming thread's context. */
        AAPCS_DUAL_U32_SET_A1(ctx_ctrls, (uint32_t)pth_next->p_context_ctrl);

        CURRENT_THREAD = pth_next;
        CRITICAL_SECTION_LEAVE(cs);
    }

    /* Update meta indicator */
    if (partition_meta_indicator_pos && (p_part_next->p_metadata)) {
        *partition_meta_indicator_pos = (uintptr_t)(p_part_next->p_metadata);
    }
    return AAPCS_DUAL_U32_AS_U64(ctx_ctrls);
}