/*
 * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
 * Copyright (c) 2021-2022 Cypress Semiconductor Corporation (an Infineon
 * company) or an affiliate of Cypress Semiconductor Corporation. All rights
 * reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include "aapcs_local.h"
#include "critical_section.h"
#include "compiler_ext_defs.h"
#include "config_spm.h"
#include "runtime_defs.h"
#include "ffm/stack_watermark.h"
#include "spm.h"
#include "tfm_hal_memory_symbols.h"
#include "tfm_hal_isolation.h"
#include "tfm_hal_platform.h"
#include "tfm_rpc.h"
#include "ffm/backend.h"
#include "utilities.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/spm_load_api.h"
#include "psa/error.h"

/* Declare the global component list */
struct partition_head_t partition_listhead;

#if CONFIG_TFM_PSA_API_CROSS_CALL == 1
/* Instance for SPM_THREAD_CONTEXT */

#ifdef CONFIG_TFM_USE_TRUSTZONE
struct context_ctrl_t *p_spm_thread_context;
#else
/* If ns_agent_tz isn't used, we need to provide a stack for SPM to use */
static uint8_t spm_thread_stack[CONFIG_TFM_SPM_THREAD_STACK_SIZE] __aligned(8);
ARCH_CLAIM_CTXCTRL_INSTANCE(spm_thread_context,
                            spm_thread_stack,
                            sizeof(spm_thread_stack));

struct context_ctrl_t *p_spm_thread_context = &spm_thread_context;
#endif

#endif

/* Indicator pointing to the partition metadata */
uintptr_t *partition_meta_indicator_pos;

extern uint32_t scheduler_lock;

/*
 * Query the state of the current thread.
 */
static uint32_t query_state(struct thread_t *p_thrd, uint32_t *p_retval)
{
    struct critical_section_t cs_signal = CRITICAL_SECTION_STATIC_INIT;
    struct partition_t *p_pt = NULL;
    uint32_t state = p_thrd->state;
    psa_signal_t signal_ret = 0;

    /* Get the partition that owns this thread. */
    p_pt = TO_CONTAINER(p_thrd->p_context_ctrl,
                        struct partition_t, ctx_ctrl);

    CRITICAL_SECTION_ENTER(cs_signal);

    signal_ret = p_pt->signals_waiting & p_pt->signals_asserted;

    if (signal_ret) {
        /*
         * If the partition is waiting for signals and any of them has been
         * asserted, change the thread state to THRD_STATE_RET_VAL_AVAIL and
         * fill in the return value. If the waiting signal is
         * TFM_IPC_REPLY_SIGNAL, the Secure Partition is waiting for a service
         * request to be fulfilled, so the return value comes from
         * backend_replying() called by the server Partition. For other
         * signals waited for via psa_wait(), the return value is the signal
         * itself.
         */
        if (signal_ret == TFM_IPC_REPLY_SIGNAL) {
            p_pt->signals_asserted &= ~TFM_IPC_REPLY_SIGNAL;
            *p_retval = (uint32_t)p_pt->reply_value;
        } else {
            *p_retval = signal_ret;
        }

        p_pt->signals_waiting = 0;
        state = THRD_STATE_RET_VAL_AVAIL;
    } else if (p_pt->signals_waiting != 0) {
        /*
         * If the thread is waiting for signals but none of them has been
         * asserted, block the thread.
         */
        state = THRD_STATE_BLOCK;
    }

    CRITICAL_SECTION_LEAVE(cs_signal);
    return state;
}

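/*
 * Allocate the runtime metadata on the partition stack and fill it in: the
 * common metadata (entry point and SFN count) plus, for SFN-model
 * partitions, the table of service functions.
 */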
static void prv_process_metadata(struct partition_t *p_pt)
{
    const struct partition_load_info_t *p_pt_ldi;
    const struct service_load_info_t *p_srv_ldi;
    struct context_ctrl_t *ctx_ctrl;
    struct runtime_metadata_t *p_rt_meta;
    service_fn_t *p_sfn_table;
    uint32_t allocate_size;

    p_pt_ldi = p_pt->p_ldinf;
    p_srv_ldi = LOAD_INFO_SERVICE(p_pt_ldi);
    ctx_ctrl = &p_pt->ctx_ctrl;

    /* common runtime metadata */
    allocate_size = sizeof(*p_rt_meta);

    if (!IS_PARTITION_IPC_MODEL(p_pt_ldi)) {
        /* SFN specific metadata - SFN function table */
        allocate_size += sizeof(service_fn_t) * p_pt_ldi->nservices;
    }

    ARCH_CTXCTRL_ALLOCATE_STACK(ctx_ctrl, allocate_size);
    p_rt_meta = (struct runtime_metadata_t *)
                                    ARCH_CTXCTRL_ALLOCATED_PTR(ctx_ctrl);

    p_rt_meta->entry = p_pt_ldi->entry;
    p_rt_meta->n_sfn = 0;
    p_sfn_table = p_rt_meta->sfn_table;

    if (!IS_PARTITION_IPC_MODEL(p_pt_ldi)) {
        /* SFN table. A service's signal bit index equals its SFN index. */
        for (int i = 0; i < p_pt_ldi->nservices; i++) {
            p_sfn_table[i] = (service_fn_t)p_srv_ldi[i].sfn;
        }

        p_rt_meta->n_sfn = p_pt_ldi->nservices;
    }

    p_pt->p_metadata = (void *)p_rt_meta;
}

/*
 * Send a message and wake up the SP that is waiting on the message queue,
 * then block the current thread and trigger the scheduler.
 */
psa_status_t backend_messaging(struct service_t *service,
                               struct conn_handle_t *handle)
{
    struct partition_t *p_owner = NULL;
    psa_signal_t signal = 0;

    if (!handle || !service || !service->p_ldinf || !service->partition) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    p_owner = service->partition;
    signal = service->p_ldinf->signal;

    UNI_LIST_INSERT_AFTER(p_owner, handle, p_handles);

    /* Message queued. Update the owner's signals. */
    backend_assert_signal(p_owner, signal);

    /*
     * If it is an NS request via RPC, there is no need to block the current
     * thread.
     */

    if (!is_tfm_rpc_msg(handle)) {
        backend_wait_signals(handle->p_client, TFM_IPC_REPLY_SIGNAL);
    }

    handle->status = TFM_HANDLE_STATUS_ACTIVE;

    return PSA_SUCCESS;
}

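/*
 * Reply to a message: for an NS request via RPC forward the status to the
 * RPC layer; otherwise record the reply value in the client partition and
 * assert TFM_IPC_REPLY_SIGNAL so the waiting client can run again.
 */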
psa_status_t backend_replying(struct conn_handle_t *handle, int32_t status)
{
    if (is_tfm_rpc_msg(handle)) {
        tfm_rpc_client_call_reply(handle, status);
    } else {
        handle->p_client->reply_value = (uintptr_t)status;
        backend_assert_signal(handle->p_client, TFM_IPC_REPLY_SIGNAL);
    }

    /*
     * 'psa_reply' exists in the IPC model only and returns 'void'. Always
     * return 'PSA_SUCCESS' here since SPM does not forward the status to
     * the caller.
     */
    return PSA_SUCCESS;
}

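/* Common thread entry: sprt_main() is provided by the partition runtime. */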
extern void sprt_main(void);

/* Parameters are assumed to be valid ('assuredly'); no checks are performed */
void backend_init_comp_assuredly(struct partition_t *p_pt,
                                 uint32_t service_setting)
{
    const struct partition_load_info_t *p_pldi = p_pt->p_ldinf;

#if CONFIG_TFM_DOORBELL_API == 1
    p_pt->signals_allowed |= PSA_DOORBELL;
#endif /* CONFIG_TFM_DOORBELL_API == 1 */

    p_pt->signals_allowed |= service_setting;

    UNI_LISI_INIT_NODE(p_pt, p_handles);

    ARCH_CTXCTRL_INIT(&p_pt->ctx_ctrl,
                      LOAD_ALLOCED_STACK_ADDR(p_pldi),
                      p_pldi->stack_size);

    watermark_stack(p_pt);

    prv_process_metadata(p_pt);

    THRD_INIT(&p_pt->thrd, &p_pt->ctx_ctrl,
              TO_THREAD_PRIORITY(PARTITION_PRIORITY(p_pldi->flags)));

#if (CONFIG_TFM_PSA_API_CROSS_CALL == 1) && defined(CONFIG_TFM_USE_TRUSTZONE)
    if (IS_PARTITION_NS_AGENT(p_pldi)) {
        /* Get the context from ns_agent_tz */
        if (p_pldi->pid == 0) {
            SPM_THREAD_CONTEXT = &p_pt->ctx_ctrl;
        }
    }
#endif

    thrd_start(&p_pt->thrd,
               POSITION_TO_ENTRY(sprt_main, thrd_fn_t),
               THRD_GENERAL_EXIT);
}

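/*
 * Start the SPM scheduler: register the thread state query callback, start
 * the first thread, and activate the boundary of the partition that is
 * scheduled to run first.
 */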
uint32_t backend_system_run(void)
{
    uint32_t control;
    struct partition_t *p_cur_pt;
    fih_int fih_rc = FIH_FAILURE;

#if CONFIG_TFM_PSA_API_CROSS_CALL == 1
    SPM_ASSERT(SPM_THREAD_CONTEXT);
#endif

    /* Register the thread state query callback. */
    thrd_set_query_callback(query_state);

    partition_meta_indicator_pos = (uintptr_t *)hal_mem_sp_meta_start;
    control = thrd_start_scheduler(&CURRENT_THREAD);

    p_cur_pt = TO_CONTAINER(CURRENT_THREAD->p_context_ctrl,
                            struct partition_t, ctx_ctrl);

    FIH_CALL(tfm_hal_activate_boundary, fih_rc, p_cur_pt->p_ldinf, p_cur_pt->boundary);
    if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
        tfm_core_panic();
    }

    return control;
}

psa_signal_t backend_wait_signals(struct partition_t *p_pt, psa_signal_t signals)
{
    struct critical_section_t cs_signal = CRITICAL_SECTION_STATIC_INIT;
    psa_signal_t ret_signal;

    if (!p_pt) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_signal);

    ret_signal = p_pt->signals_asserted & signals;
    if (ret_signal == 0) {
        p_pt->signals_waiting = signals;
    }

    CRITICAL_SECTION_LEAVE(cs_signal);

    return ret_signal;
}

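/* Assert a signal for a partition inside the signal critical section. */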
uint32_t backend_assert_signal(struct partition_t *p_pt, psa_signal_t signal)
{
    struct critical_section_t cs_signal = CRITICAL_SECTION_STATIC_INIT;

    if (!p_pt) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_signal);
    p_pt->signals_asserted |= signal;
    CRITICAL_SECTION_LEAVE(cs_signal);

    return PSA_SUCCESS;
}

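/*
 * Pick the next thread to run. Returns the current and next context-control
 * pointers packed into a 64-bit AAPCS value; both halves are equal when no
 * context switch is required.
 */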
uint64_t ipc_schedule(void)
{
    fih_int fih_rc = FIH_FAILURE;
    AAPCS_DUAL_U32_T ctx_ctrls;
    struct partition_t *p_part_curr, *p_part_next;
    struct context_ctrl_t *p_curr_ctx;
    struct thread_t *pth_next = thrd_next();
    struct critical_section_t cs = CRITICAL_SECTION_STATIC_INIT;

    p_curr_ctx = (struct context_ctrl_t *)(CURRENT_THREAD->p_context_ctrl);

    AAPCS_DUAL_U32_SET(ctx_ctrls, (uint32_t)p_curr_ctx, (uint32_t)p_curr_ctx);

    p_part_curr = GET_CURRENT_COMPONENT();
    p_part_next = GET_THRD_OWNER(pth_next);

    if (scheduler_lock != SCHEDULER_LOCKED && pth_next != NULL &&
        p_part_curr != p_part_next) {
        /* Check if there is enough room on the stack to save more context */
        if ((p_curr_ctx->sp_limit +
                sizeof(struct tfm_additional_context_t)) > __get_PSP()) {
            tfm_core_panic();
        }

        CRITICAL_SECTION_ENTER(cs);
        /*
         * If required, let the platform update the boundary based on its
         * implementation: change privilege, MPU or other configurations.
         */
        if (tfm_hal_boundary_need_switch(p_part_curr->boundary,
                                         p_part_next->boundary)) {
            FIH_CALL(tfm_hal_activate_boundary, fih_rc,
                     p_part_next->p_ldinf, p_part_next->boundary);
            if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
                tfm_core_panic();
            }
        }
        ARCH_FLUSH_FP_CONTEXT();

        AAPCS_DUAL_U32_SET_A1(ctx_ctrls, (uint32_t)pth_next->p_context_ctrl);

        CURRENT_THREAD = pth_next;
        CRITICAL_SECTION_LEAVE(cs);
    }

    /* Update the partition metadata indicator */
    if (partition_meta_indicator_pos && (p_part_next->p_metadata)) {
        *partition_meta_indicator_pos = (uintptr_t)(p_part_next->p_metadata);
    }
    return AAPCS_DUAL_U32_AS_U64(ctx_ctrls);
}