/*
 * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
 * Copyright (c) 2021-2022, Cypress Semiconductor Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include "aapcs_local.h"
#include "critical_section.h"
#include "compiler_ext_defs.h"
#include "runtime_defs.h"
#include "ffm/stack_watermark.h"
#include "spm_ipc.h"
#include "tfm_hal_memory_symbols.h"
#include "tfm_hal_isolation.h"
#include "tfm_hal_platform.h"
#include "tfm_rpc.h"
#include "ffm/backend.h"
#include "utilities.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/spm_load_api.h"
#include "psa/error.h"

/* Declare the global component list */
struct partition_head_t partition_listhead;

#if CONFIG_TFM_PSA_API_CROSS_CALL == 1

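/*
 * Context used when SPM itself runs as a thread to serve PSA API cross
 * calls. With TFM_MULTI_CORE_TOPOLOGY a dedicated local stack is reserved
 * here; on single-core builds the pointer is presumably bound to the NS
 * Agent partition context later, in ipc_comp_init_assuredly().
 */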
#ifdef TFM_MULTI_CORE_TOPOLOGY
/* TODO: To be checked when RPC design updates. */
static uint8_t spm_stack_local[CONFIG_TFM_SPM_THREAD_STACK_SIZE] __aligned(8);
struct context_ctrl_t spm_thread_context = {
    .sp = (uint32_t)&spm_stack_local[CONFIG_TFM_SPM_THREAD_STACK_SIZE],
    .sp_limit = (uint32_t)spm_stack_local,
    .allocated = 0,
    .exc_ret = 0,
};
struct context_ctrl_t *p_spm_thread_context = &spm_thread_context;
#else
struct context_ctrl_t *p_spm_thread_context;
#endif

#endif

/* Indicator pointing to the partition metadata */
uintptr_t *partition_meta_indicator_pos;

extern uint32_t scheduler_lock;
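/*
 * Allocate the partition runtime metadata on the partition stack via its
 * context control: the entry point plus, for SFN model partitions, the
 * service function (SFN) table. The resulting pointer is stored in
 * 'p_pt->p_metadata' so the partition-side runtime can locate it.
 */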
static void prv_process_metadata(struct partition_t *p_pt)
{
    const struct partition_load_info_t *p_pt_ldi;
    const struct service_load_info_t *p_srv_ldi;
    struct context_ctrl_t *ctx_ctrl;
    struct runtime_metadata_t *p_rt_meta;
    service_fn_t *p_sfn_table;
    uint32_t allocate_size;

    p_pt_ldi = p_pt->p_ldinf;
    p_srv_ldi = (struct service_load_info_t *)LOAD_INFO_SERVICE(p_pt_ldi);
    ctx_ctrl = &p_pt->ctx_ctrl;

    /* common runtime metadata */
    allocate_size = sizeof(*p_rt_meta);

    if (!IS_PARTITION_IPC_MODEL(p_pt_ldi)) {
        /* SFN specific metadata - SFN function table */
        allocate_size += sizeof(service_fn_t) * p_pt_ldi->nservices;
    }

    ARCH_CTXCTRL_ALLOCATE_STACK(ctx_ctrl, allocate_size);
    p_rt_meta = (struct runtime_metadata_t *)
                ARCH_CTXCTRL_ALLOCATED_PTR(ctx_ctrl);

    p_rt_meta->entry = p_pt_ldi->entry;
    p_rt_meta->n_sfn = 0;
    p_sfn_table = p_rt_meta->sfn_table;

    if (!IS_PARTITION_IPC_MODEL(p_pt_ldi)) {
        /* SFN table. A service's signal bit index matches its SFN table index. */
        for (int i = 0; i < p_pt_ldi->nservices; i++) {
            p_sfn_table[i] = (service_fn_t)p_srv_ldi[i].sfn;
        }

        p_rt_meta->n_sfn = p_pt_ldi->nservices;
    }

    p_pt->p_metadata = (void *)p_rt_meta;
}

/*
 * Send the message and wake up the Secure Partition waiting on the message
 * queue, then block the current thread and trigger the scheduler.
 */
static psa_status_t ipc_messaging(struct service_t *service,
                                  struct conn_handle_t *handle)
{
    struct partition_t *p_owner = NULL;
    psa_signal_t signal = 0;
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;

    if (!handle || !service || !service->p_ldinf || !service->partition) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    p_owner = service->partition;
    signal = service->p_ldinf->signal;

    CRITICAL_SECTION_ENTER(cs_assert);

    UNI_LIST_INSERT_AFTER(p_owner, handle, p_handles);

    /* Message queued. Update the signals. */
    p_owner->signals_asserted |= signal;

    if (p_owner->signals_waiting & signal) {
        thrd_wake_up(&p_owner->waitobj,
                     (p_owner->signals_asserted & p_owner->signals_waiting));
        p_owner->signals_waiting &= ~signal;
    }
    CRITICAL_SECTION_LEAVE(cs_assert);

    /*
     * If this is an NS request coming in via RPC, there is no need to block
     * the current thread.
     */

    if (!is_tfm_rpc_msg(handle)) {
        thrd_set_wait(&handle->ack_evnt, CURRENT_THREAD);
    }

    handle->status = TFM_HANDLE_STATUS_ACTIVE;

    return PSA_SUCCESS;
}

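/*
 * Reply to a handled message: forward the status through the RPC layer for
 * NS requests, otherwise wake up the client thread waiting on the handle's
 * 'ack_evnt'.
 */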
static psa_status_t ipc_replying(struct conn_handle_t *handle, int32_t status)
{
    if (is_tfm_rpc_msg(handle)) {
        tfm_rpc_client_call_reply(handle, status);
    } else {
        thrd_wake_up(&handle->ack_evnt, status);
    }

    /*
     * 'psa_reply' exists in the IPC model only and returns 'void'. Always
     * return 'PSA_SUCCESS' here, since SPM does not forward the status to
     * the caller.
     */
    return PSA_SUCCESS;
}

extern void sprt_main(void);

/* Parameters are assumed to be valid; validation is done by the caller. */
static void ipc_comp_init_assuredly(struct partition_t *p_pt,
                                    uint32_t service_setting)
{
    const struct partition_load_info_t *p_pldi = p_pt->p_ldinf;

#if CONFIG_TFM_DOORBELL_API == 1
    p_pt->signals_allowed |= PSA_DOORBELL;
#endif /* CONFIG_TFM_DOORBELL_API == 1 */

    p_pt->signals_allowed |= service_setting;

    THRD_SYNC_INIT(&p_pt->waitobj);
    UNI_LISI_INIT_NODE(p_pt, p_handles);

    ARCH_CTXCTRL_INIT(&p_pt->ctx_ctrl,
                      LOAD_ALLOCED_STACK_ADDR(p_pldi),
                      p_pldi->stack_size);

    watermark_stack(p_pt);

    prv_process_metadata(p_pt);

    THRD_INIT(&p_pt->thrd, &p_pt->ctx_ctrl,
              TO_THREAD_PRIORITY(PARTITION_PRIORITY(p_pldi->flags)));

#if (CONFIG_TFM_PSA_API_CROSS_CALL == 1) && !defined(TFM_MULTI_CORE_TOPOLOGY)
    if (p_pldi->pid == TFM_SP_NON_SECURE_ID) {
        SPM_THREAD_CONTEXT = &p_pt->ctx_ctrl;
    }
#endif

    thrd_start(&p_pt->thrd,
               POSITION_TO_ENTRY(sprt_main, thrd_fn_t),
               THRD_GENERAL_EXIT);
}

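/*
 * Start the IPC backend: record the partition metadata indicator position,
 * hand control to the thread scheduler, then apply the isolation boundary
 * of the first scheduled partition and return the scheduler's control value.
 */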
static uint32_t ipc_system_run(void)
{
    uint32_t control;
    struct partition_t *p_cur_pt;

#if CONFIG_TFM_PSA_API_CROSS_CALL == 1
    TFM_CORE_ASSERT(SPM_THREAD_CONTEXT);
#endif

    partition_meta_indicator_pos = (uintptr_t *)hal_mem_sp_meta_start;
    control = thrd_start_scheduler(&CURRENT_THREAD);

    p_cur_pt = TO_CONTAINER(CURRENT_THREAD->p_context_ctrl,
                            struct partition_t, ctx_ctrl);

    if (tfm_hal_update_boundaries(p_cur_pt->p_ldinf, p_cur_pt->p_boundaries)
        != TFM_HAL_SUCCESS) {
        tfm_core_panic();
    }

    return control;
}

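/*
 * Return the currently asserted signals that match 'signal_mask'. If none
 * are pending, record the mask in 'signals_waiting' and block the current
 * thread until one of the requested signals is asserted.
 */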
static psa_signal_t ipc_wait(struct partition_t *p_pt, psa_signal_t signal_mask)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    psa_signal_t ret_signal;

    /*
     * 'ipc_wait()' sets the waiting signal mask for the partition and
     * blocks the partition thread to wait for signals.
     * These changes must happen inside the critical section to prevent
     * interrupts from modifying 'signals_waiting' or the thread state
     * while this function is reading or writing them.
     */
    CRITICAL_SECTION_ENTER(cs_assert);

    ret_signal = p_pt->signals_asserted & signal_mask;
    if (ret_signal == 0) {
        p_pt->signals_waiting = signal_mask;
        thrd_set_wait(&p_pt->waitobj, CURRENT_THREAD);
    }
    CRITICAL_SECTION_LEAVE(cs_assert);

    return ret_signal;
}

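/*
 * Wake up the partition thread with the asserted signals it was waiting
 * for, then clear the waiting mask.
 */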
static void ipc_wake_up(struct partition_t *p_pt)
{
    thrd_wake_up(&p_pt->waitobj,
                 p_pt->signals_asserted & p_pt->signals_waiting);
    p_pt->signals_waiting = 0;
}

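/*
 * Pick the next thread to run and return the current and next context
 * control pointers packed as a 64-bit AAPCS dual-u32 value, presumably
 * consumed by the architecture context-switch code. The switch is skipped
 * while the scheduler is locked, when there is no next thread, or when the
 * next thread belongs to the same partition; the isolation boundary is only
 * updated when it differs between the two partitions.
 */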
uint64_t ipc_schedule(void)
{
    AAPCS_DUAL_U32_T ctx_ctrls;
    struct partition_t *p_part_curr, *p_part_next;
    struct context_ctrl_t *p_curr_ctx;
    struct thread_t *pth_next = thrd_next();
    struct critical_section_t cs = CRITICAL_SECTION_STATIC_INIT;

    p_curr_ctx = (struct context_ctrl_t *)(CURRENT_THREAD->p_context_ctrl);

    AAPCS_DUAL_U32_SET(ctx_ctrls, (uint32_t)p_curr_ctx, (uint32_t)p_curr_ctx);

    p_part_curr = GET_CURRENT_COMPONENT();
    p_part_next = GET_THRD_OWNER(pth_next);

    if (scheduler_lock != SCHEDULER_LOCKED && pth_next != NULL &&
        p_part_curr != p_part_next) {
        /* Check if there is enough room on stack to save more context */
        if ((p_curr_ctx->sp_limit +
             sizeof(struct tfm_additional_context_t)) > __get_PSP()) {
            tfm_core_panic();
        }

        CRITICAL_SECTION_ENTER(cs);
        /*
         * If required, let the platform update boundary based on its
         * implementation. Change privilege, MPU or other configurations.
         */
        if (p_part_curr->p_boundaries != p_part_next->p_boundaries) {
            if (tfm_hal_update_boundaries(p_part_next->p_ldinf,
                                          p_part_next->p_boundaries)
                != TFM_HAL_SUCCESS) {
                tfm_core_panic();
            }
        }
        ARCH_FLUSH_FP_CONTEXT();

        AAPCS_DUAL_U32_SET_A1(ctx_ctrls, (uint32_t)pth_next->p_context_ctrl);

        CURRENT_THREAD = pth_next;
        CRITICAL_SECTION_LEAVE(cs);
    }

    /* Update meta indicator */
    if (partition_meta_indicator_pos && (p_part_next->p_metadata)) {
        *partition_meta_indicator_pos = (uintptr_t)(p_part_next->p_metadata);
    }
    return AAPCS_DUAL_U32_AS_U64(ctx_ctrls);
}

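/*
 * Operation table through which the generic SPM code drives the IPC model
 * backend, e.g. backend_instance.messaging() when a client message arrives.
 */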
const struct backend_ops_t backend_instance = {
    .comp_init_assuredly = ipc_comp_init_assuredly,
    .system_run = ipc_system_run,
    .messaging = ipc_messaging,
    .replying = ipc_replying,
    .wait = ipc_wait,
    .wake_up = ipc_wake_up,
};