blob: 16f3f0280c866c4f33ceb270d82ef42998056e66 [file] [log] [blame]
Mingyang Sundeae45d2021-09-06 15:31:07 +08001/*
Mingyang Sunbb4a42a2021-12-14 15:18:52 +08002 * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
Chris Brand30106ba2022-01-13 13:48:50 -08003 * Copyright (c) 2021-2022, Cypress Semiconductor Corporation. All rights reserved.
Mingyang Sundeae45d2021-09-06 15:31:07 +08004 *
5 * SPDX-License-Identifier: BSD-3-Clause
6 *
7 */
8
9#include <stdint.h>
Sherry Zhang049733e2022-04-20 21:37:51 +080010#include "aapcs_local.h"
Mingyang Sun620c8562021-11-10 11:44:58 +080011#include "critical_section.h"
Ken Liue07c3b72021-10-14 16:19:13 +080012#include "compiler_ext_defs.h"
Summer Qin596f5552022-01-27 18:04:06 +080013#include "runtime_defs.h"
Chris Brand30106ba2022-01-13 13:48:50 -080014#include "ffm/stack_watermark.h"
Mingyang Sundeae45d2021-09-06 15:31:07 +080015#include "spm_ipc.h"
Sherry Zhang049733e2022-04-20 21:37:51 +080016#include "tfm_hal_memory_symbols.h"
Ken Liu62bae592021-10-19 22:15:43 +080017#include "tfm_hal_isolation.h"
Kevin Pengb288c522021-09-26 16:18:23 +080018#include "tfm_hal_platform.h"
Mingyang Sundeae45d2021-09-06 15:31:07 +080019#include "tfm_rpc.h"
Mingyang Sundeae45d2021-09-06 15:31:07 +080020#include "ffm/backend.h"
Ken Liu62bae592021-10-19 22:15:43 +080021#include "utilities.h"
Mingyang Sundeae45d2021-09-06 15:31:07 +080022#include "load/partition_defs.h"
23#include "load/service_defs.h"
24#include "load/spm_load_api.h"
25#include "psa/error.h"
26
27/* Declare the global component list */
28struct partition_head_t partition_listhead;
29
Kevin Peng9f1a7542022-02-07 16:32:27 +080030#if CONFIG_TFM_PSA_API_CROSS_CALL == 1
Ken Liu63a176b2022-06-09 22:36:56 +080031/* Instance for SPM_THREAD_CONTEXT */
Ken Liue07c3b72021-10-14 16:19:13 +080032
33#ifdef TFM_MULTI_CORE_TOPOLOGY
Ken Liu63a176b2022-06-09 22:36:56 +080034
35static uint8_t spm_thread_stack[CONFIG_TFM_SPM_THREAD_STACK_SIZE] __aligned(8);
36ARCH_CLAIM_CTXCTRL_INSTANCE(spm_thread_context,
37 spm_thread_stack,
38 sizeof(spm_thread_stack));
39
Ken Liue07c3b72021-10-14 16:19:13 +080040struct context_ctrl_t *p_spm_thread_context = &spm_thread_context;
41#else
42struct context_ctrl_t *p_spm_thread_context;
43#endif
44
45#endif
46
/* Indicator pointing to the partition metadata */
48uintptr_t *partition_meta_indicator_pos;
49
50extern uint32_t scheduler_lock;
51
Summer Qin596f5552022-01-27 18:04:06 +080052static void prv_process_metadata(struct partition_t *p_pt)
53{
Kevin Peng43160d52022-02-11 13:35:56 +080054 const struct partition_load_info_t *p_pt_ldi;
55 const struct service_load_info_t *p_srv_ldi;
56 struct context_ctrl_t *ctx_ctrl;
57 struct runtime_metadata_t *p_rt_meta;
58 service_fn_t *p_sfn_table;
59 uint32_t allocate_size;
Summer Qin596f5552022-01-27 18:04:06 +080060
Kevin Peng43160d52022-02-11 13:35:56 +080061 p_pt_ldi = p_pt->p_ldinf;
62 p_srv_ldi = (struct service_load_info_t *)LOAD_INFO_SERVICE(p_pt_ldi);
63 ctx_ctrl = &p_pt->ctx_ctrl;
64
65 /* common runtime metadata */
66 allocate_size = sizeof(*p_rt_meta);
67
68 if (!IS_PARTITION_IPC_MODEL(p_pt_ldi)) {
69 /* SFN specific metadata - SFN function table */
70 allocate_size += sizeof(service_fn_t) * p_pt_ldi->nservices;
Summer Qin596f5552022-01-27 18:04:06 +080071 }
72
Kevin Peng43160d52022-02-11 13:35:56 +080073 ARCH_CTXCTRL_ALLOCATE_STACK(ctx_ctrl, allocate_size);
74 p_rt_meta = (struct runtime_metadata_t *)
75 ARCH_CTXCTRL_ALLOCATED_PTR(ctx_ctrl);
76
77 p_rt_meta->entry = p_pt_ldi->entry;
78 p_rt_meta->n_sfn = 0;
79 p_sfn_table = p_rt_meta->sfn_table;
80
81 if (!IS_PARTITION_IPC_MODEL(p_pt_ldi)) {
82 /* SFN table. The signal bit of the service is the same index of SFN. */
83 for (int i = 0; i < p_pt_ldi->nservices; i++) {
84 p_sfn_table[i] = (service_fn_t)p_srv_ldi[i].sfn;
85 }
86
87 p_rt_meta->n_sfn = p_pt_ldi->nservices;
88 }
89
90 p_pt->p_metadata = (void *)p_rt_meta;
Summer Qin596f5552022-01-27 18:04:06 +080091}
92
/*
 * Send message and wake up the SP who is waiting on message queue, block the
 * current thread and trigger scheduler.
 */
psa_status_t backend_messaging(struct service_t *service,
                               struct conn_handle_t *handle)
{
    struct partition_t *p_owner = NULL;
    psa_signal_t signal = 0;
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;

    /* Reject malformed service/handle references from the caller. */
    if (!handle || !service || !service->p_ldinf || !service->partition) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    p_owner = service->partition;
    signal = service->p_ldinf->signal;

    /*
     * List insertion and signal updates must be atomic with respect to
     * interrupts that may assert signals for the same partition.
     */
    CRITICAL_SECTION_ENTER(cs_assert);

    UNI_LIST_INSERT_AFTER(p_owner, handle, p_handles);

    /* Messages put. Update signals */
    p_owner->signals_asserted |= signal;

    /* Wake the owner only if it is currently waiting on this signal. */
    if (p_owner->signals_waiting & signal) {
        thrd_wake_up(&p_owner->waitobj,
                     (p_owner->signals_asserted & p_owner->signals_waiting));
        p_owner->signals_waiting &= ~signal;
    }
    CRITICAL_SECTION_LEAVE(cs_assert);

    /*
     * If it is a NS request via RPC, it is unnecessary to block current
     * thread.
     */

    if (!is_tfm_rpc_msg(handle)) {
        thrd_set_wait(&handle->ack_evnt, CURRENT_THREAD);
    }

    handle->status = TFM_HANDLE_STATUS_ACTIVE;

    return PSA_SUCCESS;
}
138
Ken Liu995a9742022-05-18 19:28:30 +0800139psa_status_t backend_replying(struct conn_handle_t *handle, int32_t status)
Ken Liu802a3702021-10-15 12:09:56 +0800140{
Mingyang Suna09adda2022-02-16 18:11:33 +0800141 if (is_tfm_rpc_msg(handle)) {
142 tfm_rpc_client_call_reply(handle, status);
Ken Liu802a3702021-10-15 12:09:56 +0800143 } else {
Mingyang Suna09adda2022-02-16 18:11:33 +0800144 thrd_wake_up(&handle->ack_evnt, status);
Ken Liu802a3702021-10-15 12:09:56 +0800145 }
Ken Liuf39d8eb2021-10-07 12:55:33 +0800146
147 /*
148 * 'psa_reply' exists in IPC model only and returns 'void'. Return
149 * 'PSA_SUCCESS' here always since SPM does not forward the status
150 * to the caller.
151 */
152 return PSA_SUCCESS;
Ken Liu802a3702021-10-15 12:09:56 +0800153}
154
Summer Qin596f5552022-01-27 18:04:06 +0800155extern void sprt_main(void);
156
/* Parameters are treated as assuredly */
void backend_init_comp_assuredly(struct partition_t *p_pt,
                                 uint32_t service_setting)
{
    const struct partition_load_info_t *p_pldi = p_pt->p_ldinf;

#if CONFIG_TFM_DOORBELL_API == 1
    p_pt->signals_allowed |= PSA_DOORBELL;
#endif /* CONFIG_TFM_DOORBELL_API == 1 */

    /* Service-derived signals permitted for this partition. */
    p_pt->signals_allowed |= service_setting;

    THRD_SYNC_INIT(&p_pt->waitobj);
    UNI_LISI_INIT_NODE(p_pt, p_handles);

    /* Set up the thread context over the statically allocated stack. */
    ARCH_CTXCTRL_INIT(&p_pt->ctx_ctrl,
                      LOAD_ALLOCED_STACK_ADDR(p_pldi),
                      p_pldi->stack_size);

    /* Paint the stack before metadata is allocated on top of it. */
    watermark_stack(p_pt);

    prv_process_metadata(p_pt);

    THRD_INIT(&p_pt->thrd, &p_pt->ctx_ctrl,
              TO_THREAD_PRIORITY(PARTITION_PRIORITY(p_pldi->flags)));

#if (CONFIG_TFM_PSA_API_CROSS_CALL == 1) && !defined(TFM_MULTI_CORE_TOPOLOGY)
    /*
     * Single-core cross-call: the NS agent partition's context doubles as
     * the SPM thread context.
     */
    if (p_pldi->pid == TFM_SP_NON_SECURE_ID) {
        SPM_THREAD_CONTEXT = &p_pt->ctx_ctrl;
    }
#endif

    thrd_start(&p_pt->thrd,
               POSITION_TO_ENTRY(sprt_main, thrd_fn_t),
               THRD_GENERAL_EXIT);
}
193
/*
 * Hand control to the thread scheduler and activate the isolation boundary
 * of the first scheduled partition. Returns the control value produced by
 * the scheduler start.
 */
uint32_t backend_system_run(void)
{
    uint32_t control;
    struct partition_t *p_cur_pt;

#if CONFIG_TFM_PSA_API_THREAD_CALL == 1
    /* The SPM thread context must have been set during partition init. */
    TFM_CORE_ASSERT(SPM_THREAD_CONTEXT);
#endif

    /* Publish the location where the scheduler exposes partition metadata. */
    partition_meta_indicator_pos = (uintptr_t *)hal_mem_sp_meta_start;
    control = thrd_start_scheduler(&CURRENT_THREAD);

    /* Recover the owning partition of the thread chosen to run first. */
    p_cur_pt = TO_CONTAINER(CURRENT_THREAD->p_context_ctrl,
                            struct partition_t, ctx_ctrl);

    if (tfm_hal_activate_boundary(p_cur_pt->p_ldinf, p_cur_pt->boundary)
            != TFM_HAL_SUCCESS) {
        tfm_core_panic();
    }

    return control;
}
216
Ken Liu995a9742022-05-18 19:28:30 +0800217psa_signal_t backend_wait(struct partition_t *p_pt, psa_signal_t signal_mask)
Kevin Pengdef92de2021-11-10 16:14:48 +0800218{
Mingyang Sun5c9529f2022-03-15 17:51:56 +0800219 struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
220 psa_signal_t ret_signal;
Kevin Pengdef92de2021-11-10 16:14:48 +0800221
Mingyang Sun5c9529f2022-03-15 17:51:56 +0800222 /*
Ken Liu995a9742022-05-18 19:28:30 +0800223 * 'backend_wait()' sets the waiting signal mask for partition, and
Mingyang Sun5c9529f2022-03-15 17:51:56 +0800224 * blocks the partition thread state to wait for signals.
225 * These changes should be inside the ciritical section to avoid
226 * 'signal_waiting' or the thread state to be changed by interrupts
227 * while this function is reading or writing values.
228 */
229 CRITICAL_SECTION_ENTER(cs_assert);
230
231 ret_signal = p_pt->signals_asserted & signal_mask;
232 if (ret_signal == 0) {
233 p_pt->signals_waiting = signal_mask;
Mingyang Sunac1114e2022-03-23 17:32:07 +0800234 thrd_set_wait(&p_pt->waitobj, CURRENT_THREAD);
Mingyang Sun5c9529f2022-03-15 17:51:56 +0800235 }
236 CRITICAL_SECTION_LEAVE(cs_assert);
237
238 return ret_signal;
Kevin Pengdef92de2021-11-10 16:14:48 +0800239}
240
Ken Liu995a9742022-05-18 19:28:30 +0800241void backend_wake_up(struct partition_t *p_pt)
Kevin Pengdef92de2021-11-10 16:14:48 +0800242{
243 thrd_wake_up(&p_pt->waitobj,
244 p_pt->signals_asserted & p_pt->signals_waiting);
Mingyang Sun09328bd2022-03-25 11:55:13 +0800245 p_pt->signals_waiting = 0;
Kevin Pengdef92de2021-11-10 16:14:48 +0800246}
247
Sherry Zhang049733e2022-04-20 21:37:51 +0800248uint64_t ipc_schedule(void)
249{
250 AAPCS_DUAL_U32_T ctx_ctrls;
251 struct partition_t *p_part_curr, *p_part_next;
252 struct context_ctrl_t *p_curr_ctx;
253 struct thread_t *pth_next = thrd_next();
254 struct critical_section_t cs = CRITICAL_SECTION_STATIC_INIT;
255
256 p_curr_ctx = (struct context_ctrl_t *)(CURRENT_THREAD->p_context_ctrl);
257
258 AAPCS_DUAL_U32_SET(ctx_ctrls, (uint32_t)p_curr_ctx, (uint32_t)p_curr_ctx);
259
260 p_part_curr = GET_CURRENT_COMPONENT();
261 p_part_next = GET_THRD_OWNER(pth_next);
262
263 if (scheduler_lock != SCHEDULER_LOCKED && pth_next != NULL &&
264 p_part_curr != p_part_next) {
265 /* Check if there is enough room on stack to save more context */
266 if ((p_curr_ctx->sp_limit +
267 sizeof(struct tfm_additional_context_t)) > __get_PSP()) {
268 tfm_core_panic();
269 }
270
271 CRITICAL_SECTION_ENTER(cs);
272 /*
273 * If required, let the platform update boundary based on its
274 * implementation. Change privilege, MPU or other configurations.
275 */
Ken Liu967ffa92022-05-25 15:13:34 +0800276 if (p_part_curr->boundary != p_part_next->boundary) {
277 if (tfm_hal_activate_boundary(p_part_next->p_ldinf,
278 p_part_next->boundary)
Sherry Zhang049733e2022-04-20 21:37:51 +0800279 != TFM_HAL_SUCCESS) {
280 tfm_core_panic();
281 }
282 }
283 ARCH_FLUSH_FP_CONTEXT();
284
285 AAPCS_DUAL_U32_SET_A1(ctx_ctrls, (uint32_t)pth_next->p_context_ctrl);
286
287 CURRENT_THREAD = pth_next;
288 CRITICAL_SECTION_LEAVE(cs);
289 }
290
291 /* Update meta indicator */
292 if (partition_meta_indicator_pos && (p_part_next->p_metadata)) {
293 *partition_meta_indicator_pos = (uintptr_t)(p_part_next->p_metadata);
294 }
295 return AAPCS_DUAL_U32_AS_U64(ctx_ctrls);
296}