/*
 * Copyright (c) 2017-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include <stdbool.h>
#include <arm_cmse.h>
#include "tfm_nspm.h"
#include "tfm_api.h"
#include "tfm_arch.h"
#include "tfm_irq_list.h"
#include "psa/service.h"
#include "tfm_core_mem_check.h"
#include "tfm_peripherals_def.h"
#include "tfm_secure_api.h"
#include "tfm_spm_hal.h"
#include "spm_func.h"
#include "region_defs.h"
#include "region.h"
#include "spm_partition_defs.h"
#include "psa_manifest/pid.h"
#include "tfm/tfm_spm_services.h"
#include "tfm_spm_db_func.inc"

#define EXC_RETURN_SECURE_FUNCTION 0xFFFFFFFD
#define EXC_RETURN_SECURE_HANDLER 0xFFFFFFF1

#ifndef TFM_LVL
#error TFM_LVL is not defined!
#endif

REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Base, uint32_t);
REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Limit, struct iovec_args_t)[];

/*
 * This is the "Big Lock" on the secure side, to guarantee single entry
 * to SPE
 */
static int32_t tfm_secure_lock;
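/*
 * Non-zero while TF-M is still running the partition init routines; cleared
 * by tfm_spm_secure_api_init_done(). While set, some of the partition state
 * checks below are relaxed so that the init routines can be called.
 */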
static int32_t tfm_secure_api_initializing = 1;

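/**
 * \brief Create a stack frame that starts the requested secure function in
 *        thread mode on exception return, with the iovec parameters passed
 *        in R0-R3.
 *
 * \param[in] svc_ctx    The stacked SVC context
 * \param[in] desc_ptr   The secure function request descriptor
 * \param[in] iovec_args The iovec parameters to be passed to the function
 * \param[in] dst        A pointer where the context is to be created (the
 *                       pointer is considered to be a stack pointer, and
 *                       the frame is created below it)
 *
 * \return A pointer to the created stack frame.
 */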
static uint32_t *prepare_partition_iovec_ctx(
                             const struct tfm_state_context_t *svc_ctx,
                             const struct tfm_sfn_req_s *desc_ptr,
                             const struct iovec_args_t *iovec_args,
                             uint32_t *dst)
{
    /* XPSR = as was when called, but make sure it's thread mode */
    *(--dst) = svc_ctx->xpsr & 0xFFFFFE00U;
    /* ReturnAddress = resume veneer in new context */
    *(--dst) = svc_ctx->ra;
    /* LR = sfn address */
    *(--dst) = (uint32_t)desc_ptr->sfn;
    /* R12 = don't care */
    *(--dst) = 0U;

    /* R0-R3 = sfn arguments */
    *(--dst) = iovec_args->out_len;
    *(--dst) = (uint32_t)iovec_args->out_vec;
    *(--dst) = iovec_args->in_len;
    *(--dst) = (uint32_t)iovec_args->in_vec;

    return dst;
}

/**
 * \brief Create a stack frame that sets the execution environment to thread
 *        mode on exception return.
 *
 * \param[in] svc_ctx        The stacked SVC context
 * \param[in] unpriv_handler The unprivileged IRQ handler to be called
 * \param[in] dst            A pointer where the context is to be created (the
 *                           pointer is considered to be a stack pointer, and
 *                           the frame is created below it)
 *
 * \return A pointer to the created stack frame.
 */
static int32_t *prepare_partition_irq_ctx(
                             const struct tfm_state_context_t *svc_ctx,
                             sfn_t unpriv_handler,
                             int32_t *dst)
{
    int i;

    /* XPSR = as was when called, but make sure it's thread mode */
    *(--dst) = svc_ctx->xpsr & 0xFFFFFE00;
    /* ReturnAddress = resume to the privileged handler code, but execute it
     * unprivileged.
     */
    *(--dst) = svc_ctx->ra;
    /* LR = start address */
    *(--dst) = (int32_t)unpriv_handler;

    /* R12, R0-R3 unused arguments */
    for (i = 0; i < 5; ++i) {
        *(--dst) = 0;
    }

    return dst;
}

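/**
 * \brief Copy the return address and the return value (R0) from the SVC
 *        context of the returning partition into the caller's saved context.
 *
 * \param[in]  svc_ctx    The stacked SVC context of the returning partition
 * \param[out] target_ctx The caller context to be restored
 */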
static void restore_caller_ctx(const struct tfm_state_context_t *svc_ctx,
                               struct tfm_state_context_t *target_ctx)
{
    /* ReturnAddress = resume veneer after second SVC */
    target_ctx->ra = svc_ctx->ra;

    /* R0 = function return value */
    target_ctx->r0 = svc_ctx->r0;

    return;
}

/**
 * \brief Check whether the iovec parameters are valid, and the memory ranges
 *        are in the possession of the calling partition.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 *
 * \return Return \ref TFM_SUCCESS if the iovec parameters are valid, error
 *         code otherwise as in \ref tfm_status_e
 */
static enum tfm_status_e tfm_core_check_sfn_parameters(
                                           const struct tfm_sfn_req_s *desc_ptr)
{
    struct psa_invec *in_vec = (psa_invec *)desc_ptr->args[0];
    size_t in_len;
    struct psa_outvec *out_vec = (psa_outvec *)desc_ptr->args[2];
    size_t out_len;
    uint32_t i;

    if ((desc_ptr->args[1] < 0) || (desc_ptr->args[3] < 0)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    in_len = (size_t)(desc_ptr->args[1]);
    out_len = (size_t)(desc_ptr->args[3]);

    /* The number of vectors must be within range. Extra checks to avoid
     * overflow.
     */
    if ((in_len > PSA_MAX_IOVEC) || (out_len > PSA_MAX_IOVEC) ||
        (in_len + out_len > PSA_MAX_IOVEC)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    /* Check whether the caller partition has write access to the iovec
     * structures themselves. Use the TT instruction for this.
     */
    if (in_len > 0) {
        if ((in_vec == NULL) ||
            (tfm_core_has_write_access_to_region(in_vec,
                            sizeof(psa_invec)*in_len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (in_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }
    if (out_len > 0) {
        if ((out_vec == NULL) ||
            (tfm_core_has_write_access_to_region(out_vec,
                            sizeof(psa_outvec)*out_len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (out_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }

    /* Check whether the caller partition has access to the data inside the
     * iovecs
     */
    for (i = 0; i < in_len; ++i) {
        if (in_vec[i].len > 0) {
            if ((in_vec[i].base == NULL) ||
                (tfm_core_has_read_access_to_region(in_vec[i].base,
                            in_vec[i].len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }
    for (i = 0; i < out_len; ++i) {
        if (out_vec[i].len > 0) {
            if ((out_vec[i].base == NULL) ||
                (tfm_core_has_write_access_to_region(out_vec[i].base,
                            out_vec[i].len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }

    return TFM_SUCCESS;
}

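/**
 * \brief Copy the iovec parameters from \p source to \p target.
 *
 * \param[out] target  Where the iovec parameters are copied to
 * \param[in]  source  The iovec parameters to be copied
 */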
static void tfm_copy_iovec_parameters(struct iovec_args_t *target,
                                      const struct iovec_args_t *source)
{
    size_t i;

    /* The vectors have been sanity checked already, and the interrupts have
     * been kept disabled since then, so the vectors cannot have been tampered
     * with since the check. It is therefore safe to pass them to the called
     * partition.
     */

    target->in_len = source->in_len;
    for (i = 0; i < source->in_len; ++i) {
        target->in_vec[i].base = source->in_vec[i].base;
        target->in_vec[i].len = source->in_vec[i].len;
    }
    target->out_len = source->out_len;
    for (i = 0; i < source->out_len; ++i) {
        target->out_vec[i].base = source->out_vec[i].base;
        target->out_vec[i].len = source->out_vec[i].len;
    }
}

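/**
 * \brief Clear the iovec parameters in \p args.
 *
 * \param[out] args  The iovec parameters to be cleared
 */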
static void tfm_clear_iovec_parameters(struct iovec_args_t *args)
{
    int i;

    args->in_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        args->in_vec[i].base = NULL;
        args->in_vec[i].len = 0;
    }
    args->out_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        args->out_vec[i].base = NULL;
        args->out_vec[i].len = 0;
    }
}

/**
 * \brief Check whether the partitions for the secure function call are in a
 *        proper state.
 *
 * \param[in] curr_partition_state    State of the partition to be called
 * \param[in] caller_partition_state  State of the caller partition
 *
 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
 */
static enum tfm_status_e check_partition_state(uint32_t curr_partition_state,
                                               uint32_t caller_partition_state)
{
    if (caller_partition_state != SPM_PARTITION_STATE_RUNNING) {
        /* Calling a partition from a non-running state (e.g. during IRQ
         * handling) is not allowed.
         */
        return TFM_ERROR_INVALID_EXC_MODE;
    }

    if (curr_partition_state == SPM_PARTITION_STATE_RUNNING ||
        curr_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        curr_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
        curr_partition_state == SPM_PARTITION_STATE_BLOCKED) {
        /* Active partitions cannot be called! */
        return TFM_ERROR_PARTITION_NON_REENTRANT;
    } else if (curr_partition_state != SPM_PARTITION_STATE_IDLE) {
        /* The partition to be called is not in a proper state */
        return TFM_SECURE_LOCK_FAILED;
    }
    return TFM_SUCCESS;
}

/**
 * \brief Check whether the partition that is to handle an IRQ is in a proper
 *        state to do so.
 *
 * \param[in] called_partition_state  State of the partition to be called
 *
 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
 */
static enum tfm_status_e check_irq_partition_state(
                                                uint32_t called_partition_state)
{
    if (called_partition_state == SPM_PARTITION_STATE_IDLE ||
        called_partition_state == SPM_PARTITION_STATE_RUNNING ||
        called_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        called_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
        called_partition_state == SPM_PARTITION_STATE_BLOCKED) {
        return TFM_SUCCESS;
    }
    return TFM_SECURE_LOCK_FAILED;
}

/**
 * \brief Calculate the address where the iovec parameters are to be saved for
 *        the called partition.
 *
 * \param[in] partition_idx  The index of the partition to be called.
 *
 * \return The address where the iovec parameters should be saved.
 */
static struct iovec_args_t *get_iovec_args_stack_address(uint32_t partition_idx)
{
    /* Save the iovecs on the common stack. */
    return &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
}

/**
 * \brief Returns the index of the partition with the given partition ID.
 *
 * \param[in] partition_id  Partition id
 *
 * \return the partition idx if partition_id is valid,
 *         \ref SPM_INVALID_PARTITION_IDX otherwise
 */
static uint32_t get_partition_idx(uint32_t partition_id)
{
    uint32_t i;

    if (partition_id == INVALID_PARTITION_ID) {
        return SPM_INVALID_PARTITION_IDX;
    }

    for (i = 0; i < g_spm_partition_db.partition_count; ++i) {
        if (g_spm_partition_db.partitions[i].static_data->partition_id ==
                partition_id) {
            return i;
        }
    }
    return SPM_INVALID_PARTITION_IDX;
}

/**
 * \brief Get the flags associated with a partition
 *
 * \param[in] partition_idx  Partition index
 *
 * \return Flags associated with the partition
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_flags(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
           partition_flags;
}

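/**
 * \brief Set up the execution context for a secure function call and mark the
 *        target partition as running.
 *
 * \param[in] desc_ptr   The secure function request descriptor
 * \param[in] excReturn  The EXC_RETURN value of the SVC requesting the call
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise as in
 *         \ref tfm_status_e
 */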
static enum tfm_status_e tfm_start_partition(
                                         const struct tfm_sfn_req_s *desc_ptr,
                                         uint32_t excReturn)
{
    enum tfm_status_e res;
    uint32_t caller_partition_idx = desc_ptr->caller_part_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;
    const struct spm_partition_runtime_data_t *caller_part_data;
    uint32_t caller_flags;
    register uint32_t partition_idx;
    uint32_t psp;
    uint32_t partition_psp, partition_psplim;
    uint32_t partition_state;
    uint32_t caller_partition_state;
    struct tfm_state_context_t *svc_ctx;
    uint32_t caller_partition_id;
    int32_t client_id;
    struct iovec_args_t *iovec_args;

    psp = __get_PSP();
    svc_ctx = (struct tfm_state_context_t *)psp;
    caller_flags = tfm_spm_partition_get_flags(caller_partition_idx);

    /* Check partition state consistency */
    if (((caller_flags & SPM_PART_FLAG_APP_ROT) != 0)
        != (!desc_ptr->ns_caller)) {
        /* Partition state inconsistency detected */
        return TFM_SECURE_LOCK_FAILED;
    }

    partition_idx = get_partition_idx(desc_ptr->sp_id);

    curr_part_data = tfm_spm_partition_get_runtime_data(partition_idx);
    caller_part_data = tfm_spm_partition_get_runtime_data(caller_partition_idx);
    partition_state = curr_part_data->partition_state;
    caller_partition_state = caller_part_data->partition_state;
    caller_partition_id = tfm_spm_partition_get_partition_id(
                                                          caller_partition_idx);

    if (!tfm_secure_api_initializing) {
        res = check_partition_state(partition_state, caller_partition_state);
        if (res != TFM_SUCCESS) {
            return res;
        }
    }

    /* Prepare switch to shared secure partition stack */
    /* In case the call is coming from the non-secure world, we save the iovecs
     * on the top of the stack, so the memory area that can actually be used as
     * stack by the partitions starts at a lower address.
     */
    partition_psp =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
    partition_psplim =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Base);

    /* Store the context for the partition call */
    tfm_spm_partition_set_caller_partition_idx(partition_idx,
                                               caller_partition_idx);
    tfm_spm_partition_store_context(caller_partition_idx, psp, excReturn);

    if ((caller_flags & SPM_PART_FLAG_APP_ROT)) {
        tfm_spm_partition_set_caller_client_id(partition_idx,
                                               caller_partition_id);
    } else {
        client_id = tfm_nspm_get_current_client_id();
        if (client_id >= 0) {
            return TFM_SECURE_LOCK_FAILED;
        }
        tfm_spm_partition_set_caller_client_id(partition_idx, client_id);
    }

    /* In level one, only switch context and return from exception if in
     * handler mode
     */
    if ((desc_ptr->ns_caller) || (tfm_secure_api_initializing)) {
        if (tfm_spm_partition_set_iovec(partition_idx, desc_ptr->args) !=
            SPM_ERR_OK) {
            return TFM_ERROR_GENERIC;
        }
        iovec_args = get_iovec_args_stack_address(partition_idx);
        tfm_copy_iovec_parameters(iovec_args, &(curr_part_data->iovec_args));

        /* Prepare the partition context, update stack ptr */
        psp = (uint32_t)prepare_partition_iovec_ctx(svc_ctx, desc_ptr,
                                                    iovec_args,
                                                    (uint32_t *)partition_psp);
        __set_PSP(psp);
        tfm_arch_set_psplim(partition_psplim);
    }

    tfm_spm_partition_set_state(caller_partition_idx,
                                SPM_PARTITION_STATE_BLOCKED);
    tfm_spm_partition_set_state(partition_idx, SPM_PARTITION_STATE_RUNNING);
    tfm_secure_lock++;

    return TFM_SUCCESS;
}

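/**
 * \brief Set up the execution environment for running a partition's
 *        unprivileged IRQ handler in thread mode.
 *
 * \param[in] excReturn  The EXC_RETURN value of the SVC requesting the
 *                       deprivileged handling
 * \param[in] svc_ctx    The stacked SVC context (R0-R3 carry the handler
 *                       partition ID, the handler address, the IRQ signal and
 *                       the IRQ line)
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 */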
static enum tfm_status_e tfm_start_partition_for_irq_handling(
                                          uint32_t excReturn,
                                          struct tfm_state_context_t *svc_ctx)
{
    uint32_t handler_partition_id = svc_ctx->r0;
    sfn_t unpriv_handler = (sfn_t)svc_ctx->r1;
    uint32_t irq_signal = svc_ctx->r2;
    IRQn_Type irq_line = (IRQn_Type) svc_ctx->r3;
    enum tfm_status_e res;
    uint32_t psp = __get_PSP();
    uint32_t handler_partition_psp;
    uint32_t handler_partition_state;
    uint32_t interrupted_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t handler_partition_idx;

    handler_partition_idx = get_partition_idx(handler_partition_id);
    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                        handler_partition_idx);
    handler_partition_state = handler_part_data->partition_state;

    res = check_irq_partition_state(handler_partition_state);
    if (res != TFM_SUCCESS) {
        return res;
    }

    /* set mask for the partition */
    tfm_spm_partition_set_signal_mask(
                                  handler_partition_idx,
                                  handler_part_data->signal_mask | irq_signal);

    tfm_spm_hal_disable_irq(irq_line);

    /* save the current context of the interrupted partition */
    tfm_spm_partition_push_interrupted_ctx(interrupted_partition_idx);

    handler_partition_psp = psp;

    /* save the current context of the handler partition */
    tfm_spm_partition_push_handler_ctx(handler_partition_idx);

    /* Store caller for the partition */
    tfm_spm_partition_set_caller_partition_idx(handler_partition_idx,
                                               interrupted_partition_idx);

    psp = (uint32_t)prepare_partition_irq_ctx(svc_ctx, unpriv_handler,
                                             (int32_t *)handler_partition_psp);
    __set_PSP(psp);

    tfm_spm_partition_set_state(interrupted_partition_idx,
                                SPM_PARTITION_STATE_SUSPENDED);
    tfm_spm_partition_set_state(handler_partition_idx,
                                SPM_PARTITION_STATE_HANDLING_IRQ);

    return TFM_SUCCESS;
}

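/**
 * \brief Restore the caller's context when a secure function returns, and
 *        update the partition states accordingly.
 *
 * \param[out] excReturn  Pointer to the EXC_RETURN value to be used for the
 *                        return, updated when the caller context is restored
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 */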
static enum tfm_status_e tfm_return_from_partition(uint32_t *excReturn)
{
    uint32_t current_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *curr_part_data, *ret_part_data;
    uint32_t return_partition_idx;
    uint32_t return_partition_flags;
    uint32_t psp = __get_PSP();
    size_t i;
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;
    struct iovec_args_t *iovec_args;

    if (current_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    curr_part_data = tfm_spm_partition_get_runtime_data(current_partition_idx);
    return_partition_idx = curr_part_data->caller_partition_idx;

    if (return_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    ret_part_data = tfm_spm_partition_get_runtime_data(return_partition_idx);

    return_partition_flags = tfm_spm_partition_get_flags(return_partition_idx);

    tfm_secure_lock--;

    if (!(return_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        (tfm_secure_api_initializing)) {
        /* In TFM level 1 context restore is only done when
         * returning to NS or after initialization
         */
        /* Restore caller context */
        restore_caller_ctx(svc_ctx,
            (struct tfm_state_context_t *)ret_part_data->stack_ptr);
        *excReturn = ret_part_data->lr;
        __set_PSP(ret_part_data->stack_ptr);
        REGION_DECLARE_T(Image$$, ARM_LIB_STACK, $$ZI$$Base, uint32_t)[];
        uint32_t psp_stack_bottom =
            (uint32_t)REGION_NAME(Image$$, ARM_LIB_STACK, $$ZI$$Base);
        tfm_arch_set_psplim(psp_stack_bottom);

        iovec_args = &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];

        for (i = 0; i < curr_part_data->iovec_args.out_len; ++i) {
            curr_part_data->orig_outvec[i].len = iovec_args->out_vec[i].len;
        }
        tfm_clear_iovec_parameters(iovec_args);
    }

    tfm_spm_partition_cleanup_context(current_partition_idx);

    tfm_spm_partition_set_state(current_partition_idx,
                                SPM_PARTITION_STATE_IDLE);
    tfm_spm_partition_set_state(return_partition_idx,
                                SPM_PARTITION_STATE_RUNNING);

    return TFM_SUCCESS;
}

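/**
 * \brief Restore the interrupted context when a deprivileged IRQ handler has
 *        finished, and update the partition states accordingly.
 *
 * \param[out] excReturn  Pointer to the EXC_RETURN value to be used for the
 *                        return
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 */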
static enum tfm_status_e tfm_return_from_partition_irq_handling(
                                                        uint32_t *excReturn)
{
    uint32_t handler_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t interrupted_partition_idx;
    uint32_t psp = __get_PSP();
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;

    if (handler_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                        handler_partition_idx);
    interrupted_partition_idx = handler_part_data->caller_partition_idx;

    if (interrupted_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    /* For level 1, modify PSP, so that the SVC stack frame disappears,
     * and return to the privileged handler using the stack frame still on the
     * MSP stack.
     */
    *excReturn = svc_ctx->ra;
    psp += sizeof(struct tfm_state_context_t);

    tfm_spm_partition_pop_handler_ctx(handler_partition_idx);
    tfm_spm_partition_pop_interrupted_ctx(interrupted_partition_idx);

    __set_PSP(psp);

    return TFM_SUCCESS;
}

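/**
 * \brief Check that a secure function request descriptor is well formed.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 *
 * \return \ref TFM_SUCCESS if the descriptor is valid,
 *         \ref TFM_ERROR_INVALID_PARAMETER otherwise.
 */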
static enum tfm_status_e tfm_check_sfn_req_integrity(
                                           const struct tfm_sfn_req_s *desc_ptr)
{
    if ((desc_ptr == NULL) ||
        (desc_ptr->sp_id == 0) ||
        (desc_ptr->sfn == NULL)) {
        /* invalid parameter */
        return TFM_ERROR_INVALID_PARAMETER;
    }
    return TFM_SUCCESS;
}

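/**
 * \brief Check that a secure function request is allowed in the current state
 *        of the SPM (caller validity, secure lock, initialization phase).
 *
 * \param[in] desc_ptr  The secure function request descriptor
 *
 * \return \ref TFM_SUCCESS if the request is allowed, error code otherwise.
 */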
static enum tfm_status_e tfm_core_check_sfn_req_rules(
                                           const struct tfm_sfn_req_s *desc_ptr)
{
    /* Check partition idx validity */
    if (desc_ptr->caller_part_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_ERROR_NO_ACTIVE_PARTITION;
    }

    if ((desc_ptr->ns_caller) && (tfm_secure_lock != 0)) {
        /* Secure domain is already locked!
         * This should only happen if caller is secure partition!
         */
        /* This scenario is a potential security breach.
         * Error is handled in caller.
         */
        return TFM_ERROR_SECURE_DOMAIN_LOCKED;
    }

    if (tfm_secure_api_initializing) {
        int32_t id =
            tfm_spm_partition_get_partition_id(desc_ptr->caller_part_idx);

        if ((id != TFM_SP_CORE_ID) || (tfm_secure_lock != 0)) {
            /* Invalid request during system initialization */
            ERROR_MSG("Invalid service request during initialization!");
            return TFM_ERROR_NOT_INITIALIZED;
        }
    }

    return TFM_SUCCESS;
}

uint32_t tfm_spm_partition_get_partition_id(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
           partition_id;
}

uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
{
    if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
        return TFM_PARTITION_PRIVILEGED_MODE;
    } else {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
}

bool tfm_is_partition_privileged(uint32_t partition_idx)
{
    uint32_t flags = tfm_spm_partition_get_flags(partition_idx);

    return tfm_spm_partition_get_privileged_mode(flags) ==
           TFM_PARTITION_PRIVILEGED_MODE;
}

void tfm_spm_secure_api_init_done(void)
{
    tfm_secure_api_initializing = 0;
}

enum tfm_status_e tfm_spm_sfn_request_handler(
                             struct tfm_sfn_req_s *desc_ptr, uint32_t excReturn)
{
    enum tfm_status_e res;

    res = tfm_check_sfn_req_integrity(desc_ptr);
    if (res != TFM_SUCCESS) {
        ERROR_MSG("Invalid service request!");
        tfm_secure_api_error_handler();
    }

    __disable_irq();

    desc_ptr->caller_part_idx = tfm_spm_partition_get_running_partition_idx();

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        __enable_irq();
        tfm_secure_api_error_handler();
    }

    res = tfm_core_check_sfn_req_rules(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* FixMe: error compartmentalization TBD */
        tfm_spm_partition_set_state(
            desc_ptr->caller_part_idx, SPM_PARTITION_STATE_CLOSED);
        __enable_irq();
        ERROR_MSG("Unauthorized service request!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition(desc_ptr, excReturn);
    if (res != TFM_SUCCESS) {
        /* FixMe: consider possible fault scenarios */
        __enable_irq();
        ERROR_MSG("Failed to process service request!");
        tfm_secure_api_error_handler();
    }

    __enable_irq();

    return res;
}

int32_t tfm_spm_sfn_request_thread_mode(struct tfm_sfn_req_s *desc_ptr)
{
    enum tfm_status_e res;
    int32_t *args;
    int32_t retVal;

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        return (int32_t)res;
    }

    /* No excReturn value is needed as no exception handling is used */
    res = tfm_spm_sfn_request_handler(desc_ptr, 0);

    if (res != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    /* Secure partition to secure partition call in TFM level 1 */
    args = desc_ptr->args;
    retVal = desc_ptr->sfn(args[0], args[1], args[2], args[3]);

    /* return handler should restore original exc_return value... */
    res = tfm_return_from_partition(NULL);
    if (res == TFM_SUCCESS) {
        /* If unlock successful, pass SS return value to caller */
        return retVal;
    } else {
        /* Unlock errors indicate ctx database corruption or unknown
         * anomalies. Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }
    return (int32_t)res;
}

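/**
 * \brief Check whether a buffer lies entirely within secure data memory and
 *        meets the requested alignment.
 *
 * \param[in] partition_idx  The index of the partition on whose behalf the
 *                           access is checked
 * \param[in] start_addr     Start address of the buffer
 * \param[in] len            Length of the buffer in bytes
 * \param[in] alignment      Required alignment, expressed as a power of two
 *
 * \return 1 if the buffer is accessible, 0 otherwise.
 */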
int32_t tfm_spm_check_buffer_access(uint32_t partition_idx,
                                    void *start_addr,
                                    size_t len,
                                    uint32_t alignment)
{
    uintptr_t start_addr_value = (uintptr_t)start_addr;
    uintptr_t end_addr_value = (uintptr_t)start_addr + len;
    uintptr_t alignment_mask;

    alignment_mask = (((uintptr_t)1) << alignment) - 1;

    /* Check that the pointer is aligned properly */
    if (start_addr_value & alignment_mask) {
        /* not aligned, return error */
        return 0;
    }

    /* Protect against overflow (and zero len) */
    if (end_addr_value <= start_addr_value) {
        return 0;
    }

    /* For privileged partition execution, all secure data memory and stack
     * is accessible
     */
    if (start_addr_value >= S_DATA_START &&
        end_addr_value <= (S_DATA_START + S_DATA_SIZE)) {
        return 1;
    }

    return 0;
}

void tfm_spm_get_caller_client_id_handler(uint32_t *svc_args)
{
    uintptr_t result_ptr_value = svc_args[0];
    uint32_t running_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const uint32_t running_partition_flags =
            tfm_spm_partition_get_flags(running_partition_idx);
    const struct spm_partition_runtime_data_t *curr_part_data =
            tfm_spm_partition_get_runtime_data(running_partition_idx);
    int res = 0;

    if (!(running_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_SUSPENDED) {
        /* This handler shouldn't be called from outside partition context.
         * Also, if the current partition is handling an IRQ, the caller
         * partition index might not be valid.
         * Partitions are only allowed to run while the S domain is locked.
         */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    /* Make sure that the output pointer points to a memory area that is owned
     * by the partition
     */
    res = tfm_spm_check_buffer_access(running_partition_idx,
                                      (void *)result_ptr_value,
                                      sizeof(curr_part_data->caller_client_id),
                                      2);
    if (!res) {
        /* Not in accessible range, return error */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    *((int32_t *)result_ptr_value) = curr_part_data->caller_client_id;

    /* Store return value in r0 */
    svc_args[0] = (uint32_t)TFM_SUCCESS;
}

/* This SVC handler is called if the veneer is running in thread mode */
uint32_t tfm_spm_partition_request_svc_handler(
        const uint32_t *svc_ctx, uint32_t excReturn)
{
    struct tfm_sfn_req_s *desc_ptr;

    if (!(excReturn & EXC_RETURN_STACK_PROCESS)) {
        /* Service request SVC called with MSP active.
         * Either invalid configuration for Thread mode or SVC called
         * from Handler mode, which is not supported.
         * FixMe: error severity TBD
         */
        ERROR_MSG("Service request SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    desc_ptr = (struct tfm_sfn_req_s *)svc_ctx[0];

    if (tfm_spm_sfn_request_handler(desc_ptr, excReturn) != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    return EXC_RETURN_SECURE_FUNCTION;
}

/* This SVC handler is called, if a thread mode execution environment is to
 * be set up, to run an unprivileged IRQ handler
 */
uint32_t tfm_spm_depriv_req_handler(uint32_t *svc_args, uint32_t excReturn)
{
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;

    enum tfm_status_e res;

    if (excReturn & EXC_RETURN_STACK_PROCESS) {
        /* FixMe: error severity TBD */
        ERROR_MSG("Partition request SVC called with PSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition_for_irq_handling(excReturn, svc_ctx);
    if (res != TFM_SUCCESS) {
        /* The partition is in an invalid state (UNINIT or CLOSED), so none of
         * its code can be run
         */
        /* FixMe: For now this case is handled with TF-M panic, however it
         * would be possible to skip the execution of the interrupt handler,
         * and resume the execution of the interrupted code.
         */
        tfm_secure_api_error_handler();
    }
    return EXC_RETURN_SECURE_FUNCTION;
}

/* This SVC handler is called when sfn returns */
uint32_t tfm_spm_partition_return_handler(uint32_t lr)
{
    enum tfm_status_e res;

    if (!(lr & EXC_RETURN_STACK_PROCESS)) {
        /* Partition return SVC called with MSP active.
         * This should not happen!
         */
        ERROR_MSG("Partition return SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_return_from_partition(&lr);
    if (res != TFM_SUCCESS) {
        /* Unlock errors indicate ctx database corruption or unknown anomalies.
         * Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }

    return lr;
}

/* This SVC handler is called if a deprivileged IRQ handler was executed, and
 * the execution environment is to be set back for the privileged handler mode
 */
uint32_t tfm_spm_depriv_return_handler(uint32_t *irq_svc_args, uint32_t lr)
{
    enum tfm_status_e res;
    struct tfm_state_context_t *irq_svc_ctx =
            (struct tfm_state_context_t *)irq_svc_args;

    if (!(lr & EXC_RETURN_STACK_PROCESS)) {
        /* Partition request SVC called with MSP active.
         * FixMe: error severity TBD
         */
        ERROR_MSG("Partition request SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_return_from_partition_irq_handling(&lr);
    if (res != TFM_SUCCESS) {
        /* Unlock errors indicate ctx database corruption or unknown anomalies.
         * Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }

    irq_svc_ctx->ra = lr;

    return EXC_RETURN_SECURE_HANDLER;
}

/* FIXME: get_irq_line_for_signal is also implemented in the ipc folder. */
/**
 * \brief Return the IRQ line number associated with a signal
 *
 * \param[in] partition_id  The ID of the partition in which we look for the
 *                          signal
 * \param[in] signal        The signal we do the query for
 *
 * \retval >=0  The IRQ line number associated with a signal in the partition
 * \retval <0   error
 */
static IRQn_Type get_irq_line_for_signal(int32_t partition_id,
                                         psa_signal_t signal)
{
    size_t i;

    for (i = 0; i < tfm_core_irq_signals_count; ++i) {
        if (tfm_core_irq_signals[i].partition_id == partition_id &&
            tfm_core_irq_signals[i].signal_value == signal) {
            return tfm_core_irq_signals[i].irq_line;
        }
    }
    return (IRQn_Type) -1;
}

void tfm_spm_enable_irq_handler(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t running_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    uint32_t running_partition_id =
            tfm_spm_partition_get_partition_id(running_partition_idx);
    IRQn_Type irq_line;

    /* Only a single signal is allowed */
    if (!tfm_is_one_bit_set(irq_signal)) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    tfm_spm_hal_enable_irq(irq_line);
}

void tfm_spm_disable_irq_handler(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t running_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    uint32_t running_partition_id =
            tfm_spm_partition_get_partition_id(running_partition_idx);
    IRQn_Type irq_line;

    /* Only a single signal is allowed */
    if (!tfm_is_one_bit_set(irq_signal)) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    tfm_spm_hal_disable_irq(irq_line);
}

void tfm_spm_psa_wait(uint32_t *svc_args)
{
    /* Look for partition that is ready for run */
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;
    uint32_t running_partition_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;

    psa_signal_t signal_mask = svc_ctx->r0;
    uint32_t timeout = svc_ctx->r1;

    /*
     * Timeout[30:0] are reserved for future use.
     * SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    running_partition_idx = tfm_spm_partition_get_running_partition_idx();
    curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);

    if (timeout == PSA_BLOCK) {
        /* FIXME: Scheduling is not available in library model, and busy wait
         * is also not possible as this code is running in SVC context, and it
         * cannot be pre-empted by interrupts. So do nothing here for now
         */
        (void) signal_mask;
    }

    svc_ctx->r0 = curr_part_data->signal_mask;
}

void tfm_spm_psa_eoi(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t signal_mask;
    uint32_t running_partition_idx;
    uint32_t running_partition_id;
    const struct spm_partition_runtime_data_t *curr_part_data;
    IRQn_Type irq_line;

    running_partition_idx = tfm_spm_partition_get_running_partition_idx();
    running_partition_id =
            tfm_spm_partition_get_partition_id(running_partition_idx);
    curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);

    /* Only a single signal is allowed */
    if (!tfm_is_one_bit_set(irq_signal)) {
        tfm_secure_api_error_handler();
    }

    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    tfm_spm_hal_clear_pending_irq(irq_line);
    tfm_spm_hal_enable_irq(irq_line);

    signal_mask = curr_part_data->signal_mask & ~irq_signal;
    tfm_spm_partition_set_signal_mask(running_partition_idx, signal_mask);
}

/*
 * This function is called when a secure partition causes an error.
 * In case of an error in the error handling, a non-zero value has to be
 * returned.
 */
static void tfm_spm_partition_err_handler(
        const struct spm_partition_desc_t *partition,
        int32_t err_code)
{
    (void)err_code;

    tfm_spm_partition_set_state(partition->static_data->partition_id,
                                SPM_PARTITION_STATE_CLOSED);
}

enum spm_err_t tfm_spm_partition_init(void)
{
    struct spm_partition_desc_t *part;
    struct tfm_sfn_req_s desc;
    int32_t args[4] = {0};
    int32_t fail_cnt = 0;
    uint32_t idx;
    const struct tfm_spm_partition_platform_data_t **platform_data_p;

    /* Call the init function for each partition */
    for (idx = 0; idx < g_spm_partition_db.partition_count; ++idx) {
        part = &g_spm_partition_db.partitions[idx];
        platform_data_p = part->platform_data_list;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(idx,
                        *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    fail_cnt++;
                }
                ++platform_data_p;
            }
        }
        if (part->static_data->partition_init == NULL) {
            tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            tfm_spm_partition_set_caller_partition_idx(idx,
                                                    SPM_INVALID_PARTITION_IDX);
        } else {
            int32_t res;

            desc.args = args;
            desc.ns_caller = false;
            desc.sfn = (sfn_t)part->static_data->partition_init;
            desc.sp_id = part->static_data->partition_id;
            res = tfm_core_sfn_request(&desc);
            if (res == TFM_SUCCESS) {
                tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            } else {
                tfm_spm_partition_err_handler(part, res);
                fail_cnt++;
            }
        }
    }

    tfm_spm_secure_api_init_done();

    if (fail_cnt == 0) {
        return SPM_ERR_OK;
    } else {
        return SPM_ERR_PARTITION_NOT_AVAILABLE;
    }
}

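/*
 * The following helpers push and pop partition state onto the per-partition
 * context stack (ctx_stack_ptr). They are used when an IRQ interrupts a
 * partition and when a deprivileged IRQ handler starts and finishes.
 */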
void tfm_spm_partition_push_interrupted_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
            &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct interrupted_ctx_stack_frame_t *stack_frame =
        (struct interrupted_ctx_stack_frame_t *)runtime_data->ctx_stack_ptr;

    stack_frame->partition_state = runtime_data->partition_state;

    runtime_data->ctx_stack_ptr +=
            sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
}

void tfm_spm_partition_pop_interrupted_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
            &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct interrupted_ctx_stack_frame_t *stack_frame;

    runtime_data->ctx_stack_ptr -=
            sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);

    stack_frame = (struct interrupted_ctx_stack_frame_t *)
                  runtime_data->ctx_stack_ptr;
    tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
    stack_frame->partition_state = 0;
}

void tfm_spm_partition_push_handler_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
            &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct handler_ctx_stack_frame_t *stack_frame =
            (struct handler_ctx_stack_frame_t *)
            runtime_data->ctx_stack_ptr;

    stack_frame->partition_state = runtime_data->partition_state;
    stack_frame->caller_partition_idx = runtime_data->caller_partition_idx;

    runtime_data->ctx_stack_ptr +=
            sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
}

void tfm_spm_partition_pop_handler_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
            &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct handler_ctx_stack_frame_t *stack_frame;

    runtime_data->ctx_stack_ptr -=
            sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);

    stack_frame = (struct handler_ctx_stack_frame_t *)
                  runtime_data->ctx_stack_ptr;

    tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
    stack_frame->partition_state = 0;
    tfm_spm_partition_set_caller_partition_idx(
            partition_idx, stack_frame->caller_partition_idx);
    stack_frame->caller_partition_idx = 0;
}

void tfm_spm_partition_store_context(uint32_t partition_idx,
        uint32_t stack_ptr, uint32_t lr)
{
    g_spm_partition_db.partitions[partition_idx].
            runtime_data.stack_ptr = stack_ptr;
    g_spm_partition_db.partitions[partition_idx].
            runtime_data.lr = lr;
}

const struct spm_partition_runtime_data_t *
            tfm_spm_partition_get_runtime_data(uint32_t partition_idx)
{
    return &(g_spm_partition_db.partitions[partition_idx].runtime_data);
}

void tfm_spm_partition_set_state(uint32_t partition_idx, uint32_t state)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.partition_state =
            state;
    if (state == SPM_PARTITION_STATE_RUNNING ||
        state == SPM_PARTITION_STATE_HANDLING_IRQ) {
        g_spm_partition_db.running_partition_idx = partition_idx;
    }
}

void tfm_spm_partition_set_caller_partition_idx(uint32_t partition_idx,
                                                uint32_t caller_partition_idx)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.
            caller_partition_idx = caller_partition_idx;
}

void tfm_spm_partition_set_signal_mask(uint32_t partition_idx,
                                       uint32_t signal_mask)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.
            signal_mask = signal_mask;
}

void tfm_spm_partition_set_caller_client_id(uint32_t partition_idx,
                                            int32_t caller_client_id)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.
            caller_client_id = caller_client_id;
}

enum spm_err_t tfm_spm_partition_set_iovec(uint32_t partition_idx,
                                           const int32_t *args)
{
    struct spm_partition_runtime_data_t *runtime_data =
            &g_spm_partition_db.partitions[partition_idx].runtime_data;
    size_t i;

    if ((args[1] < 0) || (args[3] < 0)) {
        return SPM_ERR_INVALID_PARAMETER;
    }

    runtime_data->iovec_args.in_len = (size_t)args[1];
    for (i = 0U; i < runtime_data->iovec_args.in_len; ++i) {
        runtime_data->iovec_args.in_vec[i].base =
                ((psa_invec *)args[0])[i].base;
        runtime_data->iovec_args.in_vec[i].len = ((psa_invec *)args[0])[i].len;
    }
    runtime_data->iovec_args.out_len = (size_t)args[3];
    for (i = 0U; i < runtime_data->iovec_args.out_len; ++i) {
        runtime_data->iovec_args.out_vec[i].base =
                ((psa_outvec *)args[2])[i].base;
        runtime_data->iovec_args.out_vec[i].len =
                ((psa_outvec *)args[2])[i].len;
    }
    runtime_data->orig_outvec = (psa_outvec *)args[2];

    return SPM_ERR_OK;
}

uint32_t tfm_spm_partition_get_running_partition_idx(void)
{
    return g_spm_partition_db.running_partition_idx;
}

void tfm_spm_partition_cleanup_context(uint32_t partition_idx)
{
    struct spm_partition_desc_t *partition =
            &(g_spm_partition_db.partitions[partition_idx]);
    int32_t i;

    partition->runtime_data.caller_partition_idx = SPM_INVALID_PARTITION_IDX;
    partition->runtime_data.iovec_args.in_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        partition->runtime_data.iovec_args.in_vec[i].base = 0;
        partition->runtime_data.iovec_args.in_vec[i].len = 0;
    }
    partition->runtime_data.iovec_args.out_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        partition->runtime_data.iovec_args.out_vec[i].base = 0;
        partition->runtime_data.iovec_args.out_vec[i].len = 0;
    }
    partition->runtime_data.orig_outvec = 0;
}

void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
{
    uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
    uint32_t running_partition_flags = 0;
    uint32_t running_partition_idx;

    /* Check permissions on request type basis */

    switch (svc_ctx->r0) {
    case TFM_SPM_REQUEST_RESET_VOTE:
        running_partition_idx =
                tfm_spm_partition_get_running_partition_idx();
        running_partition_flags = tfm_spm_partition_get_flags(
                                                        running_partition_idx);

        /* Currently only PSA Root of Trust services are allowed to make Reset
         * vote request
         */
        if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
            *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
        }

        /* FixMe: this is a placeholder for checks to be performed before
         * allowing execution of reset
         */
        *res_ptr = (uint32_t)TFM_SUCCESS;

        break;
    default:
        *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
    }
}

enum spm_err_t tfm_spm_db_init(void)
{
    uint32_t i;

    /* This function initialises partition db */

    /* For the non secure Execution environment */
    tfm_nspm_configure_clients();

    for (i = 0; i < g_spm_partition_db.partition_count; i++) {
        g_spm_partition_db.partitions[i].runtime_data.partition_state =
                SPM_PARTITION_STATE_UNINIT;
        g_spm_partition_db.partitions[i].runtime_data.caller_partition_idx =
                SPM_INVALID_PARTITION_IDX;
        g_spm_partition_db.partitions[i].runtime_data.caller_client_id =
                TFM_INVALID_CLIENT_ID;
        g_spm_partition_db.partitions[i].runtime_data.ctx_stack_ptr =
                ctx_stack_list[i];
        g_spm_partition_db.partitions[i].static_data = &static_data_list[i];
        g_spm_partition_db.partitions[i].platform_data_list =
                platform_data_list_list[i];
    }
    g_spm_partition_db.is_init = 1;

    return SPM_ERR_OK;
}