/*
 * Copyright (c) 2017-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include <stdbool.h>
#include <arm_cmse.h>
#include "tfm_nspm.h"
#include "tfm_api.h"
#include "tfm_arch.h"
#include "tfm_irq_list.h"
#include "psa/service.h"
#include "tfm_core_mem_check.h"
#include "tfm_peripherals_def.h"
#include "tfm_secure_api.h"
#include "tfm_spm_hal.h"
#include "spm_api.h"
#include "spm_db.h"
#include "region_defs.h"
#include "region.h"
#include "tfm/tfm_spm_services.h"
#include "tfm_spm_db_func.inc"

#define EXC_RETURN_SECURE_FUNCTION 0xFFFFFFFD
#define EXC_RETURN_SECURE_HANDLER  0xFFFFFFF1

#ifndef TFM_LVL
#error TFM_LVL is not defined!
#endif

REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Base, uint32_t);
REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Limit, struct iovec_args_t)[];

/*
 * This is the "Big Lock" on the secure side, to guarantee single entry
 * to SPE
 */
static int32_t tfm_secure_lock;
static int32_t tfm_secure_api_initializing = 1;

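/**
 * \brief Build a basic exception stack frame below \p dst for entering the
 *        called partition: execution resumes at the veneer return address in
 *        thread mode, with the secure function address in LR and the iovec
 *        parameters in R0-R3.
 *
 * \param[in] svc_ctx     The stacked SVC context
 * \param[in] desc_ptr    The secure function request descriptor
 * \param[in] iovec_args  The iovec parameters to pass to the secure function
 * \param[in] dst         Pointer below which the frame is created (treated as
 *                        a stack pointer)
 *
 * \return A pointer pointing at the created stack frame.
 */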
static uint32_t *prepare_partition_iovec_ctx(
                                     const struct tfm_state_context_t *svc_ctx,
                                     const struct tfm_sfn_req_s *desc_ptr,
                                     const struct iovec_args_t *iovec_args,
                                     uint32_t *dst)
{
    /* XPSR = as was when called, but make sure it's thread mode */
    *(--dst) = svc_ctx->xpsr & 0xFFFFFE00U;
    /* ReturnAddress = resume veneer in new context */
    *(--dst) = svc_ctx->ra;
    /* LR = sfn address */
    *(--dst) = (uint32_t)desc_ptr->sfn;
    /* R12 = don't care */
    *(--dst) = 0U;

    /* R0-R3 = sfn arguments */
    *(--dst) = iovec_args->out_len;
    *(--dst) = (uint32_t)iovec_args->out_vec;
    *(--dst) = iovec_args->in_len;
    *(--dst) = (uint32_t)iovec_args->in_vec;

    return dst;
}

/**
 * \brief Create a stack frame that sets the execution environment to thread
 *        mode on exception return.
 *
 * \param[in] svc_ctx         The stacked SVC context
 * \param[in] unpriv_handler  The unprivileged IRQ handler to be called
 * \param[in] dst             A pointer where the context is to be created.
 *                            (the pointer is considered to be a stack pointer,
 *                            and the frame is created below it)
 *
 * \return A pointer pointing at the created stack frame.
 */
static int32_t *prepare_partition_irq_ctx(
                                     const struct tfm_state_context_t *svc_ctx,
                                     sfn_t unpriv_handler,
                                     int32_t *dst)
{
    int i;

    /* XPSR = as was when called, but make sure it's thread mode */
    *(--dst) = svc_ctx->xpsr & 0xFFFFFE00;
    /* ReturnAddress = resume to the privileged handler code, but execute it
     * unprivileged.
     */
    *(--dst) = svc_ctx->ra;
    /* LR = start address */
    *(--dst) = (int32_t)unpriv_handler;

    /* R12, R0-R3 unused arguments */
    for (i = 0; i < 5; ++i) {
        *(--dst) = 0;
    }

    return dst;
}

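/**
 * \brief Copy the return address and the R0 return value from the stacked SVC
 *        context back into the caller's saved exception context.
 *
 * \param[in]  svc_ctx     The stacked SVC context of the returning call
 * \param[out] target_ctx  The caller's saved exception context to update
 */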
static void restore_caller_ctx(const struct tfm_state_context_t *svc_ctx,
                               struct tfm_state_context_t *target_ctx)
{
    /* ReturnAddress = resume veneer after second SVC */
    target_ctx->ra = svc_ctx->ra;

    /* R0 = function return value */
    target_ctx->r0 = svc_ctx->r0;

    return;
}

/**
 * \brief Check whether the iovec parameters are valid, and the memory ranges
 *        are in the possession of the calling partition.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 *
 * \return \ref TFM_SUCCESS if the iovec parameters are valid, error code
 *         otherwise as in \ref tfm_status_e
 */
static enum tfm_status_e tfm_core_check_sfn_parameters(
                                          const struct tfm_sfn_req_s *desc_ptr)
{
    struct psa_invec *in_vec = (psa_invec *)desc_ptr->args[0];
    size_t in_len;
    struct psa_outvec *out_vec = (psa_outvec *)desc_ptr->args[2];
    size_t out_len;
    uint32_t i;

    if ((desc_ptr->args[1] < 0) || (desc_ptr->args[3] < 0)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    in_len = (size_t)(desc_ptr->args[1]);
    out_len = (size_t)(desc_ptr->args[3]);

    /* Check that the number of vectors is within range. Extra checks to avoid
     * overflow.
     */
    if ((in_len > PSA_MAX_IOVEC) || (out_len > PSA_MAX_IOVEC) ||
        (in_len + out_len > PSA_MAX_IOVEC)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    /* Check whether the caller partition has write access to the iovec
     * structures themselves. Use the TT instruction for this.
     */
    if (in_len > 0) {
        if ((in_vec == NULL) ||
            (tfm_core_has_write_access_to_region(in_vec,
                            sizeof(psa_invec)*in_len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (in_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }
    if (out_len > 0) {
        if ((out_vec == NULL) ||
            (tfm_core_has_write_access_to_region(out_vec,
                            sizeof(psa_outvec)*out_len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (out_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }

    /* Check whether the caller partition has access to the data inside the
     * iovecs
     */
    for (i = 0; i < in_len; ++i) {
        if (in_vec[i].len > 0) {
            if ((in_vec[i].base == NULL) ||
                (tfm_core_has_read_access_to_region(in_vec[i].base,
                            in_vec[i].len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }
    for (i = 0; i < out_len; ++i) {
        if (out_vec[i].len > 0) {
            if ((out_vec[i].base == NULL) ||
                (tfm_core_has_write_access_to_region(out_vec[i].base,
                            out_vec[i].len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }

    return TFM_SUCCESS;
}

static void tfm_copy_iovec_parameters(struct iovec_args_t *target,
                                      const struct iovec_args_t *source)
{
    size_t i;

    /* The vectors have been sanity checked already, and since then interrupts
     * have been kept disabled, so the vectors cannot have been tampered with
     * since the check. It is therefore safe to pass them to the called
     * partition.
     */

    target->in_len = source->in_len;
    for (i = 0; i < source->in_len; ++i) {
        target->in_vec[i].base = source->in_vec[i].base;
        target->in_vec[i].len = source->in_vec[i].len;
    }
    target->out_len = source->out_len;
    for (i = 0; i < source->out_len; ++i) {
        target->out_vec[i].base = source->out_vec[i].base;
        target->out_vec[i].len = source->out_vec[i].len;
    }
}

static void tfm_clear_iovec_parameters(struct iovec_args_t *args)
{
    int i;

    args->in_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        args->in_vec[i].base = NULL;
        args->in_vec[i].len = 0;
    }
    args->out_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        args->out_vec[i].base = NULL;
        args->out_vec[i].len = 0;
    }
}

/**
 * \brief Check whether the partitions for the secure function call are in a
 *        proper state.
 *
 * \param[in] curr_partition_state    State of the partition to be called
 * \param[in] caller_partition_state  State of the caller partition
 *
 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
 */
static enum tfm_status_e check_partition_state(uint32_t curr_partition_state,
                                               uint32_t caller_partition_state)
{
    if (caller_partition_state != SPM_PARTITION_STATE_RUNNING) {
        /* Calling partition from non-running state (e.g. during handling IRQ)
         * is not allowed.
         */
        return TFM_ERROR_INVALID_EXC_MODE;
    }

    if (curr_partition_state == SPM_PARTITION_STATE_RUNNING ||
        curr_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        curr_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
        curr_partition_state == SPM_PARTITION_STATE_BLOCKED) {
        /* Active partitions cannot be called! */
        return TFM_ERROR_PARTITION_NON_REENTRANT;
    } else if (curr_partition_state != SPM_PARTITION_STATE_IDLE) {
        /* The partition to be called is not in a proper state */
        return TFM_SECURE_LOCK_FAILED;
    }
    return TFM_SUCCESS;
}

/**
 * \brief Check whether the partition for the IRQ handling secure function call
 *        is in a proper state.
 *
 * \param[in] called_partition_state  State of the partition to be called
 *
 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
 */
static enum tfm_status_e check_irq_partition_state(
                                               uint32_t called_partition_state)
{
    if (called_partition_state == SPM_PARTITION_STATE_IDLE ||
        called_partition_state == SPM_PARTITION_STATE_RUNNING ||
        called_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        called_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
        called_partition_state == SPM_PARTITION_STATE_BLOCKED) {
        return TFM_SUCCESS;
    }
    return TFM_SECURE_LOCK_FAILED;
}

/**
 * \brief Calculate the address where the iovec parameters are to be saved for
 *        the called partition.
 *
 * \param[in] partition_idx  The index of the partition to be called.
 *
 * \return The address where the iovec parameters should be saved.
 */
static struct iovec_args_t *get_iovec_args_stack_address(uint32_t partition_idx)
{
    /* Save the iovecs on the common stack. */
    return &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
}

/**
 * \brief Returns the index of the partition with the given partition ID.
 *
 * \param[in] partition_id  Partition id
 *
 * \return the partition idx if partition_id is valid,
 *         \ref SPM_INVALID_PARTITION_IDX otherwise
 */
static uint32_t get_partition_idx(uint32_t partition_id)
{
    uint32_t i;

    if (partition_id == INVALID_PARTITION_ID) {
        return SPM_INVALID_PARTITION_IDX;
    }

    for (i = 0; i < g_spm_partition_db.partition_count; ++i) {
        if (g_spm_partition_db.partitions[i].static_data->partition_id ==
                partition_id) {
            return i;
        }
    }
    return SPM_INVALID_PARTITION_IDX;
}

/**
 * \brief Get the flags associated with a partition
 *
 * \param[in] partition_idx  Partition index
 *
 * \return Flags associated with the partition
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_flags(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
           partition_flags;
}

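/**
 * \brief Switch the execution context to the secure partition that serves the
 *        requested secure function.
 *
 *        Checks the caller/callee partition states, stores the caller context,
 *        prepares the shared secure partition stack and, for calls coming from
 *        the non-secure world (or during initialization), copies the iovec
 *        parameters and builds the exception stack frame for the new context.
 *
 * \param[in] desc_ptr   The secure function request descriptor
 * \param[in] excReturn  The EXC_RETURN value of the caller to be stored
 *
 * \return \ref TFM_SUCCESS if the partition was started, error code otherwise.
 */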
static enum tfm_status_e tfm_start_partition(
                                         const struct tfm_sfn_req_s *desc_ptr,
                                         uint32_t excReturn)
{
    enum tfm_status_e res;
    uint32_t caller_partition_idx = desc_ptr->caller_part_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;
    const struct spm_partition_runtime_data_t *caller_part_data;
    uint32_t caller_flags;
    register uint32_t partition_idx;
    uint32_t psp;
    uint32_t partition_psp, partition_psplim;
    uint32_t partition_state;
    uint32_t caller_partition_state;
    struct tfm_state_context_t *svc_ctx;
    uint32_t caller_partition_id;
    int32_t client_id;
    struct iovec_args_t *iovec_args;

    psp = __get_PSP();
    svc_ctx = (struct tfm_state_context_t *)psp;
    caller_flags = tfm_spm_partition_get_flags(caller_partition_idx);

    /* Check partition state consistency */
    if (((caller_flags & SPM_PART_FLAG_APP_ROT) != 0)
        != (!desc_ptr->ns_caller)) {
        /* Partition state inconsistency detected */
        return TFM_SECURE_LOCK_FAILED;
    }

    partition_idx = get_partition_idx(desc_ptr->sp_id);

    curr_part_data = tfm_spm_partition_get_runtime_data(partition_idx);
    caller_part_data = tfm_spm_partition_get_runtime_data(caller_partition_idx);
    partition_state = curr_part_data->partition_state;
    caller_partition_state = caller_part_data->partition_state;
    caller_partition_id = tfm_spm_partition_get_partition_id(
                                                         caller_partition_idx);

    if (!tfm_secure_api_initializing) {
        res = check_partition_state(partition_state, caller_partition_state);
        if (res != TFM_SUCCESS) {
            return res;
        }
    }

    /* Prepare switch to shared secure partition stack */
    /* In case the call is coming from the non-secure world, we save the iovecs
     * on the top of the stack. So the memory area that can actually be used as
     * stack by the partitions starts at a lower address.
     */
    partition_psp =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
    partition_psplim =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Base);

    /* Store the context for the partition call */
    tfm_spm_partition_set_caller_partition_idx(partition_idx,
                                               caller_partition_idx);
    tfm_spm_partition_store_context(caller_partition_idx, psp, excReturn);

    if ((caller_flags & SPM_PART_FLAG_APP_ROT)) {
        tfm_spm_partition_set_caller_client_id(partition_idx,
                                               caller_partition_id);
    } else {
        client_id = tfm_nspm_get_current_client_id();
        if (client_id >= 0) {
            return TFM_SECURE_LOCK_FAILED;
        }
        tfm_spm_partition_set_caller_client_id(partition_idx, client_id);
    }

    /* In level one, only switch context and return from exception if in
     * handler mode
     */
    if ((desc_ptr->ns_caller) || (tfm_secure_api_initializing)) {
        if (tfm_spm_partition_set_iovec(partition_idx, desc_ptr->args) !=
            SPM_ERR_OK) {
            return TFM_ERROR_GENERIC;
        }
        iovec_args = get_iovec_args_stack_address(partition_idx);
        tfm_copy_iovec_parameters(iovec_args, &(curr_part_data->iovec_args));

        /* Prepare the partition context, update stack ptr */
        psp = (uint32_t)prepare_partition_iovec_ctx(svc_ctx, desc_ptr,
                                                    iovec_args,
                                                    (uint32_t *)partition_psp);
        __set_PSP(psp);
        tfm_arch_set_psplim(partition_psplim);
    }

    tfm_spm_partition_set_state(caller_partition_idx,
                                SPM_PARTITION_STATE_BLOCKED);
    tfm_spm_partition_set_state(partition_idx, SPM_PARTITION_STATE_RUNNING);
    tfm_secure_lock++;

    return TFM_SUCCESS;
}

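/**
 * \brief Prepare the execution of an unprivileged IRQ handler in the partition
 *        that owns the IRQ signal.
 *
 *        The signal is added to the handler partition's signal mask, the IRQ
 *        line is disabled, the contexts of the interrupted and of the handler
 *        partition are saved, and a new thread mode stack frame is created for
 *        the handler.
 *
 * \param[in] excReturn  The EXC_RETURN value of the SVC
 * \param[in] svc_ctx    The stacked SVC context (R0-R3 hold the handler
 *                       partition ID, the handler address, the signal and the
 *                       IRQ line)
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 */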
static enum tfm_status_e tfm_start_partition_for_irq_handling(
                                          uint32_t excReturn,
                                          struct tfm_state_context_t *svc_ctx)
{
    uint32_t handler_partition_id = svc_ctx->r0;
    sfn_t unpriv_handler = (sfn_t)svc_ctx->r1;
    uint32_t irq_signal = svc_ctx->r2;
    IRQn_Type irq_line = (IRQn_Type) svc_ctx->r3;
    enum tfm_status_e res;
    uint32_t psp = __get_PSP();
    uint32_t handler_partition_psp;
    uint32_t handler_partition_state;
    uint32_t interrupted_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t handler_partition_idx;

    handler_partition_idx = get_partition_idx(handler_partition_id);
    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                        handler_partition_idx);
    handler_partition_state = handler_part_data->partition_state;

    res = check_irq_partition_state(handler_partition_state);
    if (res != TFM_SUCCESS) {
        return res;
    }

    /* set mask for the partition */
    tfm_spm_partition_set_signal_mask(
                                  handler_partition_idx,
                                  handler_part_data->signal_mask | irq_signal);

    tfm_spm_hal_disable_irq(irq_line);

    /* save the current context of the interrupted partition */
    tfm_spm_partition_push_interrupted_ctx(interrupted_partition_idx);

    handler_partition_psp = psp;

    /* save the current context of the handler partition */
    tfm_spm_partition_push_handler_ctx(handler_partition_idx);

    /* Store caller for the partition */
    tfm_spm_partition_set_caller_partition_idx(handler_partition_idx,
                                               interrupted_partition_idx);

    psp = (uint32_t)prepare_partition_irq_ctx(svc_ctx, unpriv_handler,
                                              (int32_t *)handler_partition_psp);
    __set_PSP(psp);

    tfm_spm_partition_set_state(interrupted_partition_idx,
                                SPM_PARTITION_STATE_SUSPENDED);
    tfm_spm_partition_set_state(handler_partition_idx,
                                SPM_PARTITION_STATE_HANDLING_IRQ);

    return TFM_SUCCESS;
}

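/**
 * \brief Restore the caller's context when a secure function returns.
 *
 *        Decrements the secure lock, restores the caller context and stack
 *        limit when returning to the non-secure world (or during
 *        initialization), updates the original outvec lengths, and sets the
 *        partition states back to IDLE/RUNNING.
 *
 * \param[out] excReturn  Pointer where the caller's EXC_RETURN value is
 *                        restored when the caller context is restored
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 */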
static enum tfm_status_e tfm_return_from_partition(uint32_t *excReturn)
{
    uint32_t current_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *curr_part_data, *ret_part_data;
    uint32_t return_partition_idx;
    uint32_t return_partition_flags;
    uint32_t psp = __get_PSP();
    size_t i;
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;
    struct iovec_args_t *iovec_args;

    if (current_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    curr_part_data = tfm_spm_partition_get_runtime_data(current_partition_idx);
    return_partition_idx = curr_part_data->caller_partition_idx;

    if (return_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    ret_part_data = tfm_spm_partition_get_runtime_data(return_partition_idx);

    return_partition_flags = tfm_spm_partition_get_flags(return_partition_idx);

    tfm_secure_lock--;

    if (!(return_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        (tfm_secure_api_initializing)) {
        /* In TFM level 1 context restore is only done when
         * returning to NS or after initialization
         */
        /* Restore caller context */
        restore_caller_ctx(svc_ctx,
            (struct tfm_state_context_t *)ret_part_data->stack_ptr);
        *excReturn = ret_part_data->lr;
        __set_PSP(ret_part_data->stack_ptr);
        REGION_DECLARE_T(Image$$, ARM_LIB_STACK, $$ZI$$Base, uint32_t)[];
        uint32_t psp_stack_bottom =
            (uint32_t)REGION_NAME(Image$$, ARM_LIB_STACK, $$ZI$$Base);
        tfm_arch_set_psplim(psp_stack_bottom);

        iovec_args = &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];

        for (i = 0; i < curr_part_data->iovec_args.out_len; ++i) {
            curr_part_data->orig_outvec[i].len = iovec_args->out_vec[i].len;
        }
        tfm_clear_iovec_parameters(iovec_args);
    }

    tfm_spm_partition_cleanup_context(current_partition_idx);

    tfm_spm_partition_set_state(current_partition_idx,
                                SPM_PARTITION_STATE_IDLE);
    tfm_spm_partition_set_state(return_partition_idx,
                                SPM_PARTITION_STATE_RUNNING);

    return TFM_SUCCESS;
}

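/**
 * \brief Restore the contexts of the handler and of the interrupted partition
 *        when a deprivileged IRQ handler returns.
 *
 * \param[out] excReturn  Pointer where the address for resuming the privileged
 *                        handler code is stored
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 */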
static enum tfm_status_e tfm_return_from_partition_irq_handling(
                                                           uint32_t *excReturn)
{
    uint32_t handler_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t interrupted_partition_idx;
    uint32_t psp = __get_PSP();
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;

    if (handler_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                        handler_partition_idx);
    interrupted_partition_idx = handler_part_data->caller_partition_idx;

    if (interrupted_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    /* For level 1, modify PSP, so that the SVC stack frame disappears,
     * and return to the privileged handler using the stack frame still on the
     * MSP stack.
     */
    *excReturn = svc_ctx->ra;
    psp += sizeof(struct tfm_state_context_t);

    tfm_spm_partition_pop_handler_ctx(handler_partition_idx);
    tfm_spm_partition_pop_interrupted_ctx(interrupted_partition_idx);

    __set_PSP(psp);

    return TFM_SUCCESS;
}

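/**
 * \brief Check that a secure function request descriptor is well formed.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 *
 * \return \ref TFM_SUCCESS if the descriptor, the partition ID and the
 *         function pointer are valid, \ref TFM_ERROR_INVALID_PARAMETER
 *         otherwise.
 */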
static enum tfm_status_e tfm_check_sfn_req_integrity(
                                          const struct tfm_sfn_req_s *desc_ptr)
{
    if ((desc_ptr == NULL) ||
        (desc_ptr->sp_id == 0) ||
        (desc_ptr->sfn == NULL)) {
        /* invalid parameter */
        return TFM_ERROR_INVALID_PARAMETER;
    }
    return TFM_SUCCESS;
}

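/**
 * \brief Check that the secure function request is allowed in the current SPM
 *        state: the caller partition index is valid, the secure domain is not
 *        already locked for a non-secure caller, and during initialization
 *        only TF-M Core may issue requests.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 *
 * \return \ref TFM_SUCCESS if the request is allowed, error code otherwise.
 */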
static enum tfm_status_e tfm_core_check_sfn_req_rules(
                                          const struct tfm_sfn_req_s *desc_ptr)
{
    /* Check partition idx validity */
    if (desc_ptr->caller_part_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_ERROR_NO_ACTIVE_PARTITION;
    }

    if ((desc_ptr->ns_caller) && (tfm_secure_lock != 0)) {
        /* Secure domain is already locked!
         * This should only happen if caller is secure partition!
         */
        /* This scenario is a potential security breach.
         * Error is handled in caller.
         */
        return TFM_ERROR_SECURE_DOMAIN_LOCKED;
    }

    if (tfm_secure_api_initializing) {
        int32_t id =
            tfm_spm_partition_get_partition_id(desc_ptr->caller_part_idx);

        if ((id != TFM_SP_CORE_ID) || (tfm_secure_lock != 0)) {
            /* Invalid request during system initialization */
            ERROR_MSG("Invalid service request during initialization!");
            return TFM_ERROR_NOT_INITIALIZED;
        }
    }

    return TFM_SUCCESS;
}

uint32_t tfm_spm_partition_get_partition_id(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
           partition_id;
}

uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
{
    if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
        return TFM_PARTITION_PRIVILEGED_MODE;
    } else {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
}

bool tfm_is_partition_privileged(uint32_t partition_idx)
{
    uint32_t flags = tfm_spm_partition_get_flags(partition_idx);

    return tfm_spm_partition_get_privileged_mode(flags) ==
           TFM_PARTITION_PRIVILEGED_MODE;
}

void tfm_spm_secure_api_init_done(void)
{
    tfm_secure_api_initializing = 0;
}

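/**
 * \brief Handle a secure function request: validate the descriptor, the iovec
 *        parameters and the SPM state with interrupts disabled, then start the
 *        target partition.
 *
 * \param[in] desc_ptr   The secure function request descriptor
 * \param[in] excReturn  The EXC_RETURN value of the caller
 *
 * \return \ref TFM_SUCCESS on success; on failure the TF-M error handler is
 *         invoked.
 */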
enum tfm_status_e tfm_spm_sfn_request_handler(
                            struct tfm_sfn_req_s *desc_ptr, uint32_t excReturn)
{
    enum tfm_status_e res;

    res = tfm_check_sfn_req_integrity(desc_ptr);
    if (res != TFM_SUCCESS) {
        ERROR_MSG("Invalid service request!");
        tfm_secure_api_error_handler();
    }

    __disable_irq();

    desc_ptr->caller_part_idx = tfm_spm_partition_get_running_partition_idx();

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        __enable_irq();
        tfm_secure_api_error_handler();
    }

    res = tfm_core_check_sfn_req_rules(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* FixMe: error compartmentalization TBD */
        tfm_spm_partition_set_state(
            desc_ptr->caller_part_idx, SPM_PARTITION_STATE_CLOSED);
        __enable_irq();
        ERROR_MSG("Unauthorized service request!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition(desc_ptr, excReturn);
    if (res != TFM_SUCCESS) {
        /* FixMe: consider possible fault scenarios */
        __enable_irq();
        ERROR_MSG("Failed to process service request!");
        tfm_secure_api_error_handler();
    }

    __enable_irq();

    return res;
}

int32_t tfm_spm_sfn_request_thread_mode(struct tfm_sfn_req_s *desc_ptr)
{
    enum tfm_status_e res;
    int32_t *args;
    int32_t retVal;

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        return (int32_t)res;
    }

    /* No excReturn value is needed as no exception handling is used */
    res = tfm_spm_sfn_request_handler(desc_ptr, 0);

    if (res != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    /* Secure partition to secure partition call in TFM level 1 */
    args = desc_ptr->args;
    retVal = desc_ptr->sfn(args[0], args[1], args[2], args[3]);

    /* return handler should restore original exc_return value... */
    res = tfm_return_from_partition(NULL);
    if (res == TFM_SUCCESS) {
        /* If unlock successful, pass SS return value to caller */
        return retVal;
    } else {
        /* Unlock errors indicate ctx database corruption or unknown
         * anomalies. Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }
    return (int32_t)res;
}

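/**
 * \brief Check that a buffer is properly aligned and lies entirely within the
 *        secure data area.
 *
 * \param[in] partition_idx  Index of the partition on whose behalf the check
 *                           is made (not used by this check)
 * \param[in] start_addr     Start address of the buffer
 * \param[in] len            Length of the buffer in bytes
 * \param[in] alignment      Required alignment, expressed as a power of two
 *
 * \return 1 if the buffer is accessible, 0 otherwise.
 */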
int32_t tfm_spm_check_buffer_access(uint32_t partition_idx,
                                    void *start_addr,
                                    size_t len,
                                    uint32_t alignment)
{
    uintptr_t start_addr_value = (uintptr_t)start_addr;
    uintptr_t end_addr_value = (uintptr_t)start_addr + len;
    uintptr_t alignment_mask;

    alignment_mask = (((uintptr_t)1) << alignment) - 1;

    /* Check that the pointer is aligned properly */
    if (start_addr_value & alignment_mask) {
        /* not aligned, return error */
        return 0;
    }

    /* Protect against overflow (and zero len) */
    if (end_addr_value <= start_addr_value) {
        return 0;
    }

    /* For privileged partition execution, all secure data memory and stack
     * is accessible
     */
    if (start_addr_value >= S_DATA_START &&
        end_addr_value <= (S_DATA_START + S_DATA_SIZE)) {
        return 1;
    }

    return 0;
}

void tfm_spm_get_caller_client_id_handler(uint32_t *svc_args)
{
    uintptr_t result_ptr_value = svc_args[0];
    uint32_t running_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const uint32_t running_partition_flags =
            tfm_spm_partition_get_flags(running_partition_idx);
    const struct spm_partition_runtime_data_t *curr_part_data =
            tfm_spm_partition_get_runtime_data(running_partition_idx);
    int res = 0;

    if (!(running_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_SUSPENDED) {
        /* This handler shouldn't be called from outside partition context.
         * Also if the current partition is handling IRQ, the caller partition
         * index might not be valid;
         * Partitions are only allowed to run while S domain is locked.
         */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    /* Make sure that the output pointer points to a memory area that is owned
     * by the partition
     */
    res = tfm_spm_check_buffer_access(running_partition_idx,
                                      (void *)result_ptr_value,
                                      sizeof(curr_part_data->caller_client_id),
                                      2);
    if (!res) {
        /* Not in accessible range, return error */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    *((int32_t *)result_ptr_value) = curr_part_data->caller_client_id;

    /* Store return value in r0 */
    svc_args[0] = (uint32_t)TFM_SUCCESS;
}

/* This SVC handler is called if veneer is running in thread mode */
uint32_t tfm_spm_partition_request_svc_handler(
        const uint32_t *svc_ctx, uint32_t excReturn)
{
    struct tfm_sfn_req_s *desc_ptr;

    if (!(excReturn & EXC_RETURN_STACK_PROCESS)) {
        /* Service request SVC called with MSP active.
         * Either invalid configuration for Thread mode or SVC called
         * from Handler mode, which is not supported.
         * FixMe: error severity TBD
         */
        ERROR_MSG("Service request SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    desc_ptr = (struct tfm_sfn_req_s *)svc_ctx[0];

    if (tfm_spm_sfn_request_handler(desc_ptr, excReturn) != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    return EXC_RETURN_SECURE_FUNCTION;
}

/* This SVC handler is called, if a thread mode execution environment is to
 * be set up, to run an unprivileged IRQ handler
 */
uint32_t tfm_spm_depriv_req_handler(uint32_t *svc_args, uint32_t excReturn)
{
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;

    enum tfm_status_e res;

    if (excReturn & EXC_RETURN_STACK_PROCESS) {
        /* FixMe: error severity TBD */
        ERROR_MSG("Partition request SVC called with PSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition_for_irq_handling(excReturn, svc_ctx);
    if (res != TFM_SUCCESS) {
        /* The partition is in an invalid state (UNINIT or CLOSED), so none of
         * its code can be run
         */
        /* FixMe: For now this case is handled with TF-M panic, however it
         * would be possible to skip the execution of the interrupt handler,
         * and resume the execution of the interrupted code.
         */
        tfm_secure_api_error_handler();
    }
    return EXC_RETURN_SECURE_FUNCTION;
}

/* This SVC handler is called when sfn returns */
uint32_t tfm_spm_partition_return_handler(uint32_t lr)
{
    enum tfm_status_e res;

    if (!(lr & EXC_RETURN_STACK_PROCESS)) {
        /* Partition return SVC called with MSP active.
         * This should not happen!
         */
        ERROR_MSG("Partition return SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_return_from_partition(&lr);
    if (res != TFM_SUCCESS) {
        /* Unlock errors indicate ctx database corruption or unknown anomalies.
         * Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }

    return lr;
}

/* This SVC handler is called if a deprivileged IRQ handler was executed, and
 * the execution environment is to be set back for the privileged handler mode
 */
uint32_t tfm_spm_depriv_return_handler(uint32_t *irq_svc_args, uint32_t lr)
{
    enum tfm_status_e res;
    struct tfm_state_context_t *irq_svc_ctx =
            (struct tfm_state_context_t *)irq_svc_args;

    if (!(lr & EXC_RETURN_STACK_PROCESS)) {
        /* Partition request SVC called with MSP active.
         * FixMe: error severity TBD
         */
        ERROR_MSG("Partition request SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_return_from_partition_irq_handling(&lr);
    if (res != TFM_SUCCESS) {
        /* Unlock errors indicate ctx database corruption or unknown anomalies.
         * Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }

    irq_svc_ctx->ra = lr;

    return EXC_RETURN_SECURE_HANDLER;
}

/* FIXME: get_irq_line_for_signal is also implemented in the ipc folder. */
/**
 * \brief Return the IRQ line number associated with a signal
 *
 * \param[in] partition_id  The ID of the partition in which we look for the
 *                          signal
 * \param[in] signal        The signal we do the query for
 *
 * \retval >=0  The IRQ line number associated with a signal in the partition
 * \retval <0   error
 */
static IRQn_Type get_irq_line_for_signal(int32_t partition_id,
                                         psa_signal_t signal)
{
    size_t i;

    for (i = 0; i < tfm_core_irq_signals_count; ++i) {
        if (tfm_core_irq_signals[i].partition_id == partition_id &&
            tfm_core_irq_signals[i].signal_value == signal) {
            return tfm_core_irq_signals[i].irq_line;
        }
    }
    return (IRQn_Type) -1;
}

void tfm_spm_enable_irq_handler(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t running_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    uint32_t running_partition_id =
            tfm_spm_partition_get_partition_id(running_partition_idx);
    IRQn_Type irq_line;

    /* Only a single signal is allowed */
    if (!tfm_is_one_bit_set(irq_signal)) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    tfm_spm_hal_enable_irq(irq_line);
}

void tfm_spm_disable_irq_handler(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t running_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    uint32_t running_partition_id =
            tfm_spm_partition_get_partition_id(running_partition_idx);
    IRQn_Type irq_line;

    /* Only a single signal is allowed */
    if (!tfm_is_one_bit_set(irq_signal)) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    tfm_spm_hal_disable_irq(irq_line);
}

void tfm_spm_psa_wait(uint32_t *svc_args)
{
    /* Look for partition that is ready for run */
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;
    uint32_t running_partition_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;

    psa_signal_t signal_mask = svc_ctx->r0;
    uint32_t timeout = svc_ctx->r1;

    /*
     * Timeout[30:0] are reserved for future use.
     * SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    running_partition_idx = tfm_spm_partition_get_running_partition_idx();
    curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);

    if (timeout == PSA_BLOCK) {
        /* FIXME: Scheduling is not available in library model, and busy wait
         * is also not possible as this code is running in SVC context, and it
         * cannot be pre-empted by interrupts. So do nothing here for now
         */
        (void) signal_mask;
    }

    svc_ctx->r0 = curr_part_data->signal_mask;
}

void tfm_spm_psa_eoi(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t signal_mask;
    uint32_t running_partition_idx;
    uint32_t running_partition_id;
    const struct spm_partition_runtime_data_t *curr_part_data;
    IRQn_Type irq_line;

    running_partition_idx = tfm_spm_partition_get_running_partition_idx();
    running_partition_id =
            tfm_spm_partition_get_partition_id(running_partition_idx);
    curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);

    /* Only a single signal is allowed */
    if (!tfm_is_one_bit_set(irq_signal)) {
        tfm_secure_api_error_handler();
    }

    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    tfm_spm_hal_clear_pending_irq(irq_line);
    tfm_spm_hal_enable_irq(irq_line);

    signal_mask = curr_part_data->signal_mask & ~irq_signal;
    tfm_spm_partition_set_signal_mask(running_partition_idx, signal_mask);
}

/*
 * This function is called when a secure partition causes an error.
 * In case of an error in the error handling, a non-zero value has to be
 * returned.
 */
static void tfm_spm_partition_err_handler(
    const struct spm_partition_desc_t *partition,
    int32_t err_code)
{
    (void)err_code;

    tfm_spm_partition_set_state(partition->static_data->partition_id,
                                SPM_PARTITION_STATE_CLOSED);
}

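/**
 * \brief Initialize all secure partitions: apply the default platform
 *        isolation configuration and call each partition's init function
 *        through a secure function request.
 *
 * \return \ref SPM_ERR_OK if every partition initialized successfully,
 *         \ref SPM_ERR_PARTITION_NOT_AVAILABLE otherwise.
 */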
enum spm_err_t tfm_spm_partition_init(void)
{
    struct spm_partition_desc_t *part;
    struct tfm_sfn_req_s desc;
    int32_t args[4] = {0};
    int32_t fail_cnt = 0;
    uint32_t idx;
    const struct tfm_spm_partition_platform_data_t **platform_data_p;

    /* Call the init function for each partition */
    for (idx = 0; idx < g_spm_partition_db.partition_count; ++idx) {
        part = &g_spm_partition_db.partitions[idx];
        platform_data_p = part->platform_data_list;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(idx,
                        *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    fail_cnt++;
                }
                ++platform_data_p;
            }
        }
        if (part->static_data->partition_init == NULL) {
            tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            tfm_spm_partition_set_caller_partition_idx(idx,
                                                    SPM_INVALID_PARTITION_IDX);
        } else {
            int32_t res;

            desc.args = args;
            desc.ns_caller = false;
            desc.sfn = (sfn_t)part->static_data->partition_init;
            desc.sp_id = part->static_data->partition_id;
            res = tfm_core_sfn_request(&desc);
            if (res == TFM_SUCCESS) {
                tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            } else {
                tfm_spm_partition_err_handler(part, res);
                fail_cnt++;
            }
        }
    }

    tfm_spm_secure_api_init_done();

    if (fail_cnt == 0) {
        return SPM_ERR_OK;
    } else {
        return SPM_ERR_PARTITION_NOT_AVAILABLE;
    }
}

void tfm_spm_partition_push_interrupted_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
            &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct interrupted_ctx_stack_frame_t *stack_frame =
            (struct interrupted_ctx_stack_frame_t *)runtime_data->ctx_stack_ptr;

    stack_frame->partition_state = runtime_data->partition_state;

    runtime_data->ctx_stack_ptr +=
            sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
}

void tfm_spm_partition_pop_interrupted_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
            &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct interrupted_ctx_stack_frame_t *stack_frame;

    runtime_data->ctx_stack_ptr -=
            sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);

    stack_frame = (struct interrupted_ctx_stack_frame_t *)
            runtime_data->ctx_stack_ptr;
    tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
    stack_frame->partition_state = 0;
}

void tfm_spm_partition_push_handler_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
            &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct handler_ctx_stack_frame_t *stack_frame =
            (struct handler_ctx_stack_frame_t *)
            runtime_data->ctx_stack_ptr;

    stack_frame->partition_state = runtime_data->partition_state;
    stack_frame->caller_partition_idx = runtime_data->caller_partition_idx;

    runtime_data->ctx_stack_ptr +=
            sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
}

void tfm_spm_partition_pop_handler_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
            &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct handler_ctx_stack_frame_t *stack_frame;

    runtime_data->ctx_stack_ptr -=
            sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);

    stack_frame = (struct handler_ctx_stack_frame_t *)
            runtime_data->ctx_stack_ptr;

    tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
    stack_frame->partition_state = 0;
    tfm_spm_partition_set_caller_partition_idx(
            partition_idx, stack_frame->caller_partition_idx);
    stack_frame->caller_partition_idx = 0;
}

void tfm_spm_partition_store_context(uint32_t partition_idx,
                                     uint32_t stack_ptr, uint32_t lr)
{
    g_spm_partition_db.partitions[partition_idx].
            runtime_data.stack_ptr = stack_ptr;
    g_spm_partition_db.partitions[partition_idx].
            runtime_data.lr = lr;
}

const struct spm_partition_runtime_data_t *
        tfm_spm_partition_get_runtime_data(uint32_t partition_idx)
{
    return &(g_spm_partition_db.partitions[partition_idx].runtime_data);
}

void tfm_spm_partition_set_state(uint32_t partition_idx, uint32_t state)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.partition_state =
            state;
    if (state == SPM_PARTITION_STATE_RUNNING ||
        state == SPM_PARTITION_STATE_HANDLING_IRQ) {
        g_spm_partition_db.running_partition_idx = partition_idx;
    }
}

void tfm_spm_partition_set_caller_partition_idx(uint32_t partition_idx,
                                                uint32_t caller_partition_idx)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.
            caller_partition_idx = caller_partition_idx;
}

void tfm_spm_partition_set_signal_mask(uint32_t partition_idx,
                                       uint32_t signal_mask)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.
            signal_mask = signal_mask;
}

void tfm_spm_partition_set_caller_client_id(uint32_t partition_idx,
                                            int32_t caller_client_id)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.
            caller_client_id = caller_client_id;
}

enum spm_err_t tfm_spm_partition_set_iovec(uint32_t partition_idx,
                                           const int32_t *args)
{
    struct spm_partition_runtime_data_t *runtime_data =
            &g_spm_partition_db.partitions[partition_idx].runtime_data;
    size_t i;

    if ((args[1] < 0) || (args[3] < 0)) {
        return SPM_ERR_INVALID_PARAMETER;
    }

    runtime_data->iovec_args.in_len = (size_t)args[1];
    for (i = 0U; i < runtime_data->iovec_args.in_len; ++i) {
        runtime_data->iovec_args.in_vec[i].base =
                ((psa_invec *)args[0])[i].base;
        runtime_data->iovec_args.in_vec[i].len = ((psa_invec *)args[0])[i].len;
    }
    runtime_data->iovec_args.out_len = (size_t)args[3];
    for (i = 0U; i < runtime_data->iovec_args.out_len; ++i) {
        runtime_data->iovec_args.out_vec[i].base =
                ((psa_outvec *)args[2])[i].base;
        runtime_data->iovec_args.out_vec[i].len =
                ((psa_outvec *)args[2])[i].len;
    }
    runtime_data->orig_outvec = (psa_outvec *)args[2];

    return SPM_ERR_OK;
}

uint32_t tfm_spm_partition_get_running_partition_idx(void)
{
    return g_spm_partition_db.running_partition_idx;
}

void tfm_spm_partition_cleanup_context(uint32_t partition_idx)
{
    struct spm_partition_desc_t *partition =
            &(g_spm_partition_db.partitions[partition_idx]);
    int32_t i;

    partition->runtime_data.caller_partition_idx = SPM_INVALID_PARTITION_IDX;
    partition->runtime_data.iovec_args.in_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        partition->runtime_data.iovec_args.in_vec[i].base = 0;
        partition->runtime_data.iovec_args.in_vec[i].len = 0;
    }
    partition->runtime_data.iovec_args.out_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        partition->runtime_data.iovec_args.out_vec[i].base = 0;
        partition->runtime_data.iovec_args.out_vec[i].len = 0;
    }
    partition->runtime_data.orig_outvec = 0;
}

void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
{
    uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
    uint32_t running_partition_flags = 0;
    uint32_t running_partition_idx;

    /* Check permissions on request type basis */

    switch (svc_ctx->r0) {
    case TFM_SPM_REQUEST_RESET_VOTE:
        running_partition_idx =
                tfm_spm_partition_get_running_partition_idx();
        running_partition_flags = tfm_spm_partition_get_flags(
                                                        running_partition_idx);

        /* Currently only PSA Root of Trust services are allowed to make Reset
         * vote request
         */
        if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
            *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
            break;
        }

        /* FixMe: this is a placeholder for checks to be performed before
         * allowing execution of reset
         */
        *res_ptr = (uint32_t)TFM_SUCCESS;

        break;
    default:
        *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
    }
}

enum spm_err_t tfm_spm_db_init(void)
{
    uint32_t i;

    /* This function initialises partition db */

    /* For the non-secure execution environment */
    tfm_nspm_configure_clients();

    for (i = 0; i < g_spm_partition_db.partition_count; i++) {
        g_spm_partition_db.partitions[i].runtime_data.partition_state =
                SPM_PARTITION_STATE_UNINIT;
        g_spm_partition_db.partitions[i].runtime_data.caller_partition_idx =
                SPM_INVALID_PARTITION_IDX;
        g_spm_partition_db.partitions[i].runtime_data.caller_client_id =
                TFM_INVALID_CLIENT_ID;
        g_spm_partition_db.partitions[i].runtime_data.ctx_stack_ptr =
                ctx_stack_list[i];
        g_spm_partition_db.partitions[i].static_data = &static_data_list[i];
        g_spm_partition_db.partitions[i].platform_data_list =
                platform_data_list_list[i];
    }
    g_spm_partition_db.is_init = 1;

    return SPM_ERR_OK;
}