blob: 890e0b21d7f97213f8a18c126f2f2bd9a5ed349d [file] [log] [blame]
Mingyang Sunda01a972019-07-12 17:32:59 +08001/*
Mingyang Sunabb1aab2020-02-18 13:49:08 +08002 * Copyright (c) 2017-2020, Arm Limited. All rights reserved.
Mingyang Sunda01a972019-07-12 17:32:59 +08003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 *
6 */
7
Mingyang Sunabb1aab2020-02-18 13:49:08 +08008#include <stdint.h>
Mingyang Sunda01a972019-07-12 17:32:59 +08009#include <stdbool.h>
Mingyang Sunabb1aab2020-02-18 13:49:08 +080010#include <arm_cmse.h>
11#include "tfm_nspm.h"
Mingyang Sunabb1aab2020-02-18 13:49:08 +080012#include "tfm_api.h"
13#include "tfm_arch.h"
14#include "tfm_irq_list.h"
15#include "psa/service.h"
16#include "tfm_core_mem_check.h"
Mingyang Sunbd7ceb52020-06-11 16:53:03 +080017#include "tfm_peripherals_def.h"
Mingyang Sunabb1aab2020-02-18 13:49:08 +080018#include "tfm_secure_api.h"
Mingyang Sunda01a972019-07-12 17:32:59 +080019#include "tfm_spm_hal.h"
Mingyang Sunc3123ec2020-06-11 17:43:58 +080020#include "spm_api.h"
21#include "spm_db.h"
Mingyang Sunda01a972019-07-12 17:32:59 +080022#include "region_defs.h"
Mingyang Sunabb1aab2020-02-18 13:49:08 +080023#include "region.h"
Ken Liu1f345b02020-05-30 21:11:05 +080024#include "tfm/tfm_spm_services_api.h"
Mingyang Sunbd7ceb52020-06-11 16:53:03 +080025#include "tfm_spm_db_func.inc"
Mingyang Sunabb1aab2020-02-18 13:49:08 +080026
27#define EXC_RETURN_SECURE_FUNCTION 0xFFFFFFFD
28#define EXC_RETURN_SECURE_HANDLER 0xFFFFFFF1
29
30#ifndef TFM_LVL
31#error TFM_LVL is not defined!
32#endif
33
34REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Base, uint32_t);
35REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Limit, struct iovec_args_t)[];
36
37/*
38 * This is the "Big Lock" on the secure side, to guarantee single entry
39 * to SPE
40 */
41extern int32_t tfm_secure_lock;
42static int32_t tfm_secure_api_initializing = 1;
Mingyang Sunda01a972019-07-12 17:32:59 +080043
Mingyang Sunabb1aab2020-02-18 13:49:08 +080044static uint32_t *prepare_partition_iovec_ctx(
45 const struct tfm_state_context_t *svc_ctx,
46 const struct tfm_sfn_req_s *desc_ptr,
47 const struct iovec_args_t *iovec_args,
48 uint32_t *dst)
49{
50 /* XPSR = as was when called, but make sure it's thread mode */
51 *(--dst) = svc_ctx->xpsr & 0xFFFFFE00U;
52 /* ReturnAddress = resume veneer in new context */
53 *(--dst) = svc_ctx->ra;
54 /* LR = sfn address */
55 *(--dst) = (uint32_t)desc_ptr->sfn;
56 /* R12 = don't care */
57 *(--dst) = 0U;
58
59 /* R0-R3 = sfn arguments */
60 *(--dst) = iovec_args->out_len;
61 *(--dst) = (uint32_t)iovec_args->out_vec;
62 *(--dst) = iovec_args->in_len;
63 *(--dst) = (uint32_t)iovec_args->in_vec;
64
65 return dst;
66}
67
68/**
69 * \brief Create a stack frame that sets the execution environment to thread
70 * mode on exception return.
71 *
72 * \param[in] svc_ctx The stacked SVC context
73 * \param[in] unpriv_handler The unprivileged IRQ handler to be called
74 * \param[in] dst A pointer where the context is to be created. (the
75 * pointer is considered to be a stack pointer, and
76 * the frame is created below it)
77 *
78 * \return A pointer pointing at the created stack frame.
79 */
80static int32_t *prepare_partition_irq_ctx(
81 const struct tfm_state_context_t *svc_ctx,
82 sfn_t unpriv_handler,
83 int32_t *dst)
84{
85 int i;
86
87 /* XPSR = as was when called, but make sure it's thread mode */
88 *(--dst) = svc_ctx->xpsr & 0xFFFFFE00;
89 /* ReturnAddress = resume to the privileged handler code, but execute it
90 * unprivileged.
91 */
92 *(--dst) = svc_ctx->ra;
93 /* LR = start address */
94 *(--dst) = (int32_t)unpriv_handler;
95
96 /* R12, R0-R3 unused arguments */
97 for (i = 0; i < 5; ++i) {
98 *(--dst) = 0;
99 }
100
101 return dst;
102}
103
104static void restore_caller_ctx(const struct tfm_state_context_t *svc_ctx,
105 struct tfm_state_context_t *target_ctx)
106{
107 /* ReturnAddress = resume veneer after second SVC */
108 target_ctx->ra = svc_ctx->ra;
109
110 /* R0 = function return value */
111 target_ctx->r0 = svc_ctx->r0;
112
113 return;
114}
115
/**
 * \brief Check whether the iovec parameters are valid, and the memory ranges
 *        are in the possession of the calling partition.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 *
 * \return Return /ref TFM_SUCCESS if the iovec parameters are valid, error code
 *         otherwise as in /ref tfm_status_e
 *
 * \note Must run with interrupts disabled (the caller guarantees this), so the
 *       vectors cannot be modified between validation and use.
 */
static enum tfm_status_e tfm_core_check_sfn_parameters(
                                           const struct tfm_sfn_req_s *desc_ptr)
{
    /* args[0]/args[1] carry the invec array and its length,
     * args[2]/args[3] the outvec array and its length.
     */
    struct psa_invec *in_vec = (psa_invec *)desc_ptr->args[0];
    size_t in_len;
    struct psa_outvec *out_vec = (psa_outvec *)desc_ptr->args[2];
    size_t out_len;
    uint32_t i;

    /* Lengths arrive as signed args; negative counts are invalid */
    if ((desc_ptr->args[1] < 0) || (desc_ptr->args[3] < 0)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    in_len = (size_t)(desc_ptr->args[1]);
    out_len = (size_t)(desc_ptr->args[3]);

    /* The number of vectors are within range. Extra checks to avoid overflow */
    if ((in_len > PSA_MAX_IOVEC) || (out_len > PSA_MAX_IOVEC) ||
        (in_len + out_len > PSA_MAX_IOVEC)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    /* Check whether the caller partition has write access to the iovec
     * structures themselves. Use the TT instruction for this.
     */
    if (in_len > 0) {
        if ((in_vec == NULL) ||
            (tfm_core_has_write_access_to_region(in_vec,
                            sizeof(psa_invec)*in_len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        /* A non-NULL pointer with zero length is rejected as inconsistent */
        if (in_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }
    if (out_len > 0) {
        if ((out_vec == NULL) ||
            (tfm_core_has_write_access_to_region(out_vec,
                            sizeof(psa_outvec)*out_len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (out_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }

    /* Check whether the caller partition has access to the data inside the
     * iovecs: read access for input buffers, write access for output buffers.
     */
    for (i = 0; i < in_len; ++i) {
        if (in_vec[i].len > 0) {
            if ((in_vec[i].base == NULL) ||
                (tfm_core_has_read_access_to_region(in_vec[i].base,
                            in_vec[i].len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }
    for (i = 0; i < out_len; ++i) {
        if (out_vec[i].len > 0) {
            if ((out_vec[i].base == NULL) ||
                (tfm_core_has_write_access_to_region(out_vec[i].base,
                            out_vec[i].len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }

    return TFM_SUCCESS;
}
201
202static void tfm_copy_iovec_parameters(struct iovec_args_t *target,
203 const struct iovec_args_t *source)
204{
205 size_t i;
206
207 /* The vectors have been sanity checked already, and since then the
208 * interrupts have been kept disabled. So we can be sure that the
209 * vectors haven't been tampered with since the check. So it is safe to pass
210 * it to the called partition.
211 */
212
213 target->in_len = source->in_len;
214 for (i = 0; i < source->in_len; ++i) {
215 target->in_vec[i].base = source->in_vec[i].base;
216 target->in_vec[i].len = source->in_vec[i].len;
217 }
218 target->out_len = source->out_len;
219 for (i = 0; i < source->out_len; ++i) {
220 target->out_vec[i].base = source->out_vec[i].base;
221 target->out_vec[i].len = source->out_vec[i].len;
222 }
223}
224
225static void tfm_clear_iovec_parameters(struct iovec_args_t *args)
226{
227 int i;
228
229 args->in_len = 0;
230 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
231 args->in_vec[i].base = NULL;
232 args->in_vec[i].len = 0;
233 }
234 args->out_len = 0;
235 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
236 args->out_vec[i].base = NULL;
237 args->out_vec[i].len = 0;
238 }
239}
240
241/**
242 * \brief Check whether the partitions for the secure function call are in a
243 * proper state.
244 *
245 * \param[in] curr_partition_state State of the partition to be called
246 * \param[in] caller_partition_state State of the caller partition
247 *
248 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
249 */
250static enum tfm_status_e check_partition_state(uint32_t curr_partition_state,
251 uint32_t caller_partition_state)
252{
253 if (caller_partition_state != SPM_PARTITION_STATE_RUNNING) {
254 /* Calling partition from non-running state (e.g. during handling IRQ)
255 * is not allowed.
256 */
257 return TFM_ERROR_INVALID_EXC_MODE;
258 }
259
260 if (curr_partition_state == SPM_PARTITION_STATE_RUNNING ||
261 curr_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
262 curr_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
263 curr_partition_state == SPM_PARTITION_STATE_BLOCKED) {
264 /* Active partitions cannot be called! */
265 return TFM_ERROR_PARTITION_NON_REENTRANT;
266 } else if (curr_partition_state != SPM_PARTITION_STATE_IDLE) {
267 /* The partition to be called is not in a proper state */
268 return TFM_SECURE_LOCK_FAILED;
269 }
270 return TFM_SUCCESS;
271}
272
273/**
274 * \brief Check whether the partitions for the secure function call of irq are
275 * in a proper state.
276 *
277 * \param[in] called_partition_state State of the partition to be called
278 *
279 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
280 */
281static enum tfm_status_e check_irq_partition_state(
282 uint32_t called_partition_state)
283{
284 if (called_partition_state == SPM_PARTITION_STATE_IDLE ||
285 called_partition_state == SPM_PARTITION_STATE_RUNNING ||
286 called_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
287 called_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
288 called_partition_state == SPM_PARTITION_STATE_BLOCKED) {
289 return TFM_SUCCESS;
290 }
291 return TFM_SECURE_LOCK_FAILED;
292}
293
294/**
295 * \brief Calculate the address where the iovec parameters are to be saved for
296 * the called partition.
297 *
298 * \param[in] partition_idx The index of the partition to be called.
299 *
300 * \return The address where the iovec parameters should be saved.
301 */
302static struct iovec_args_t *get_iovec_args_stack_address(uint32_t partition_idx)
303{
304 /* Save the iovecs on the common stack. */
TTornblom99f0be22019-12-17 16:22:38 +0100305 return &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800306}
307
/**
 * \brief Lock the secure domain and switch execution to the requested
 *        secure partition.
 *
 * Validates caller/callee state, records the call context, and - for calls
 * arriving from the non-secure world or during initialization - builds a new
 * thread-mode stack frame on the shared secure stack so that exception return
 * enters the service function directly.
 *
 * \param[in] desc_ptr   The secure function request descriptor
 * \param[in] excReturn  The EXC_RETURN value of the caller, saved for restore
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 *
 * \note Runs with interrupts disabled (enforced by the caller,
 *       \ref tfm_spm_sfn_request_handler).
 */
static enum tfm_status_e tfm_start_partition(
                                         const struct tfm_sfn_req_s *desc_ptr,
                                         uint32_t excReturn)
{
    enum tfm_status_e res;
    uint32_t caller_partition_idx = desc_ptr->caller_part_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;
    const struct spm_partition_runtime_data_t *caller_part_data;
    uint32_t caller_flags;
    register uint32_t partition_idx;
    uint32_t psp;
    uint32_t partition_psp, partition_psplim;
    uint32_t partition_state;
    uint32_t caller_partition_state;
    struct tfm_state_context_t *svc_ctx;
    uint32_t caller_partition_id;
    int32_t client_id;
    struct iovec_args_t *iovec_args;

    /* The SVC exception frame of the caller sits at the current PSP */
    psp = __get_PSP();
    svc_ctx = (struct tfm_state_context_t *)psp;
    caller_flags = tfm_spm_partition_get_flags(caller_partition_idx);

    /* Check partition state consistency: a caller flagged App RoT must be a
     * secure caller, and vice versa.
     */
    if (((caller_flags & SPM_PART_FLAG_APP_ROT) != 0)
        != (!desc_ptr->ns_caller)) {
        /* Partition state inconsistency detected */
        return TFM_SECURE_LOCK_FAILED;
    }

    partition_idx = get_partition_idx(desc_ptr->sp_id);

    curr_part_data = tfm_spm_partition_get_runtime_data(partition_idx);
    caller_part_data = tfm_spm_partition_get_runtime_data(caller_partition_idx);
    partition_state = curr_part_data->partition_state;
    caller_partition_state = caller_part_data->partition_state;
    caller_partition_id = tfm_spm_partition_get_partition_id(
                                                          caller_partition_idx);

    /* During initialization the usual running/idle rules do not yet apply */
    if (!tfm_secure_api_initializing) {
        res = check_partition_state(partition_state, caller_partition_state);
        if (res != TFM_SUCCESS) {
            return res;
        }
    }

    /* Prepare switch to shared secure partition stack */
    /* In case the call is coming from the non-secure world, we save the iovecs
     * on the top of the stack. So the memory area, that can actually be used
     * as stack by the partitions starts at a lower address
     */
    partition_psp =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
    partition_psplim =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Base);

    /* Store the context for the partition call */
    tfm_spm_partition_set_caller_partition_idx(partition_idx,
                                               caller_partition_idx);
    tfm_spm_partition_store_context(caller_partition_idx, psp, excReturn);

    if ((caller_flags & SPM_PART_FLAG_APP_ROT)) {
        /* Secure caller: the client ID is the caller partition's own ID */
        tfm_spm_partition_set_caller_client_id(partition_idx,
                                               caller_partition_id);
    } else {
        /* Non-secure caller: client IDs must be negative by convention */
        client_id = tfm_nspm_get_current_client_id();
        if (client_id >= 0) {
            return TFM_SECURE_LOCK_FAILED;
        }
        tfm_spm_partition_set_caller_client_id(partition_idx, client_id);
    }

    /* In level one, only switch context and return from exception if in
     * handler mode
     */
    if ((desc_ptr->ns_caller) || (tfm_secure_api_initializing)) {
        if (tfm_spm_partition_set_iovec(partition_idx, desc_ptr->args) !=
            SPM_ERR_OK) {
            return TFM_ERROR_GENERIC;
        }
        /* Copy the validated iovecs to the top of the shared secure stack */
        iovec_args = get_iovec_args_stack_address(partition_idx);
        tfm_copy_iovec_parameters(iovec_args, &(curr_part_data->iovec_args));

        /* Prepare the partition context, update stack ptr */
        psp = (uint32_t)prepare_partition_iovec_ctx(svc_ctx, desc_ptr,
                                                    iovec_args,
                                                    (uint32_t *)partition_psp);
        __set_PSP(psp);
        tfm_arch_set_psplim(partition_psplim);
    }

    /* Caller blocks until the callee returns; take the secure lock */
    tfm_spm_partition_set_state(caller_partition_idx,
                                SPM_PARTITION_STATE_BLOCKED);
    tfm_spm_partition_set_state(partition_idx, SPM_PARTITION_STATE_RUNNING);
    tfm_secure_lock++;

    return TFM_SUCCESS;
}
406
/**
 * \brief Suspend the interrupted partition and set up a thread-mode context
 *        that runs the partition's IRQ handler unprivileged.
 *
 * The handler partition ID, handler function, signal and IRQ line are taken
 * from R0-R3 of the stacked SVC context.
 *
 * \param[in] excReturn  The EXC_RETURN value of the SVC (currently unused;
 *                       NOTE(review): kept for interface symmetry - confirm)
 * \param[in] svc_ctx    The stacked SVC context
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 */
static enum tfm_status_e tfm_start_partition_for_irq_handling(
                                          uint32_t excReturn,
                                          struct tfm_state_context_t *svc_ctx)
{
    /* Arguments passed by the SVC caller in R0-R3 */
    uint32_t handler_partition_id = svc_ctx->r0;
    sfn_t unpriv_handler = (sfn_t)svc_ctx->r1;
    uint32_t irq_signal = svc_ctx->r2;
    IRQn_Type irq_line = (IRQn_Type) svc_ctx->r3;
    enum tfm_status_e res;
    uint32_t psp = __get_PSP();
    uint32_t handler_partition_psp;
    uint32_t handler_partition_state;
    uint32_t interrupted_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t handler_partition_idx;

    handler_partition_idx = get_partition_idx(handler_partition_id);
    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                         handler_partition_idx);
    handler_partition_state = handler_part_data->partition_state;

    /* The handler partition must be in a state that may take an IRQ */
    res = check_irq_partition_state(handler_partition_state);
    if (res != TFM_SUCCESS) {
        return res;
    }

    /* set mask for the partition: assert this IRQ's signal */
    tfm_spm_partition_set_signal_mask(
                                 handler_partition_idx,
                                 handler_part_data->signal_mask | irq_signal);

    /* Keep the line disabled until the partition calls psa_eoi() */
    tfm_spm_hal_disable_irq(irq_line);

    /* save the current context of the interrupted partition */
    tfm_spm_partition_push_interrupted_ctx(interrupted_partition_idx);

    /* The handler's frame is built below the current PSP */
    handler_partition_psp = psp;

    /* save the current context of the handler partition */
    tfm_spm_partition_push_handler_ctx(handler_partition_idx);

    /* Store caller for the partition */
    tfm_spm_partition_set_caller_partition_idx(handler_partition_idx,
                                               interrupted_partition_idx);

    psp = (uint32_t)prepare_partition_irq_ctx(svc_ctx, unpriv_handler,
                                              (int32_t *)handler_partition_psp);
    __set_PSP(psp);

    tfm_spm_partition_set_state(interrupted_partition_idx,
                                SPM_PARTITION_STATE_SUSPENDED);
    tfm_spm_partition_set_state(handler_partition_idx,
                                SPM_PARTITION_STATE_HANDLING_IRQ);

    return TFM_SUCCESS;
}
464
/**
 * \brief Unwind a secure function call: release the secure lock and hand
 *        execution back to the caller partition.
 *
 * When returning to a non-secure caller (or during initialization) the saved
 * caller context is restored, the outvec lengths are written back, and the
 * shared iovec area is wiped.
 *
 * \param[out] excReturn  Updated with the caller's saved EXC_RETURN value.
 *                        NOTE(review): tfm_spm_sfn_request_thread_mode() passes
 *                        NULL here; that is only safe while the context-restore
 *                        branch below is never taken on that path - confirm.
 *
 * \return \ref TFM_SUCCESS on success, \ref TFM_SECURE_UNLOCK_FAILED if the
 *         running/caller partition bookkeeping is inconsistent.
 */
static enum tfm_status_e tfm_return_from_partition(uint32_t *excReturn)
{
    uint32_t current_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *curr_part_data, *ret_part_data;
    uint32_t return_partition_idx;
    uint32_t return_partition_flags;
    uint32_t psp = __get_PSP();
    size_t i;
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;
    struct iovec_args_t *iovec_args;

    if (current_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    curr_part_data = tfm_spm_partition_get_runtime_data(current_partition_idx);
    return_partition_idx = curr_part_data->caller_partition_idx;

    if (return_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    ret_part_data = tfm_spm_partition_get_runtime_data(return_partition_idx);

    return_partition_flags = tfm_spm_partition_get_flags(return_partition_idx);

    /* Release the "Big Lock" taken in tfm_start_partition() */
    tfm_secure_lock--;

    if (!(return_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        (tfm_secure_api_initializing)) {
        /* In TFM level 1 context restore is only done when
         * returning to NS or after initialization
         */
        /* Restore caller context */
        restore_caller_ctx(svc_ctx,
            (struct tfm_state_context_t *)ret_part_data->stack_ptr);
        *excReturn = ret_part_data->lr;
        __set_PSP(ret_part_data->stack_ptr);
        REGION_DECLARE_T(Image$$, ARM_LIB_STACK, $$ZI$$Base, uint32_t)[];
        uint32_t psp_stack_bottom =
            (uint32_t)REGION_NAME(Image$$, ARM_LIB_STACK, $$ZI$$Base);
        tfm_arch_set_psplim(psp_stack_bottom);

        /* The shared iovec save area sits at the top of the secure stack */
        iovec_args = &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];

        /* Propagate the lengths actually written by the service back into
         * the caller's original outvecs.
         */
        for (i = 0; i < curr_part_data->iovec_args.out_len; ++i) {
            curr_part_data->orig_outvec[i].len = iovec_args->out_vec[i].len;
        }
        /* Wipe the shared area so no stale pointers remain */
        tfm_clear_iovec_parameters(iovec_args);
    }

    tfm_spm_partition_cleanup_context(current_partition_idx);

    tfm_spm_partition_set_state(current_partition_idx,
                                SPM_PARTITION_STATE_IDLE);
    tfm_spm_partition_set_state(return_partition_idx,
                                SPM_PARTITION_STATE_RUNNING);

    return TFM_SUCCESS;
}
526
/**
 * \brief Unwind a deprivileged IRQ handler call and resume the interrupted
 *        partition's privileged handler context.
 *
 * \param[out] excReturn  Updated with the resume address taken from the
 *                        stacked SVC frame.
 *
 * \return \ref TFM_SUCCESS on success, \ref TFM_SECURE_UNLOCK_FAILED if the
 *         running/caller partition bookkeeping is inconsistent.
 */
static enum tfm_status_e tfm_return_from_partition_irq_handling(
                                                            uint32_t *excReturn)
{
    uint32_t handler_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t interrupted_partition_idx;
    uint32_t psp = __get_PSP();
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;

    if (handler_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                         handler_partition_idx);
    interrupted_partition_idx = handler_part_data->caller_partition_idx;

    if (interrupted_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    /* For level 1, modify PSP, so that the SVC stack frame disappears,
     * and return to the privileged handler using the stack frame still on the
     * MSP stack.
     */
    *excReturn = svc_ctx->ra;
    psp += sizeof(struct tfm_state_context_t);

    /* Restore both partitions' saved contexts in reverse push order */
    tfm_spm_partition_pop_handler_ctx(handler_partition_idx);
    tfm_spm_partition_pop_interrupted_ctx(interrupted_partition_idx);

    __set_PSP(psp);

    return TFM_SUCCESS;
}
563
564static enum tfm_status_e tfm_check_sfn_req_integrity(
565 const struct tfm_sfn_req_s *desc_ptr)
566{
567 if ((desc_ptr == NULL) ||
568 (desc_ptr->sp_id == 0) ||
569 (desc_ptr->sfn == NULL)) {
570 /* invalid parameter */
571 return TFM_ERROR_INVALID_PARAMETER;
572 }
573 return TFM_SUCCESS;
574}
575
/**
 * \brief Enforce the global rules for accepting a secure function request:
 *        a valid caller partition, single entry to the SPE, and - during
 *        system initialization - calls from TF-M Core only.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 *
 * \return \ref TFM_SUCCESS if the request may proceed, error code otherwise.
 */
static enum tfm_status_e tfm_core_check_sfn_req_rules(
        const struct tfm_sfn_req_s *desc_ptr)
{
    /* Check partition idx validity */
    if (desc_ptr->caller_part_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_ERROR_NO_ACTIVE_PARTITION;
    }

    if ((desc_ptr->ns_caller) && (tfm_secure_lock != 0)) {
        /* Secure domain is already locked!
         * This should only happen if caller is secure partition!
         */
        /* This scenario is a potential security breach.
         * Error is handled in caller.
         */
        return TFM_ERROR_SECURE_DOMAIN_LOCKED;
    }

    if (tfm_secure_api_initializing) {
        int32_t id =
            tfm_spm_partition_get_partition_id(desc_ptr->caller_part_idx);

        /* While initializing, only TF-M Core may issue requests, and the
         * secure domain must not already be locked.
         */
        if ((id != TFM_SP_CORE_ID) || (tfm_secure_lock != 0)) {
            /* Invalid request during system initialization */
            ERROR_MSG("Invalid service request during initialization!");
            return TFM_ERROR_NOT_INITIALIZED;
        }
    }

    return TFM_SUCCESS;
}
607
/**
 * \brief Mark secure API initialization as complete.
 *
 * After this call the relaxed initialization-time rules (Core-only callers,
 * unconditional context switch) no longer apply in the request handlers.
 */
void tfm_spm_secure_api_init_done(void)
{
    tfm_secure_api_initializing = 0;
}
612
/**
 * \brief Validate a secure function request and start the target partition.
 *
 * Runs the integrity, iovec and rule checks with interrupts disabled so the
 * validated parameters cannot change before the partition is entered. Any
 * failure is routed to tfm_secure_api_error_handler() (which does not
 * return to the failing path).
 *
 * \param[in] desc_ptr   The secure function request descriptor
 * \param[in] excReturn  The caller's EXC_RETURN value (0 in thread mode)
 *
 * \return \ref TFM_SUCCESS on success (other codes only if the error handler
 *         ever returned).
 */
enum tfm_status_e tfm_spm_sfn_request_handler(
                             struct tfm_sfn_req_s *desc_ptr, uint32_t excReturn)
{
    enum tfm_status_e res;

    res = tfm_check_sfn_req_integrity(desc_ptr);
    if (res != TFM_SUCCESS) {
        ERROR_MSG("Invalid service request!");
        tfm_secure_api_error_handler();
    }

    /* Keep IRQs off from validation until the partition switch is complete */
    __disable_irq();

    desc_ptr->caller_part_idx = tfm_spm_partition_get_running_partition_idx();

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        __enable_irq();
        tfm_secure_api_error_handler();
    }

    res = tfm_core_check_sfn_req_rules(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* FixMe: error compartmentalization TBD */
        tfm_spm_partition_set_state(
            desc_ptr->caller_part_idx, SPM_PARTITION_STATE_CLOSED);
        __enable_irq();
        ERROR_MSG("Unauthorized service request!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition(desc_ptr, excReturn);
    if (res != TFM_SUCCESS) {
        /* FixMe: consider possible fault scenarios */
        __enable_irq();
        ERROR_MSG("Failed to process service request!");
        tfm_secure_api_error_handler();
    }

    __enable_irq();

    return res;
}
657
/**
 * \brief Handle a secure-to-secure service call in thread mode (TFM level 1):
 *        lock the secure domain, invoke the service function directly, then
 *        unlock.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 *
 * \return The service function's return value on success, or a
 *         \ref tfm_status_e error cast to int32_t.
 */
int32_t tfm_spm_sfn_request_thread_mode(struct tfm_sfn_req_s *desc_ptr)
{
    enum tfm_status_e res;
    int32_t *args;
    int32_t retVal;

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        return (int32_t)res;
    }

    /* No excReturn value is needed as no exception handling is used */
    res = tfm_spm_sfn_request_handler(desc_ptr, 0);

    if (res != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    /* Secure partition to secure partition call in TFM level 1 */
    args = desc_ptr->args;
    retVal = desc_ptr->sfn(args[0], args[1], args[2], args[3]);

    /* return handler should restore original exc_return value... */
    /* NOTE(review): NULL is safe only while the secure-to-secure path never
     * reaches the *excReturn dereference in tfm_return_from_partition() -
     * confirm for the initialization case.
     */
    res = tfm_return_from_partition(NULL);
    if (res == TFM_SUCCESS) {
        /* If unlock successful, pass SS return value to caller */
        return retVal;
    } else {
        /* Unlock errors indicate ctx database corruption or unknown
         * anomalies. Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }
    return (int32_t)res;
}
695
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800696int32_t tfm_spm_check_buffer_access(uint32_t partition_idx,
697 void *start_addr,
698 size_t len,
699 uint32_t alignment)
700{
701 uintptr_t start_addr_value = (uintptr_t)start_addr;
702 uintptr_t end_addr_value = (uintptr_t)start_addr + len;
703 uintptr_t alignment_mask;
704
705 alignment_mask = (((uintptr_t)1) << alignment) - 1;
706
707 /* Check that the pointer is aligned properly */
708 if (start_addr_value & alignment_mask) {
709 /* not aligned, return error */
710 return 0;
711 }
712
713 /* Protect against overflow (and zero len) */
714 if (end_addr_value <= start_addr_value) {
715 return 0;
716 }
717
718 /* For privileged partition execution, all secure data memory and stack
719 * is accessible
720 */
721 if (start_addr_value >= S_DATA_START &&
722 end_addr_value <= (S_DATA_START + S_DATA_SIZE)) {
723 return 1;
724 }
725
726 return 0;
727}
728
/**
 * \brief SVC handler: write the caller client ID of the running partition to
 *        the address passed in R0.
 *
 * The result code (\ref tfm_status_e) is returned to the SVC caller in R0
 * via svc_args[0].
 *
 * \param[in,out] svc_args  The stacked SVC argument/return area
 */
void tfm_spm_get_caller_client_id_handler(uint32_t *svc_args)
{
    uintptr_t result_ptr_value = svc_args[0];
    uint32_t running_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const uint32_t running_partition_flags =
            tfm_spm_partition_get_flags(running_partition_idx);
    const struct spm_partition_runtime_data_t *curr_part_data =
            tfm_spm_partition_get_runtime_data(running_partition_idx);
    int res = 0;

    if (!(running_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_SUSPENDED) {
        /* This handler shouldn't be called from outside partition context.
         * Also if the current partition is handling IRQ, the caller partition
         * index might not be valid;
         * Partitions are only allowed to run while S domain is locked.
         */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    /* Make sure that the output pointer points to a memory area that is owned
     * by the partition (4-byte aligned, inside secure data)
     */
    res = tfm_spm_check_buffer_access(running_partition_idx,
                                      (void *)result_ptr_value,
                                      sizeof(curr_part_data->caller_client_id),
                                      2);
    if (!res) {
        /* Not in accessible range, return error */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    *((int32_t *)result_ptr_value) = curr_part_data->caller_client_id;

    /* Store return value in r0 */
    svc_args[0] = (uint32_t)TFM_SUCCESS;
}
770
771/* This SVC handler is called if veneer is running in thread mode */
772uint32_t tfm_spm_partition_request_svc_handler(
773 const uint32_t *svc_ctx, uint32_t excReturn)
774{
775 struct tfm_sfn_req_s *desc_ptr;
776
777 if (!(excReturn & EXC_RETURN_STACK_PROCESS)) {
778 /* Service request SVC called with MSP active.
779 * Either invalid configuration for Thread mode or SVC called
780 * from Handler mode, which is not supported.
781 * FixMe: error severity TBD
782 */
783 ERROR_MSG("Service request SVC called with MSP active!");
784 tfm_secure_api_error_handler();
785 }
786
787 desc_ptr = (struct tfm_sfn_req_s *)svc_ctx[0];
788
789 if (tfm_spm_sfn_request_handler(desc_ptr, excReturn) != TFM_SUCCESS) {
790 tfm_secure_api_error_handler();
791 }
792
793 return EXC_RETURN_SECURE_FUNCTION;
794}
795
/* This SVC handler is called, if a thread mode execution environment is to
 * be set up, to run an unprivileged IRQ handler
 */
/**
 * \brief SVC handler: set up a thread-mode, unprivileged execution context
 *        for a partition's IRQ handler.
 *
 * \param[in] svc_args   The stacked SVC context (handler partition ID,
 *                       handler, signal and IRQ line in R0-R3)
 * \param[in] excReturn  The EXC_RETURN value of the SVC
 *
 * \return EXC_RETURN_SECURE_FUNCTION on success (errors do not return).
 */
uint32_t tfm_spm_depriv_req_handler(uint32_t *svc_args, uint32_t excReturn)
{
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;

    enum tfm_status_e res;

    /* This request must originate from Handler mode (MSP active) */
    if (excReturn & EXC_RETURN_STACK_PROCESS) {
        /* FixMe: error severity TBD */
        ERROR_MSG("Partition request SVC called with PSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition_for_irq_handling(excReturn, svc_ctx);
    if (res != TFM_SUCCESS) {
        /* The partition is in an invalid state (UNINIT or CLOSED), so none of
         * its code can be run
         */
        /* FixMe: For now this case is handled with TF-M panic, however it would
         * be possible to skip the execution of the interrupt handler, and
         * resume the execution of the interrupted code.
         */
        tfm_secure_api_error_handler();
    }
    return EXC_RETURN_SECURE_FUNCTION;
}
825
826/* This SVC handler is called when sfn returns */
827uint32_t tfm_spm_partition_return_handler(uint32_t lr)
828{
829 enum tfm_status_e res;
830
831 if (!(lr & EXC_RETURN_STACK_PROCESS)) {
832 /* Partition return SVC called with MSP active.
833 * This should not happen!
834 */
835 ERROR_MSG("Partition return SVC called with MSP active!");
836 tfm_secure_api_error_handler();
837 }
838
839 res = tfm_return_from_partition(&lr);
840 if (res != TFM_SUCCESS) {
841 /* Unlock errors indicate ctx database corruption or unknown anomalies
842 * Halt execution
843 */
844 ERROR_MSG("Secure API error during unlock!");
845 tfm_secure_api_error_handler();
846 }
847
848 return lr;
849}
850
851/* This SVC handler is called if a deprivileged IRQ handler was executed, and
852 * the execution environment is to be set back for the privileged handler mode
853 */
854uint32_t tfm_spm_depriv_return_handler(uint32_t *irq_svc_args, uint32_t lr)
855{
856 enum tfm_status_e res;
857 struct tfm_state_context_t *irq_svc_ctx =
858 (struct tfm_state_context_t *)irq_svc_args;
859
860 if (!(lr & EXC_RETURN_STACK_PROCESS)) {
861 /* Partition request SVC called with MSP active.
862 * FixMe: error severity TBD
863 */
864 ERROR_MSG("Partition request SVC called with MSP active!");
865 tfm_secure_api_error_handler();
866 }
867
868 res = tfm_return_from_partition_irq_handling(&lr);
869 if (res != TFM_SUCCESS) {
870 /* Unlock errors indicate ctx database corruption or unknown anomalies
871 * Halt execution
872 */
873 ERROR_MSG("Secure API error during unlock!");
874 tfm_secure_api_error_handler();
875 }
876
877 irq_svc_ctx->ra = lr;
878
879 return EXC_RETURN_SECURE_HANDLER;
880}
881
882/* FIXME: get_irq_line_for_signal is also implemented in the ipc folder. */
883/**
884 * \brief Return the IRQ line number associated with a signal
885 *
886 * \param[in] partition_id The ID of the partition in which we look for the
887 * signal
888 * \param[in] signal The signal we do the query for
889 *
890 * \retval >=0 The IRQ line number associated with a signal in the partition
891 * \retval <0 error
892 */
TTornblomfaf74f52020-03-04 17:56:27 +0100893static IRQn_Type get_irq_line_for_signal(int32_t partition_id,
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800894 psa_signal_t signal)
895{
896 size_t i;
897
898 for (i = 0; i < tfm_core_irq_signals_count; ++i) {
899 if (tfm_core_irq_signals[i].partition_id == partition_id &&
900 tfm_core_irq_signals[i].signal_value == signal) {
901 return tfm_core_irq_signals[i].irq_line;
902 }
903 }
TTornblomfaf74f52020-03-04 17:56:27 +0100904 return (IRQn_Type) -1;
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800905}
906
907void tfm_spm_enable_irq_handler(uint32_t *svc_args)
908{
909 struct tfm_state_context_t *svc_ctx =
910 (struct tfm_state_context_t *)svc_args;
911 psa_signal_t irq_signal = svc_ctx->r0;
912 uint32_t running_partition_idx =
913 tfm_spm_partition_get_running_partition_idx();
914 uint32_t running_partition_id =
915 tfm_spm_partition_get_partition_id(running_partition_idx);
TTornblomfaf74f52020-03-04 17:56:27 +0100916 IRQn_Type irq_line;
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800917
918 /* Only a single signal is allowed */
919 if (!tfm_is_one_bit_set(irq_signal)) {
920 /* FixMe: error severity TBD */
921 tfm_secure_api_error_handler();
922 }
923
924 irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
925
926 if (irq_line < 0) {
927 /* FixMe: error severity TBD */
928 tfm_secure_api_error_handler();
929 }
930
931 tfm_spm_hal_enable_irq(irq_line);
932}
933
934void tfm_spm_disable_irq_handler(uint32_t *svc_args)
935{
936 struct tfm_state_context_t *svc_ctx =
937 (struct tfm_state_context_t *)svc_args;
938 psa_signal_t irq_signal = svc_ctx->r0;
939 uint32_t running_partition_idx =
940 tfm_spm_partition_get_running_partition_idx();
941 uint32_t running_partition_id =
942 tfm_spm_partition_get_partition_id(running_partition_idx);
TTornblomfaf74f52020-03-04 17:56:27 +0100943 IRQn_Type irq_line;
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800944
945 /* Only a single signal is allowed */
946 if (!tfm_is_one_bit_set(irq_signal)) {
947 /* FixMe: error severity TBD */
948 tfm_secure_api_error_handler();
949 }
950
951 irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
952
953 if (irq_line < 0) {
954 /* FixMe: error severity TBD */
955 tfm_secure_api_error_handler();
956 }
957
958 tfm_spm_hal_disable_irq(irq_line);
959}
960
/*
 * SVC handler for psa_wait().
 *
 * In the library model there is no scheduler, so this call cannot actually
 * block: it only returns the caller's currently asserted signals (the
 * partition's signal_mask) in r0.
 *
 * \param[in] svc_args  Pointer to the exception stack frame of the SVC;
 *                      r0 = psa_signal_t signal mask of interest,
 *                      r1 = timeout, r0 is overwritten with the result.
 */
void tfm_spm_psa_wait(uint32_t *svc_args)
{
    /* Look for partition that is ready for run */
    struct tfm_state_context_t *svc_ctx =
        (struct tfm_state_context_t *)svc_args;
    uint32_t running_partition_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;

    psa_signal_t signal_mask = svc_ctx->r0;
    uint32_t timeout = svc_ctx->r1;

    /*
     * Timeout[30:0] are reserved for future use.
     * SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    running_partition_idx = tfm_spm_partition_get_running_partition_idx();
    curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);

    if (timeout == PSA_BLOCK) {
        /* FIXME: Scheduling is not available in library model, and busy wait is
         * also not possible as this code is running in SVC context, and it
         * cannot be pre-empted by interrupts. So do nothing here for now
         */
        (void) signal_mask;
    }

    /* Return the asserted signals of the caller in the stacked r0 */
    svc_ctx->r0 = curr_part_data->signal_mask;
}
991
/*
 * SVC handler for psa_eoi(): acknowledges the interrupt behind the single
 * signal in r0, re-arms its IRQ line and clears the signal from the
 * running partition's signal mask.
 *
 * \param[in] svc_args  Pointer to the exception stack frame of the SVC;
 *                      r0 = psa_signal_t of the IRQ signal being completed.
 */
void tfm_spm_psa_eoi(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
        (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t signal_mask;
    uint32_t running_partition_idx;
    uint32_t running_partition_id;
    const struct spm_partition_runtime_data_t *curr_part_data;
    IRQn_Type irq_line;

    running_partition_idx = tfm_spm_partition_get_running_partition_idx();
    running_partition_id =
            tfm_spm_partition_get_partition_id(running_partition_idx);
    curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);

    /* Only a single signal is allowed */
    if (!tfm_is_one_bit_set(irq_signal)) {
        tfm_secure_api_error_handler();
    }

    /* The signal must map to an IRQ line owned by the calling partition */
    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    /* Drop any interrupt latched while the signal was being handled, then
     * unmask the line so new interrupts can assert the signal again.
     */
    tfm_spm_hal_clear_pending_irq(irq_line);
    tfm_spm_hal_enable_irq(irq_line);

    /* Clear the completed signal from the partition's pending set */
    signal_mask = curr_part_data->signal_mask & ~irq_signal;
    tfm_spm_partition_set_signal_mask(running_partition_idx, signal_mask);
}
Mingyang Sunda01a972019-07-12 17:32:59 +08001026
1027/*
1028 * This function is called when a secure partition causes an error.
1029 * In case of an error in the error handling, a non-zero value have to be
1030 * returned.
1031 */
1032static void tfm_spm_partition_err_handler(
1033 const struct spm_partition_desc_t *partition,
Mingyang Sunda01a972019-07-12 17:32:59 +08001034 int32_t err_code)
1035{
Mingyang Sunda01a972019-07-12 17:32:59 +08001036 (void)err_code;
Ken Liuf250b8b2019-12-27 16:31:24 +08001037
Summer Qin423dbef2019-08-22 15:59:35 +08001038 tfm_spm_partition_set_state(partition->static_data->partition_id,
Mingyang Sunda01a972019-07-12 17:32:59 +08001039 SPM_PARTITION_STATE_CLOSED);
1040}
1041
/*
 * Initialise every partition in the database: apply the platform's default
 * isolation configuration for each partition's peripherals, then run the
 * partition's init function (if any) as a secure function request.
 *
 * \return SPM_ERR_OK if all partitions initialised, otherwise
 *         SPM_ERR_PARTITION_NOT_AVAILABLE (failed partitions are closed).
 */
enum spm_err_t tfm_spm_partition_init(void)
{
    struct spm_partition_desc_t *part;
    struct tfm_sfn_req_s desc;
    int32_t args[4] = {0};
    int32_t fail_cnt = 0;
    uint32_t idx;
    const struct tfm_spm_partition_platform_data_t **platform_data_p;

    /* Call the init function for each partition */
    for (idx = 0; idx < g_spm_partition_db.partition_count; ++idx) {
        part = &g_spm_partition_db.partitions[idx];
        /* Configure MPU/PPC isolation for each entry in the partition's
         * NULL-terminated platform data list, if one is present.
         */
        platform_data_p = part->platform_data_list;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(idx,
                    *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    fail_cnt++;
                }
                ++platform_data_p;
            }
        }
        if (part->static_data->partition_init == NULL) {
            /* No init function: the partition is immediately ready */
            tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            tfm_spm_partition_set_caller_partition_idx(idx,
                                                   SPM_INVALID_PARTITION_IDX);
        } else {
            int32_t res;

            /* Run the init function through the normal secure function
             * request path so state/caller bookkeeping stays consistent.
             */
            desc.args = args;
            desc.ns_caller = false;
            desc.sfn = (sfn_t)part->static_data->partition_init;
            desc.sp_id = part->static_data->partition_id;
            res = tfm_core_sfn_request(&desc);
            if (res == TFM_SUCCESS) {
                tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            } else {
                /* Close the partition; keep initialising the others */
                tfm_spm_partition_err_handler(part, res);
                fail_cnt++;
            }
        }
    }

    /* From this point on the secure API dispatcher is fully operational */
    tfm_spm_secure_api_init_done();

    if (fail_cnt == 0) {
        return SPM_ERR_OK;
    } else {
        return SPM_ERR_PARTITION_NOT_AVAILABLE;
    }
}
1093
1094void tfm_spm_partition_push_interrupted_ctx(uint32_t partition_idx)
1095{
1096 struct spm_partition_runtime_data_t *runtime_data =
1097 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1098 struct interrupted_ctx_stack_frame_t *stack_frame =
Edison Ai7aff9e82019-07-11 14:56:46 +08001099 (struct interrupted_ctx_stack_frame_t *)runtime_data->ctx_stack_ptr;
Mingyang Sunda01a972019-07-12 17:32:59 +08001100
1101 stack_frame->partition_state = runtime_data->partition_state;
Matt463ed582019-12-20 12:31:25 +08001102
1103 runtime_data->ctx_stack_ptr +=
1104 sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
Mingyang Sunda01a972019-07-12 17:32:59 +08001105}
1106
1107void tfm_spm_partition_pop_interrupted_ctx(uint32_t partition_idx)
1108{
1109 struct spm_partition_runtime_data_t *runtime_data =
1110 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1111 struct interrupted_ctx_stack_frame_t *stack_frame;
1112
Matt463ed582019-12-20 12:31:25 +08001113 runtime_data->ctx_stack_ptr -=
1114 sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
1115
Mingyang Sunda01a972019-07-12 17:32:59 +08001116 stack_frame = (struct interrupted_ctx_stack_frame_t *)
1117 runtime_data->ctx_stack_ptr;
1118 tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
1119 stack_frame->partition_state = 0;
Mingyang Sunda01a972019-07-12 17:32:59 +08001120}
1121
1122void tfm_spm_partition_push_handler_ctx(uint32_t partition_idx)
1123{
1124 struct spm_partition_runtime_data_t *runtime_data =
1125 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1126 struct handler_ctx_stack_frame_t *stack_frame =
1127 (struct handler_ctx_stack_frame_t *)
1128 runtime_data->ctx_stack_ptr;
1129
1130 stack_frame->partition_state = runtime_data->partition_state;
1131 stack_frame->caller_partition_idx = runtime_data->caller_partition_idx;
1132
1133 runtime_data->ctx_stack_ptr +=
1134 sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
1135}
1136
1137void tfm_spm_partition_pop_handler_ctx(uint32_t partition_idx)
1138{
1139 struct spm_partition_runtime_data_t *runtime_data =
1140 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1141 struct handler_ctx_stack_frame_t *stack_frame;
1142
1143 runtime_data->ctx_stack_ptr -=
1144 sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
1145
1146 stack_frame = (struct handler_ctx_stack_frame_t *)
1147 runtime_data->ctx_stack_ptr;
1148
1149 tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
1150 stack_frame->partition_state = 0;
1151 tfm_spm_partition_set_caller_partition_idx(
1152 partition_idx, stack_frame->caller_partition_idx);
1153 stack_frame->caller_partition_idx = 0;
1154}
1155
Mingyang Sunda01a972019-07-12 17:32:59 +08001156void tfm_spm_partition_store_context(uint32_t partition_idx,
1157 uint32_t stack_ptr, uint32_t lr)
1158{
1159 g_spm_partition_db.partitions[partition_idx].
1160 runtime_data.stack_ptr = stack_ptr;
1161 g_spm_partition_db.partitions[partition_idx].
1162 runtime_data.lr = lr;
1163}
1164
1165const struct spm_partition_runtime_data_t *
1166 tfm_spm_partition_get_runtime_data(uint32_t partition_idx)
1167{
1168 return &(g_spm_partition_db.partitions[partition_idx].runtime_data);
1169}
1170
1171void tfm_spm_partition_set_state(uint32_t partition_idx, uint32_t state)
1172{
1173 g_spm_partition_db.partitions[partition_idx].runtime_data.partition_state =
1174 state;
1175 if (state == SPM_PARTITION_STATE_RUNNING ||
1176 state == SPM_PARTITION_STATE_HANDLING_IRQ) {
1177 g_spm_partition_db.running_partition_idx = partition_idx;
1178 }
1179}
1180
1181void tfm_spm_partition_set_caller_partition_idx(uint32_t partition_idx,
1182 uint32_t caller_partition_idx)
1183{
1184 g_spm_partition_db.partitions[partition_idx].runtime_data.
1185 caller_partition_idx = caller_partition_idx;
1186}
1187
1188void tfm_spm_partition_set_signal_mask(uint32_t partition_idx,
1189 uint32_t signal_mask)
1190{
1191 g_spm_partition_db.partitions[partition_idx].runtime_data.
1192 signal_mask = signal_mask;
1193}
1194
1195void tfm_spm_partition_set_caller_client_id(uint32_t partition_idx,
1196 int32_t caller_client_id)
1197{
1198 g_spm_partition_db.partitions[partition_idx].runtime_data.
1199 caller_client_id = caller_client_id;
1200}
1201
Mingyang Sunda01a972019-07-12 17:32:59 +08001202enum spm_err_t tfm_spm_partition_set_iovec(uint32_t partition_idx,
1203 const int32_t *args)
1204{
1205 struct spm_partition_runtime_data_t *runtime_data =
1206 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1207 size_t i;
1208
1209 if ((args[1] < 0) || (args[3] < 0)) {
1210 return SPM_ERR_INVALID_PARAMETER;
1211 }
1212
1213 runtime_data->iovec_args.in_len = (size_t)args[1];
1214 for (i = 0U; i < runtime_data->iovec_args.in_len; ++i) {
1215 runtime_data->iovec_args.in_vec[i].base =
1216 ((psa_invec *)args[0])[i].base;
1217 runtime_data->iovec_args.in_vec[i].len = ((psa_invec *)args[0])[i].len;
1218 }
1219 runtime_data->iovec_args.out_len = (size_t)args[3];
1220 for (i = 0U; i < runtime_data->iovec_args.out_len; ++i) {
1221 runtime_data->iovec_args.out_vec[i].base =
1222 ((psa_outvec *)args[2])[i].base;
1223 runtime_data->iovec_args.out_vec[i].len =
1224 ((psa_outvec *)args[2])[i].len;
1225 }
1226 runtime_data->orig_outvec = (psa_outvec *)args[2];
Mingyang Sunda01a972019-07-12 17:32:59 +08001227
1228 return SPM_ERR_OK;
1229}
1230
1231uint32_t tfm_spm_partition_get_running_partition_idx(void)
1232{
1233 return g_spm_partition_db.running_partition_idx;
1234}
1235
1236void tfm_spm_partition_cleanup_context(uint32_t partition_idx)
1237{
1238 struct spm_partition_desc_t *partition =
1239 &(g_spm_partition_db.partitions[partition_idx]);
1240 int32_t i;
1241
1242 partition->runtime_data.caller_partition_idx = SPM_INVALID_PARTITION_IDX;
Mingyang Sunda01a972019-07-12 17:32:59 +08001243 partition->runtime_data.iovec_args.in_len = 0;
1244 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
1245 partition->runtime_data.iovec_args.in_vec[i].base = 0;
1246 partition->runtime_data.iovec_args.in_vec[i].len = 0;
1247 }
1248 partition->runtime_data.iovec_args.out_len = 0;
1249 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
1250 partition->runtime_data.iovec_args.out_vec[i].base = 0;
1251 partition->runtime_data.iovec_args.out_vec[i].len = 0;
1252 }
1253 partition->runtime_data.orig_outvec = 0;
Summer Qin423dbef2019-08-22 15:59:35 +08001254}
Summer Qin830c5542020-02-14 13:44:20 +08001255
1256void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
1257{
1258 uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
1259 uint32_t running_partition_flags = 0;
1260 uint32_t running_partition_idx;
1261
1262 /* Check permissions on request type basis */
1263
1264 switch (svc_ctx->r0) {
1265 case TFM_SPM_REQUEST_RESET_VOTE:
1266 running_partition_idx =
1267 tfm_spm_partition_get_running_partition_idx();
1268 running_partition_flags = tfm_spm_partition_get_flags(
1269 running_partition_idx);
1270
1271 /* Currently only PSA Root of Trust services are allowed to make Reset
1272 * vote request
1273 */
1274 if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
1275 *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
1276 }
1277
1278 /* FixMe: this is a placeholder for checks to be performed before
1279 * allowing execution of reset
1280 */
1281 *res_ptr = (uint32_t)TFM_SUCCESS;
1282
1283 break;
1284 default:
1285 *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
1286 }
1287}
Mingyang Sunbd7ceb52020-06-11 16:53:03 +08001288
1289enum spm_err_t tfm_spm_db_init(void)
1290{
1291 uint32_t i;
1292
1293 /* This function initialises partition db */
1294
1295 /* For the non secure Execution environment */
1296 tfm_nspm_configure_clients();
1297
1298 for (i = 0; i < g_spm_partition_db.partition_count; i++) {
1299 g_spm_partition_db.partitions[i].runtime_data.partition_state =
1300 SPM_PARTITION_STATE_UNINIT;
1301 g_spm_partition_db.partitions[i].runtime_data.caller_partition_idx =
1302 SPM_INVALID_PARTITION_IDX;
1303 g_spm_partition_db.partitions[i].runtime_data.caller_client_id =
1304 TFM_INVALID_CLIENT_ID;
1305 g_spm_partition_db.partitions[i].runtime_data.ctx_stack_ptr =
1306 ctx_stack_list[i];
1307 g_spm_partition_db.partitions[i].static_data = &static_data_list[i];
1308 g_spm_partition_db.partitions[i].platform_data_list =
1309 platform_data_list_list[i];
1310 }
1311 g_spm_partition_db.is_init = 1;
1312
1313 return SPM_ERR_OK;
1314}