blob: ca1181470e92e81d195ba984ece2419bb000daa6 [file] [log] [blame]
Mingyang Sunda01a972019-07-12 17:32:59 +08001/*
Mingyang Sunabb1aab2020-02-18 13:49:08 +08002 * Copyright (c) 2017-2020, Arm Limited. All rights reserved.
Mingyang Sunda01a972019-07-12 17:32:59 +08003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 *
6 */
7
Mingyang Sunabb1aab2020-02-18 13:49:08 +08008#include <stdint.h>
Mingyang Sunda01a972019-07-12 17:32:59 +08009#include <stdbool.h>
Mingyang Sunabb1aab2020-02-18 13:49:08 +080010#include <arm_cmse.h>
11#include "tfm_nspm.h"
12#include "secure_utilities.h"
13#include "tfm_api.h"
14#include "tfm_arch.h"
15#include "tfm_irq_list.h"
16#include "psa/service.h"
17#include "tfm_core_mem_check.h"
18#include "tfm_secure_api.h"
Mingyang Sunda01a972019-07-12 17:32:59 +080019#include "tfm_spm_hal.h"
20#include "spm_api.h"
21#include "spm_db.h"
22#include "region_defs.h"
Mingyang Sunabb1aab2020-02-18 13:49:08 +080023#include "region.h"
24
25#define EXC_RETURN_SECURE_FUNCTION 0xFFFFFFFD
26#define EXC_RETURN_SECURE_HANDLER 0xFFFFFFF1
27
28#ifndef TFM_LVL
29#error TFM_LVL is not defined!
30#endif
31
32REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Base, uint32_t);
33REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Limit, struct iovec_args_t)[];
34
35/*
36 * This is the "Big Lock" on the secure side, to guarantee single entry
37 * to SPE
38 */
39extern int32_t tfm_secure_lock;
40static int32_t tfm_secure_api_initializing = 1;
Mingyang Sunda01a972019-07-12 17:32:59 +080041
42extern struct spm_partition_db_t g_spm_partition_db;
43
Mingyang Sunabb1aab2020-02-18 13:49:08 +080044static uint32_t *prepare_partition_iovec_ctx(
45 const struct tfm_state_context_t *svc_ctx,
46 const struct tfm_sfn_req_s *desc_ptr,
47 const struct iovec_args_t *iovec_args,
48 uint32_t *dst)
49{
50 /* XPSR = as was when called, but make sure it's thread mode */
51 *(--dst) = svc_ctx->xpsr & 0xFFFFFE00U;
52 /* ReturnAddress = resume veneer in new context */
53 *(--dst) = svc_ctx->ra;
54 /* LR = sfn address */
55 *(--dst) = (uint32_t)desc_ptr->sfn;
56 /* R12 = don't care */
57 *(--dst) = 0U;
58
59 /* R0-R3 = sfn arguments */
60 *(--dst) = iovec_args->out_len;
61 *(--dst) = (uint32_t)iovec_args->out_vec;
62 *(--dst) = iovec_args->in_len;
63 *(--dst) = (uint32_t)iovec_args->in_vec;
64
65 return dst;
66}
67
68/**
69 * \brief Create a stack frame that sets the execution environment to thread
70 * mode on exception return.
71 *
72 * \param[in] svc_ctx The stacked SVC context
73 * \param[in] unpriv_handler The unprivileged IRQ handler to be called
74 * \param[in] dst A pointer where the context is to be created. (the
75 * pointer is considered to be a stack pointer, and
76 * the frame is created below it)
77 *
78 * \return A pointer pointing at the created stack frame.
79 */
80static int32_t *prepare_partition_irq_ctx(
81 const struct tfm_state_context_t *svc_ctx,
82 sfn_t unpriv_handler,
83 int32_t *dst)
84{
85 int i;
86
87 /* XPSR = as was when called, but make sure it's thread mode */
88 *(--dst) = svc_ctx->xpsr & 0xFFFFFE00;
89 /* ReturnAddress = resume to the privileged handler code, but execute it
90 * unprivileged.
91 */
92 *(--dst) = svc_ctx->ra;
93 /* LR = start address */
94 *(--dst) = (int32_t)unpriv_handler;
95
96 /* R12, R0-R3 unused arguments */
97 for (i = 0; i < 5; ++i) {
98 *(--dst) = 0;
99 }
100
101 return dst;
102}
103
104static void restore_caller_ctx(const struct tfm_state_context_t *svc_ctx,
105 struct tfm_state_context_t *target_ctx)
106{
107 /* ReturnAddress = resume veneer after second SVC */
108 target_ctx->ra = svc_ctx->ra;
109
110 /* R0 = function return value */
111 target_ctx->r0 = svc_ctx->r0;
112
113 return;
114}
115
116/**
117 * \brief Check whether the iovec parameters are valid, and the memory ranges
118 * are in the possession of the calling partition.
119 *
120 * \param[in] desc_ptr The secure function request descriptor
121 *
122 * \return Return /ref TFM_SUCCESS if the iovec parameters are valid, error code
123 * otherwise as in /ref tfm_status_e
124 */
125static enum tfm_status_e tfm_core_check_sfn_parameters(
126 const struct tfm_sfn_req_s *desc_ptr)
127{
128 struct psa_invec *in_vec = (psa_invec *)desc_ptr->args[0];
129 size_t in_len;
130 struct psa_outvec *out_vec = (psa_outvec *)desc_ptr->args[2];
131 size_t out_len;
132 uint32_t i;
133
134 if ((desc_ptr->args[1] < 0) || (desc_ptr->args[3] < 0)) {
135 return TFM_ERROR_INVALID_PARAMETER;
136 }
137
138 in_len = (size_t)(desc_ptr->args[1]);
139 out_len = (size_t)(desc_ptr->args[3]);
140
141 /* The number of vectors are within range. Extra checks to avoid overflow */
142 if ((in_len > PSA_MAX_IOVEC) || (out_len > PSA_MAX_IOVEC) ||
143 (in_len + out_len > PSA_MAX_IOVEC)) {
144 return TFM_ERROR_INVALID_PARAMETER;
145 }
146
147 /* Check whether the caller partition has at write access to the iovec
148 * structures themselves. Use the TT instruction for this.
149 */
150 if (in_len > 0) {
151 if ((in_vec == NULL) ||
152 (tfm_core_has_write_access_to_region(in_vec,
153 sizeof(psa_invec)*in_len, desc_ptr->ns_caller,
154 TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
155 return TFM_ERROR_INVALID_PARAMETER;
156 }
157 } else {
158 if (in_vec != NULL) {
159 return TFM_ERROR_INVALID_PARAMETER;
160 }
161 }
162 if (out_len > 0) {
163 if ((out_vec == NULL) ||
164 (tfm_core_has_write_access_to_region(out_vec,
165 sizeof(psa_outvec)*out_len, desc_ptr->ns_caller,
166 TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
167 return TFM_ERROR_INVALID_PARAMETER;
168 }
169 } else {
170 if (out_vec != NULL) {
171 return TFM_ERROR_INVALID_PARAMETER;
172 }
173 }
174
175 /* Check whether the caller partition has access to the data inside the
176 * iovecs
177 */
178 for (i = 0; i < in_len; ++i) {
179 if (in_vec[i].len > 0) {
180 if ((in_vec[i].base == NULL) ||
181 (tfm_core_has_read_access_to_region(in_vec[i].base,
182 in_vec[i].len, desc_ptr->ns_caller,
183 TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
184 return TFM_ERROR_INVALID_PARAMETER;
185 }
186 }
187 }
188 for (i = 0; i < out_len; ++i) {
189 if (out_vec[i].len > 0) {
190 if ((out_vec[i].base == NULL) ||
191 (tfm_core_has_write_access_to_region(out_vec[i].base,
192 out_vec[i].len, desc_ptr->ns_caller,
193 TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
194 return TFM_ERROR_INVALID_PARAMETER;
195 }
196 }
197 }
198
199 return TFM_SUCCESS;
200}
201
202static void tfm_copy_iovec_parameters(struct iovec_args_t *target,
203 const struct iovec_args_t *source)
204{
205 size_t i;
206
207 /* The vectors have been sanity checked already, and since then the
208 * interrupts have been kept disabled. So we can be sure that the
209 * vectors haven't been tampered with since the check. So it is safe to pass
210 * it to the called partition.
211 */
212
213 target->in_len = source->in_len;
214 for (i = 0; i < source->in_len; ++i) {
215 target->in_vec[i].base = source->in_vec[i].base;
216 target->in_vec[i].len = source->in_vec[i].len;
217 }
218 target->out_len = source->out_len;
219 for (i = 0; i < source->out_len; ++i) {
220 target->out_vec[i].base = source->out_vec[i].base;
221 target->out_vec[i].len = source->out_vec[i].len;
222 }
223}
224
225static void tfm_clear_iovec_parameters(struct iovec_args_t *args)
226{
227 int i;
228
229 args->in_len = 0;
230 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
231 args->in_vec[i].base = NULL;
232 args->in_vec[i].len = 0;
233 }
234 args->out_len = 0;
235 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
236 args->out_vec[i].base = NULL;
237 args->out_vec[i].len = 0;
238 }
239}
240
241/**
242 * \brief Check whether the partitions for the secure function call are in a
243 * proper state.
244 *
245 * \param[in] curr_partition_state State of the partition to be called
246 * \param[in] caller_partition_state State of the caller partition
247 *
248 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
249 */
250static enum tfm_status_e check_partition_state(uint32_t curr_partition_state,
251 uint32_t caller_partition_state)
252{
253 if (caller_partition_state != SPM_PARTITION_STATE_RUNNING) {
254 /* Calling partition from non-running state (e.g. during handling IRQ)
255 * is not allowed.
256 */
257 return TFM_ERROR_INVALID_EXC_MODE;
258 }
259
260 if (curr_partition_state == SPM_PARTITION_STATE_RUNNING ||
261 curr_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
262 curr_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
263 curr_partition_state == SPM_PARTITION_STATE_BLOCKED) {
264 /* Active partitions cannot be called! */
265 return TFM_ERROR_PARTITION_NON_REENTRANT;
266 } else if (curr_partition_state != SPM_PARTITION_STATE_IDLE) {
267 /* The partition to be called is not in a proper state */
268 return TFM_SECURE_LOCK_FAILED;
269 }
270 return TFM_SUCCESS;
271}
272
273/**
274 * \brief Check whether the partitions for the secure function call of irq are
275 * in a proper state.
276 *
277 * \param[in] called_partition_state State of the partition to be called
278 *
279 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
280 */
281static enum tfm_status_e check_irq_partition_state(
282 uint32_t called_partition_state)
283{
284 if (called_partition_state == SPM_PARTITION_STATE_IDLE ||
285 called_partition_state == SPM_PARTITION_STATE_RUNNING ||
286 called_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
287 called_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
288 called_partition_state == SPM_PARTITION_STATE_BLOCKED) {
289 return TFM_SUCCESS;
290 }
291 return TFM_SECURE_LOCK_FAILED;
292}
293
294/**
295 * \brief Calculate the address where the iovec parameters are to be saved for
296 * the called partition.
297 *
298 * \param[in] partition_idx The index of the partition to be called.
299 *
300 * \return The address where the iovec parameters should be saved.
301 */
302static struct iovec_args_t *get_iovec_args_stack_address(uint32_t partition_idx)
303{
304 /* Save the iovecs on the common stack. */
305 return (struct iovec_args_t *)((uint8_t *)&REGION_NAME(Image$$,
306 TFM_SECURE_STACK, $$ZI$$Limit) -
307 sizeof(struct iovec_args_t));
308}
309
/**
 * \brief Validate a secure function request and switch execution to the
 *        target partition (library model, isolation level 1).
 *
 * Stores the caller's context and client ID, and - for non-secure callers or
 * during initialization - copies the iovecs to the top of the shared secure
 * stack and builds a thread-mode entry frame for the secure function.
 *
 * \param[in] desc_ptr   The secure function request descriptor
 * \param[in] excReturn  The caller's EXC_RETURN value, stored so it can be
 *                       restored when the partition returns
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise as in
 *         \ref tfm_status_e
 */
static enum tfm_status_e tfm_start_partition(
                                           const struct tfm_sfn_req_s *desc_ptr,
                                           uint32_t excReturn)
{
    enum tfm_status_e res;
    uint32_t caller_partition_idx = desc_ptr->caller_part_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;
    const struct spm_partition_runtime_data_t *caller_part_data;
    uint32_t caller_flags;
    register uint32_t partition_idx;
    uint32_t psp;
    uint32_t partition_psp, partition_psplim;
    uint32_t partition_state;
    uint32_t caller_partition_state;
    /* NOTE(review): partition_flags is computed below but not read in this
     * function - confirm whether it can be removed.
     */
    uint32_t partition_flags;
    struct tfm_state_context_t *svc_ctx;
    uint32_t caller_partition_id;
    int32_t client_id;
    struct iovec_args_t *iovec_args;

    /* The stacked SVC context lives at the current PSP */
    psp = __get_PSP();
    svc_ctx = (struct tfm_state_context_t *)psp;
    caller_flags = tfm_spm_partition_get_flags(caller_partition_idx);

    /* Check partition state consistency: an App RoT caller must not be
     * flagged as a non-secure caller, and vice versa.
     */
    if (((caller_flags & SPM_PART_FLAG_APP_ROT) != 0)
        != (!desc_ptr->ns_caller)) {
        /* Partition state inconsistency detected */
        return TFM_SECURE_LOCK_FAILED;
    }

    partition_idx = get_partition_idx(desc_ptr->sp_id);

    curr_part_data = tfm_spm_partition_get_runtime_data(partition_idx);
    caller_part_data = tfm_spm_partition_get_runtime_data(caller_partition_idx);
    partition_state = curr_part_data->partition_state;
    caller_partition_state = caller_part_data->partition_state;
    partition_flags = tfm_spm_partition_get_flags(partition_idx);
    caller_partition_id = tfm_spm_partition_get_partition_id(
                                                          caller_partition_idx);

    /* During initialization the state machine is not yet fully set up, so
     * the re-entrancy/state checks are skipped.
     */
    if (!tfm_secure_api_initializing) {
        res = check_partition_state(partition_state, caller_partition_state);
        if (res != TFM_SUCCESS) {
            return res;
        }
    }

    /* Prepare switch to shared secure partition stack */
    /* In case the call is coming from the non-secure world, we save the iovecs
     * on the top of the stack. So the memory area, that can actually be used
     * as stack by the partitions starts at a lower address
     */
    partition_psp =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)-
        sizeof(struct iovec_args_t);
    partition_psplim =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Base);

    /* Store the context for the partition call */
    tfm_spm_partition_set_caller_partition_idx(partition_idx,
                                               caller_partition_idx);
    tfm_spm_partition_store_context(caller_partition_idx, psp, excReturn);

    if ((caller_flags & SPM_PART_FLAG_APP_ROT)) {
        /* Secure caller: the client ID is the caller partition's own ID */
        tfm_spm_partition_set_caller_client_id(partition_idx,
                                               caller_partition_id);
    } else {
        /* Non-secure caller: client IDs from the NS world must be negative */
        client_id = tfm_nspm_get_current_client_id();
        if (client_id >= 0) {
            return TFM_SECURE_LOCK_FAILED;
        }
        tfm_spm_partition_set_caller_client_id(partition_idx, client_id);
    }

    /* In level one, only switch context and return from exception if in
     * handler mode
     */
    if ((desc_ptr->ns_caller) || (tfm_secure_api_initializing)) {
        if (tfm_spm_partition_set_iovec(partition_idx, desc_ptr->args) !=
            SPM_ERR_OK) {
            return TFM_ERROR_GENERIC;
        }
        /* Place the (already validated) iovecs on top of the shared stack */
        iovec_args = get_iovec_args_stack_address(partition_idx);
        tfm_copy_iovec_parameters(iovec_args, &(curr_part_data->iovec_args));

        /* Prepare the partition context, update stack ptr */
        psp = (uint32_t)prepare_partition_iovec_ctx(svc_ctx, desc_ptr,
                                                    iovec_args,
                                                    (uint32_t *)partition_psp);
        __set_PSP(psp);
        tfm_arch_set_psplim(partition_psplim);
    }

    /* Caller is blocked until the called partition returns */
    tfm_spm_partition_set_state(caller_partition_idx,
                                SPM_PARTITION_STATE_BLOCKED);
    tfm_spm_partition_set_state(partition_idx, SPM_PARTITION_STATE_RUNNING);
    tfm_secure_lock++;

    return TFM_SUCCESS;
}
411
/**
 * \brief Prepare execution of an unprivileged IRQ handler in thread mode.
 *
 * Masks the IRQ signal into the handler partition's signal mask, disables the
 * IRQ line, saves the contexts of both the interrupted and the handler
 * partition, and builds a thread-mode stack frame that enters the
 * unprivileged handler on exception return.
 *
 * \param[in] excReturn  The EXC_RETURN value of the SVC (currently unused in
 *                       this function - NOTE(review): confirm it can be
 *                       dropped)
 * \param[in] svc_ctx    The stacked SVC context; R0-R3 carry the handler
 *                       partition ID, handler address, IRQ signal and IRQ
 *                       line respectively
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 */
static enum tfm_status_e tfm_start_partition_for_irq_handling(
                                          uint32_t excReturn,
                                          struct tfm_state_context_t *svc_ctx)
{
    /* SVC arguments are passed in the stacked R0-R3 */
    uint32_t handler_partition_id = svc_ctx->r0;
    sfn_t unpriv_handler = (sfn_t)svc_ctx->r1;
    uint32_t irq_signal = svc_ctx->r2;
    uint32_t irq_line = svc_ctx->r3;
    enum tfm_status_e res;
    uint32_t psp = __get_PSP();
    uint32_t handler_partition_psp;
    uint32_t handler_partition_state;
    uint32_t interrupted_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t handler_partition_idx;

    handler_partition_idx = get_partition_idx(handler_partition_id);
    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                         handler_partition_idx);
    handler_partition_state = handler_part_data->partition_state;

    /* Reject handler partitions that are in UNINIT/CLOSED state */
    res = check_irq_partition_state(handler_partition_state);
    if (res != TFM_SUCCESS) {
        return res;
    }

    /* set mask for the partition */
    tfm_spm_partition_set_signal_mask(
                                 handler_partition_idx,
                                 handler_part_data->signal_mask | irq_signal);

    /* Keep the IRQ disabled until the handler calls psa_eoi() */
    tfm_spm_hal_disable_irq(irq_line);

    /* save the current context of the interrupted partition */
    tfm_spm_partition_push_interrupted_ctx(interrupted_partition_idx);

    handler_partition_psp = psp;

    /* save the current context of the handler partition */
    tfm_spm_partition_push_handler_ctx(handler_partition_idx);

    /* Store caller for the partition */
    tfm_spm_partition_set_caller_partition_idx(handler_partition_idx,
                                               interrupted_partition_idx);

    /* Build the thread-mode entry frame for the unprivileged handler */
    psp = (uint32_t)prepare_partition_irq_ctx(svc_ctx, unpriv_handler,
                                              (int32_t *)handler_partition_psp);
    __set_PSP(psp);

    tfm_spm_partition_set_state(interrupted_partition_idx,
                                SPM_PARTITION_STATE_SUSPENDED);
    tfm_spm_partition_set_state(handler_partition_idx,
                                SPM_PARTITION_STATE_HANDLING_IRQ);

    return TFM_SUCCESS;
}
469
/**
 * \brief Return from a secure function call: restore the caller partition's
 *        context (where needed) and update partition states.
 *
 * \param[out] excReturn  Where to store the EXC_RETURN value to resume the
 *                        caller with. It is only written when returning to a
 *                        non-App-RoT caller or during initialization, so it
 *                        may be NULL on the pure secure-to-secure
 *                        thread-mode path (see
 *                        tfm_spm_sfn_request_thread_mode()).
 *
 * \return \ref TFM_SUCCESS on success, \ref TFM_SECURE_UNLOCK_FAILED if the
 *         running or caller partition index is invalid.
 */
static enum tfm_status_e tfm_return_from_partition(uint32_t *excReturn)
{
    uint32_t current_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *curr_part_data, *ret_part_data;
    /* NOTE(review): current_partition_flags is computed below but not read in
     * this function - confirm whether it can be removed.
     */
    uint32_t current_partition_flags;
    uint32_t return_partition_idx;
    uint32_t return_partition_flags;
    uint32_t psp = __get_PSP();
    size_t i;
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;
    struct iovec_args_t *iovec_args;

    if (current_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    curr_part_data = tfm_spm_partition_get_runtime_data(current_partition_idx);
    return_partition_idx = curr_part_data->caller_partition_idx;

    if (return_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    ret_part_data = tfm_spm_partition_get_runtime_data(return_partition_idx);

    return_partition_flags = tfm_spm_partition_get_flags(return_partition_idx);
    current_partition_flags = tfm_spm_partition_get_flags(
            current_partition_idx);

    /* Release the "Big Lock" taken in tfm_start_partition() */
    tfm_secure_lock--;

    if (!(return_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        (tfm_secure_api_initializing)) {
        /* In TFM level 1 context restore is only done when
         * returning to NS or after initialization
         */
        /* Restore caller context */
        restore_caller_ctx(svc_ctx,
            (struct tfm_state_context_t *)ret_part_data->stack_ptr);
        *excReturn = ret_part_data->lr;
        __set_PSP(ret_part_data->stack_ptr);
        REGION_DECLARE(Image$$, ARM_LIB_STACK, $$ZI$$Base)[];
        uint32_t psp_stack_bottom =
            (uint32_t)REGION_NAME(Image$$, ARM_LIB_STACK, $$ZI$$Base);
        tfm_arch_set_psplim(psp_stack_bottom);

        /* The iovecs live at the top of the shared secure stack (see
         * get_iovec_args_stack_address())
         */
        iovec_args = (struct iovec_args_t *)
            ((uint8_t *)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit) -
            sizeof(struct iovec_args_t));

        /* Write the updated output lengths back to the caller's original
         * outvec array before wiping the shared copy
         */
        for (i = 0; i < curr_part_data->iovec_args.out_len; ++i) {
            curr_part_data->orig_outvec[i].len = iovec_args->out_vec[i].len;
        }
        tfm_clear_iovec_parameters(iovec_args);
    }

    tfm_spm_partition_cleanup_context(current_partition_idx);

    tfm_spm_partition_set_state(current_partition_idx,
                                SPM_PARTITION_STATE_IDLE);
    tfm_spm_partition_set_state(return_partition_idx,
                                SPM_PARTITION_STATE_RUNNING);

    return TFM_SUCCESS;
}
536
/**
 * \brief Return from an unprivileged IRQ handler: restore the contexts of the
 *        handler and the interrupted partition.
 *
 * \param[out] excReturn  Where to store the EXC_RETURN value that resumes the
 *                        privileged handler code
 *
 * \return \ref TFM_SUCCESS on success, \ref TFM_SECURE_UNLOCK_FAILED if the
 *         running or interrupted partition index is invalid.
 */
static enum tfm_status_e tfm_return_from_partition_irq_handling(
                                                            uint32_t *excReturn)
{
    uint32_t handler_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t interrupted_partition_idx;
    uint32_t psp = __get_PSP();
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;

    if (handler_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                         handler_partition_idx);
    interrupted_partition_idx = handler_part_data->caller_partition_idx;

    if (interrupted_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    /* For level 1, modify PSP, so that the SVC stack frame disappears,
     * and return to the privileged handler using the stack frame still on the
     * MSP stack.
     */
    *excReturn = svc_ctx->ra;
    psp += sizeof(struct tfm_state_context_t);

    tfm_spm_partition_pop_handler_ctx(handler_partition_idx);
    tfm_spm_partition_pop_interrupted_ctx(interrupted_partition_idx);

    __set_PSP(psp);

    return TFM_SUCCESS;
}
573
574static enum tfm_status_e tfm_check_sfn_req_integrity(
575 const struct tfm_sfn_req_s *desc_ptr)
576{
577 if ((desc_ptr == NULL) ||
578 (desc_ptr->sp_id == 0) ||
579 (desc_ptr->sfn == NULL)) {
580 /* invalid parameter */
581 return TFM_ERROR_INVALID_PARAMETER;
582 }
583 return TFM_SUCCESS;
584}
585
586static enum tfm_status_e tfm_core_check_sfn_req_rules(
587 const struct tfm_sfn_req_s *desc_ptr)
588{
589 /* Check partition idx validity */
590 if (desc_ptr->caller_part_idx == SPM_INVALID_PARTITION_IDX) {
591 return TFM_ERROR_NO_ACTIVE_PARTITION;
592 }
593
594 if ((desc_ptr->ns_caller) && (tfm_secure_lock != 0)) {
595 /* Secure domain is already locked!
596 * This should only happen if caller is secure partition!
597 */
598 /* This scenario is a potential security breach.
599 * Error is handled in caller.
600 */
601 return TFM_ERROR_SECURE_DOMAIN_LOCKED;
602 }
603
604 if (tfm_secure_api_initializing) {
605 int32_t id =
606 tfm_spm_partition_get_partition_id(desc_ptr->caller_part_idx);
607
608 if ((id != TFM_SP_CORE_ID) || (tfm_secure_lock != 0)) {
609 /* Invalid request during system initialization */
610 ERROR_MSG("Invalid service request during initialization!");
611 return TFM_ERROR_NOT_INITIALIZED;
612 }
613 }
614
615 return TFM_SUCCESS;
616}
617
/* Called once secure partition initialization is complete. Clearing the flag
 * enables the stricter run-time checks that are skipped while
 * tfm_secure_api_initializing is set (see check_partition_state() gating in
 * tfm_start_partition() and tfm_core_check_sfn_req_rules()).
 */
void tfm_spm_secure_api_init_done(void)
{
    tfm_secure_api_initializing = 0;
}
622
/**
 * \brief Handle a secure function request: validate it and switch execution
 *        to the target partition.
 *
 * Interrupts are disabled from the iovec sanity check until the context
 * switch is done, so the checked iovecs cannot be modified in between (see
 * the comment in tfm_copy_iovec_parameters()).
 *
 * \param[in] desc_ptr   The secure function request descriptor
 * \param[in] excReturn  The caller's EXC_RETURN value, forwarded to
 *                       tfm_start_partition()
 *
 * \return \ref TFM_SUCCESS on success. On any validation failure
 *         tfm_secure_api_error_handler() is invoked (presumably does not
 *         return - NOTE(review): confirm).
 */
enum tfm_status_e tfm_spm_sfn_request_handler(
                             struct tfm_sfn_req_s *desc_ptr, uint32_t excReturn)
{
    enum tfm_status_e res;

    /* Reject NULL descriptor, zero partition ID or NULL sfn up front */
    res = tfm_check_sfn_req_integrity(desc_ptr);
    if (res != TFM_SUCCESS) {
        ERROR_MSG("Invalid service request!");
        tfm_secure_api_error_handler();
    }

    __disable_irq();

    desc_ptr->caller_part_idx = tfm_spm_partition_get_running_partition_idx();

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        __enable_irq();
        tfm_secure_api_error_handler();
    }

    res = tfm_core_check_sfn_req_rules(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* FixMe: error compartmentalization TBD */
        tfm_spm_partition_set_state(
            desc_ptr->caller_part_idx, SPM_PARTITION_STATE_CLOSED);
        __enable_irq();
        ERROR_MSG("Unauthorized service request!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition(desc_ptr, excReturn);
    if (res != TFM_SUCCESS) {
        /* FixMe: consider possible fault scenarios */
        __enable_irq();
        ERROR_MSG("Failed to process service request!");
        tfm_secure_api_error_handler();
    }

    __enable_irq();

    return res;
}
667
/**
 * \brief Handle a secure function request issued from thread mode: a secure
 *        partition to secure partition call in TF-M level 1.
 *
 * The secure function is invoked directly (no exception return), then the
 * partition states are unwound with tfm_return_from_partition().
 *
 * \param[in] desc_ptr  The secure function request descriptor
 *
 * \return The secure function's return value on success, or an error code
 *         from \ref tfm_status_e cast to int32_t.
 */
int32_t tfm_spm_sfn_request_thread_mode(struct tfm_sfn_req_s *desc_ptr)
{
    enum tfm_status_e res;
    int32_t *args;
    int32_t retVal;

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        return (int32_t)res;
    }

    /* No excReturn value is needed as no exception handling is used */
    res = tfm_spm_sfn_request_handler(desc_ptr, 0);

    if (res != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    /* Secure partition to secure partition call in TFM level 1 */
    args = desc_ptr->args;
    retVal = desc_ptr->sfn(args[0], args[1], args[2], args[3]);

    /* return handler should restore original exc_return value... */
    /* NOTE(review): passing NULL is only safe while tfm_return_from_partition()
     * skips the *excReturn write on this path (App RoT caller, not
     * initializing) - confirm this invariant holds for all callers.
     */
    res = tfm_return_from_partition(NULL);
    if (res == TFM_SUCCESS) {
        /* If unlock successful, pass SS return value to caller */
        return retVal;
    } else {
        /* Unlock errors indicate ctx database corruption or unknown
         * anomalies. Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }
    return (int32_t)res;
}
705
706void tfm_spm_validate_secure_caller_handler(uint32_t *svc_args)
707{
708
709 enum tfm_status_e res = TFM_ERROR_GENERIC;
710 uint32_t running_partition_idx =
711 tfm_spm_partition_get_running_partition_idx();
712 const struct spm_partition_runtime_data_t *curr_part_data =
713 tfm_spm_partition_get_runtime_data(running_partition_idx);
714 uint32_t running_partition_flags =
715 tfm_spm_partition_get_flags(running_partition_idx);
716 uint32_t caller_partition_flags =
717 tfm_spm_partition_get_flags(curr_part_data->caller_partition_idx);
718
719 if (!(running_partition_flags & SPM_PART_FLAG_APP_ROT) ||
720 curr_part_data->partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
721 curr_part_data->partition_state == SPM_PARTITION_STATE_SUSPENDED) {
722 /* This handler shouldn't be called from outside partition context.
723 * Also if the current partition is handling IRQ, the caller partition
724 * index might not be valid;
725 * Partitions are only allowed to run while S domain is locked.
726 */
727 svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
728 return;
729 }
730
731 /* Store return value in r0 */
732 if (caller_partition_flags & SPM_PART_FLAG_APP_ROT) {
733 res = TFM_SUCCESS;
734 }
735 svc_args[0] = (uint32_t)res;
736}
737
738int32_t tfm_spm_check_buffer_access(uint32_t partition_idx,
739 void *start_addr,
740 size_t len,
741 uint32_t alignment)
742{
743 uintptr_t start_addr_value = (uintptr_t)start_addr;
744 uintptr_t end_addr_value = (uintptr_t)start_addr + len;
745 uintptr_t alignment_mask;
746
747 alignment_mask = (((uintptr_t)1) << alignment) - 1;
748
749 /* Check that the pointer is aligned properly */
750 if (start_addr_value & alignment_mask) {
751 /* not aligned, return error */
752 return 0;
753 }
754
755 /* Protect against overflow (and zero len) */
756 if (end_addr_value <= start_addr_value) {
757 return 0;
758 }
759
760 /* For privileged partition execution, all secure data memory and stack
761 * is accessible
762 */
763 if (start_addr_value >= S_DATA_START &&
764 end_addr_value <= (S_DATA_START + S_DATA_SIZE)) {
765 return 1;
766 }
767
768 return 0;
769}
770
/**
 * \brief SVC handler: write the client ID of the current partition's caller
 *        to the address given in svc_args[0].
 *
 * On success the client ID is stored through the supplied pointer and
 * svc_args[0] is set to TFM_SUCCESS; on an invalid calling context or an
 * output pointer the partition does not own, svc_args[0] is set to
 * TFM_ERROR_INVALID_PARAMETER.
 *
 * \param[in,out] svc_args  Stacked SVC arguments; svc_args[0] holds the
 *                          output pointer on entry and the status on exit
 */
void tfm_spm_get_caller_client_id_handler(uint32_t *svc_args)
{
    uintptr_t result_ptr_value = svc_args[0];
    uint32_t running_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const uint32_t running_partition_flags =
            tfm_spm_partition_get_flags(running_partition_idx);
    const struct spm_partition_runtime_data_t *curr_part_data =
            tfm_spm_partition_get_runtime_data(running_partition_idx);
    int res = 0;

    if (!(running_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_SUSPENDED) {
        /* This handler shouldn't be called from outside partition context.
         * Also if the current partition is handling IRQ, the caller partition
         * index might not be valid;
         * Partitions are only allowed to run while S domain is locked.
         */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    /* Make sure that the output pointer points to a memory area that is owned
     * by the partition
     */
    res = tfm_spm_check_buffer_access(running_partition_idx,
                                      (void *)result_ptr_value,
                                      sizeof(curr_part_data->caller_client_id),
                                      2);
    if (!res) {
        /* Not in accessible range, return error */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    *((int32_t *)result_ptr_value) = curr_part_data->caller_client_id;

    /* Store return value in r0 */
    svc_args[0] = (uint32_t)TFM_SUCCESS;
}
812
813/* This SVC handler is called if veneer is running in thread mode */
814uint32_t tfm_spm_partition_request_svc_handler(
815 const uint32_t *svc_ctx, uint32_t excReturn)
816{
817 struct tfm_sfn_req_s *desc_ptr;
818
819 if (!(excReturn & EXC_RETURN_STACK_PROCESS)) {
820 /* Service request SVC called with MSP active.
821 * Either invalid configuration for Thread mode or SVC called
822 * from Handler mode, which is not supported.
823 * FixMe: error severity TBD
824 */
825 ERROR_MSG("Service request SVC called with MSP active!");
826 tfm_secure_api_error_handler();
827 }
828
829 desc_ptr = (struct tfm_sfn_req_s *)svc_ctx[0];
830
831 if (tfm_spm_sfn_request_handler(desc_ptr, excReturn) != TFM_SUCCESS) {
832 tfm_secure_api_error_handler();
833 }
834
835 return EXC_RETURN_SECURE_FUNCTION;
836}
837
838/* This SVC handler is called, if a thread mode execution environment is to
839 * be set up, to run an unprivileged IRQ handler
840 */
841uint32_t tfm_spm_depriv_req_handler(uint32_t *svc_args, uint32_t excReturn)
842{
843 struct tfm_state_context_t *svc_ctx =
844 (struct tfm_state_context_t *)svc_args;
845
846 enum tfm_status_e res;
847
848 if (excReturn & EXC_RETURN_STACK_PROCESS) {
849 /* FixMe: error severity TBD */
850 ERROR_MSG("Partition request SVC called with PSP active!");
851 tfm_secure_api_error_handler();
852 }
853
854 res = tfm_start_partition_for_irq_handling(excReturn, svc_ctx);
855 if (res != TFM_SUCCESS) {
856 /* The partition is in an invalid state (UNINIT or CLOSED), so none of
857 * its code can be run
858 */
859 /* FixMe: For now this case is handled with TF-M panic, however it would
860 * be possible to skip the execution of the interrupt handler, and
861 * resume the execution of the interrupted code.
862 */
863 tfm_secure_api_error_handler();
864 }
865 return EXC_RETURN_SECURE_FUNCTION;
866}
867
868/* This SVC handler is called when sfn returns */
869uint32_t tfm_spm_partition_return_handler(uint32_t lr)
870{
871 enum tfm_status_e res;
872
873 if (!(lr & EXC_RETURN_STACK_PROCESS)) {
874 /* Partition return SVC called with MSP active.
875 * This should not happen!
876 */
877 ERROR_MSG("Partition return SVC called with MSP active!");
878 tfm_secure_api_error_handler();
879 }
880
881 res = tfm_return_from_partition(&lr);
882 if (res != TFM_SUCCESS) {
883 /* Unlock errors indicate ctx database corruption or unknown anomalies
884 * Halt execution
885 */
886 ERROR_MSG("Secure API error during unlock!");
887 tfm_secure_api_error_handler();
888 }
889
890 return lr;
891}
892
893/* This SVC handler is called if a deprivileged IRQ handler was executed, and
894 * the execution environment is to be set back for the privileged handler mode
895 */
896uint32_t tfm_spm_depriv_return_handler(uint32_t *irq_svc_args, uint32_t lr)
897{
898 enum tfm_status_e res;
899 struct tfm_state_context_t *irq_svc_ctx =
900 (struct tfm_state_context_t *)irq_svc_args;
901
902 if (!(lr & EXC_RETURN_STACK_PROCESS)) {
903 /* Partition request SVC called with MSP active.
904 * FixMe: error severity TBD
905 */
906 ERROR_MSG("Partition request SVC called with MSP active!");
907 tfm_secure_api_error_handler();
908 }
909
910 res = tfm_return_from_partition_irq_handling(&lr);
911 if (res != TFM_SUCCESS) {
912 /* Unlock errors indicate ctx database corruption or unknown anomalies
913 * Halt execution
914 */
915 ERROR_MSG("Secure API error during unlock!");
916 tfm_secure_api_error_handler();
917 }
918
919 irq_svc_ctx->ra = lr;
920
921 return EXC_RETURN_SECURE_HANDLER;
922}
923
924/* FIXME: get_irq_line_for_signal is also implemented in the ipc folder. */
925/**
926 * \brief Return the IRQ line number associated with a signal
927 *
928 * \param[in] partition_id The ID of the partition in which we look for the
929 * signal
930 * \param[in] signal The signal we do the query for
931 *
932 * \retval >=0 The IRQ line number associated with a signal in the partition
933 * \retval <0 error
934 */
935static int32_t get_irq_line_for_signal(int32_t partition_id,
936 psa_signal_t signal)
937{
938 size_t i;
939
940 for (i = 0; i < tfm_core_irq_signals_count; ++i) {
941 if (tfm_core_irq_signals[i].partition_id == partition_id &&
942 tfm_core_irq_signals[i].signal_value == signal) {
943 return tfm_core_irq_signals[i].irq_line;
944 }
945 }
946 return -1;
947}
948
949void tfm_spm_enable_irq_handler(uint32_t *svc_args)
950{
951 struct tfm_state_context_t *svc_ctx =
952 (struct tfm_state_context_t *)svc_args;
953 psa_signal_t irq_signal = svc_ctx->r0;
954 uint32_t running_partition_idx =
955 tfm_spm_partition_get_running_partition_idx();
956 uint32_t running_partition_id =
957 tfm_spm_partition_get_partition_id(running_partition_idx);
958 int32_t irq_line;
959
960 /* Only a single signal is allowed */
961 if (!tfm_is_one_bit_set(irq_signal)) {
962 /* FixMe: error severity TBD */
963 tfm_secure_api_error_handler();
964 }
965
966 irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
967
968 if (irq_line < 0) {
969 /* FixMe: error severity TBD */
970 tfm_secure_api_error_handler();
971 }
972
973 tfm_spm_hal_enable_irq(irq_line);
974}
975
976void tfm_spm_disable_irq_handler(uint32_t *svc_args)
977{
978 struct tfm_state_context_t *svc_ctx =
979 (struct tfm_state_context_t *)svc_args;
980 psa_signal_t irq_signal = svc_ctx->r0;
981 uint32_t running_partition_idx =
982 tfm_spm_partition_get_running_partition_idx();
983 uint32_t running_partition_id =
984 tfm_spm_partition_get_partition_id(running_partition_idx);
985 int32_t irq_line;
986
987 /* Only a single signal is allowed */
988 if (!tfm_is_one_bit_set(irq_signal)) {
989 /* FixMe: error severity TBD */
990 tfm_secure_api_error_handler();
991 }
992
993 irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
994
995 if (irq_line < 0) {
996 /* FixMe: error severity TBD */
997 tfm_secure_api_error_handler();
998 }
999
1000 tfm_spm_hal_disable_irq(irq_line);
1001}
1002
1003void tfm_spm_psa_wait(uint32_t *svc_args)
1004{
1005 /* Look for partition that is ready for run */
1006 struct tfm_state_context_t *svc_ctx =
1007 (struct tfm_state_context_t *)svc_args;
1008 uint32_t running_partition_idx;
1009 const struct spm_partition_runtime_data_t *curr_part_data;
1010
1011 psa_signal_t signal_mask = svc_ctx->r0;
1012 uint32_t timeout = svc_ctx->r1;
1013
1014 /*
1015 * Timeout[30:0] are reserved for future use.
1016 * SPM must ignore the value of RES.
1017 */
1018 timeout &= PSA_TIMEOUT_MASK;
1019
1020 running_partition_idx = tfm_spm_partition_get_running_partition_idx();
1021 curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);
1022
1023 if (timeout == PSA_BLOCK) {
1024 /* FIXME: Scheduling is not available in library model, and busy wait is
1025 * also not possible as this code is running in SVC context, and it
1026 * cannot be pre-empted by interrupts. So do nothing here for now
1027 */
1028 (void) signal_mask;
1029 }
1030
1031 svc_ctx->r0 = curr_part_data->signal_mask;
1032}
1033
1034void tfm_spm_psa_eoi(uint32_t *svc_args)
1035{
1036 struct tfm_state_context_t *svc_ctx =
1037 (struct tfm_state_context_t *)svc_args;
1038 psa_signal_t irq_signal = svc_ctx->r0;
1039 uint32_t signal_mask;
1040 uint32_t running_partition_idx;
1041 uint32_t running_partition_id;
1042 const struct spm_partition_runtime_data_t *curr_part_data;
1043 int32_t irq_line;
1044
1045 running_partition_idx = tfm_spm_partition_get_running_partition_idx();
1046 running_partition_id =
1047 tfm_spm_partition_get_partition_id(running_partition_idx);
1048 curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);
1049
1050 /* Only a single signal is allowed */
1051 if (!tfm_is_one_bit_set(irq_signal)) {
1052 tfm_secure_api_error_handler();
1053 }
1054
1055 irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
1056
1057 if (irq_line < 0) {
1058 /* FixMe: error severity TBD */
1059 tfm_secure_api_error_handler();
1060 }
1061
1062 tfm_spm_hal_clear_pending_irq(irq_line);
1063 tfm_spm_hal_enable_irq(irq_line);
1064
1065 signal_mask = curr_part_data->signal_mask & ~irq_signal;
1066 tfm_spm_partition_set_signal_mask(running_partition_idx, signal_mask);
1067}
Mingyang Sunda01a972019-07-12 17:32:59 +08001068
1069/*
1070 * This function is called when a secure partition causes an error.
1071 * In case of an error in the error handling, a non-zero value have to be
1072 * returned.
1073 */
1074static void tfm_spm_partition_err_handler(
1075 const struct spm_partition_desc_t *partition,
Mingyang Sunda01a972019-07-12 17:32:59 +08001076 int32_t err_code)
1077{
Mingyang Sunda01a972019-07-12 17:32:59 +08001078 (void)err_code;
Ken Liuf250b8b2019-12-27 16:31:24 +08001079
Summer Qin423dbef2019-08-22 15:59:35 +08001080 tfm_spm_partition_set_state(partition->static_data->partition_id,
Mingyang Sunda01a972019-07-12 17:32:59 +08001081 SPM_PARTITION_STATE_CLOSED);
1082}
1083
/**
 * \brief Initialize every partition in the static partition database.
 *
 * For each partition: applies its platform-specific default isolation
 * configuration (if any), then either marks it IDLE directly (no init
 * function) or runs its init function through the secure function request
 * path and marks it IDLE on success. A partition whose init fails is passed
 * to the error handler and counted as a failure. Finally signals that
 * secure API initialization is done.
 *
 * \return SPM_ERR_OK if all partitions initialized, otherwise
 *         SPM_ERR_PARTITION_NOT_AVAILABLE (even a single failure taints the
 *         whole result; already-initialized partitions stay initialized).
 */
enum spm_err_t tfm_spm_partition_init(void)
{
    struct spm_partition_desc_t *part;
    struct tfm_sfn_req_s desc;
    int32_t args[4] = {0};
    int32_t fail_cnt = 0;
    uint32_t idx;
    const struct tfm_spm_partition_platform_data_t **platform_data_p;

    /* Call the init function for each partition */
    for (idx = 0; idx < g_spm_partition_db.partition_count; ++idx) {
        part = &g_spm_partition_db.partitions[idx];
        /* platform_data_list is a NULL-terminated array of platform config
         * entries; each is applied to the HAL before the partition starts.
         */
        platform_data_p = part->platform_data_list;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(idx,
                    *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    fail_cnt++;
                }
                ++platform_data_p;
            }
        }
        if (part->static_data->partition_init == NULL) {
            /* No init hook: the partition is immediately ready to serve */
            tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            tfm_spm_partition_set_caller_partition_idx(idx,
                                                   SPM_INVALID_PARTITION_IDX);
        } else {
            int32_t res;

            /* Run the init function as a secure function request so it
             * executes with the partition's own identity and context.
             */
            desc.args = args;
            desc.ns_caller = false;
            desc.sfn = (sfn_t)part->static_data->partition_init;
            desc.sp_id = part->static_data->partition_id;
            res = tfm_core_sfn_request(&desc);
            if (res == TFM_SUCCESS) {
                tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            } else {
                tfm_spm_partition_err_handler(part, res);
                fail_cnt++;
            }
        }
    }

    /* From this point on, regular secure API checks are enforced */
    tfm_spm_secure_api_init_done();

    if (fail_cnt == 0) {
        return SPM_ERR_OK;
    } else {
        return SPM_ERR_PARTITION_NOT_AVAILABLE;
    }
}
1135
1136void tfm_spm_partition_push_interrupted_ctx(uint32_t partition_idx)
1137{
1138 struct spm_partition_runtime_data_t *runtime_data =
1139 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1140 struct interrupted_ctx_stack_frame_t *stack_frame =
Edison Ai7aff9e82019-07-11 14:56:46 +08001141 (struct interrupted_ctx_stack_frame_t *)runtime_data->ctx_stack_ptr;
Mingyang Sunda01a972019-07-12 17:32:59 +08001142
1143 stack_frame->partition_state = runtime_data->partition_state;
Matt463ed582019-12-20 12:31:25 +08001144
1145 runtime_data->ctx_stack_ptr +=
1146 sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
Mingyang Sunda01a972019-07-12 17:32:59 +08001147}
1148
1149void tfm_spm_partition_pop_interrupted_ctx(uint32_t partition_idx)
1150{
1151 struct spm_partition_runtime_data_t *runtime_data =
1152 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1153 struct interrupted_ctx_stack_frame_t *stack_frame;
1154
Matt463ed582019-12-20 12:31:25 +08001155 runtime_data->ctx_stack_ptr -=
1156 sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
1157
Mingyang Sunda01a972019-07-12 17:32:59 +08001158 stack_frame = (struct interrupted_ctx_stack_frame_t *)
1159 runtime_data->ctx_stack_ptr;
1160 tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
1161 stack_frame->partition_state = 0;
Mingyang Sunda01a972019-07-12 17:32:59 +08001162}
1163
1164void tfm_spm_partition_push_handler_ctx(uint32_t partition_idx)
1165{
1166 struct spm_partition_runtime_data_t *runtime_data =
1167 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1168 struct handler_ctx_stack_frame_t *stack_frame =
1169 (struct handler_ctx_stack_frame_t *)
1170 runtime_data->ctx_stack_ptr;
1171
1172 stack_frame->partition_state = runtime_data->partition_state;
1173 stack_frame->caller_partition_idx = runtime_data->caller_partition_idx;
1174
1175 runtime_data->ctx_stack_ptr +=
1176 sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
1177}
1178
1179void tfm_spm_partition_pop_handler_ctx(uint32_t partition_idx)
1180{
1181 struct spm_partition_runtime_data_t *runtime_data =
1182 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1183 struct handler_ctx_stack_frame_t *stack_frame;
1184
1185 runtime_data->ctx_stack_ptr -=
1186 sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
1187
1188 stack_frame = (struct handler_ctx_stack_frame_t *)
1189 runtime_data->ctx_stack_ptr;
1190
1191 tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
1192 stack_frame->partition_state = 0;
1193 tfm_spm_partition_set_caller_partition_idx(
1194 partition_idx, stack_frame->caller_partition_idx);
1195 stack_frame->caller_partition_idx = 0;
1196}
1197
Mingyang Sunda01a972019-07-12 17:32:59 +08001198void tfm_spm_partition_store_context(uint32_t partition_idx,
1199 uint32_t stack_ptr, uint32_t lr)
1200{
1201 g_spm_partition_db.partitions[partition_idx].
1202 runtime_data.stack_ptr = stack_ptr;
1203 g_spm_partition_db.partitions[partition_idx].
1204 runtime_data.lr = lr;
1205}
1206
1207const struct spm_partition_runtime_data_t *
1208 tfm_spm_partition_get_runtime_data(uint32_t partition_idx)
1209{
1210 return &(g_spm_partition_db.partitions[partition_idx].runtime_data);
1211}
1212
1213void tfm_spm_partition_set_state(uint32_t partition_idx, uint32_t state)
1214{
1215 g_spm_partition_db.partitions[partition_idx].runtime_data.partition_state =
1216 state;
1217 if (state == SPM_PARTITION_STATE_RUNNING ||
1218 state == SPM_PARTITION_STATE_HANDLING_IRQ) {
1219 g_spm_partition_db.running_partition_idx = partition_idx;
1220 }
1221}
1222
1223void tfm_spm_partition_set_caller_partition_idx(uint32_t partition_idx,
1224 uint32_t caller_partition_idx)
1225{
1226 g_spm_partition_db.partitions[partition_idx].runtime_data.
1227 caller_partition_idx = caller_partition_idx;
1228}
1229
1230void tfm_spm_partition_set_signal_mask(uint32_t partition_idx,
1231 uint32_t signal_mask)
1232{
1233 g_spm_partition_db.partitions[partition_idx].runtime_data.
1234 signal_mask = signal_mask;
1235}
1236
1237void tfm_spm_partition_set_caller_client_id(uint32_t partition_idx,
1238 int32_t caller_client_id)
1239{
1240 g_spm_partition_db.partitions[partition_idx].runtime_data.
1241 caller_client_id = caller_client_id;
1242}
1243
Mingyang Sunda01a972019-07-12 17:32:59 +08001244enum spm_err_t tfm_spm_partition_set_iovec(uint32_t partition_idx,
1245 const int32_t *args)
1246{
1247 struct spm_partition_runtime_data_t *runtime_data =
1248 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1249 size_t i;
1250
1251 if ((args[1] < 0) || (args[3] < 0)) {
1252 return SPM_ERR_INVALID_PARAMETER;
1253 }
1254
1255 runtime_data->iovec_args.in_len = (size_t)args[1];
1256 for (i = 0U; i < runtime_data->iovec_args.in_len; ++i) {
1257 runtime_data->iovec_args.in_vec[i].base =
1258 ((psa_invec *)args[0])[i].base;
1259 runtime_data->iovec_args.in_vec[i].len = ((psa_invec *)args[0])[i].len;
1260 }
1261 runtime_data->iovec_args.out_len = (size_t)args[3];
1262 for (i = 0U; i < runtime_data->iovec_args.out_len; ++i) {
1263 runtime_data->iovec_args.out_vec[i].base =
1264 ((psa_outvec *)args[2])[i].base;
1265 runtime_data->iovec_args.out_vec[i].len =
1266 ((psa_outvec *)args[2])[i].len;
1267 }
1268 runtime_data->orig_outvec = (psa_outvec *)args[2];
Mingyang Sunda01a972019-07-12 17:32:59 +08001269
1270 return SPM_ERR_OK;
1271}
1272
1273uint32_t tfm_spm_partition_get_running_partition_idx(void)
1274{
1275 return g_spm_partition_db.running_partition_idx;
1276}
1277
1278void tfm_spm_partition_cleanup_context(uint32_t partition_idx)
1279{
1280 struct spm_partition_desc_t *partition =
1281 &(g_spm_partition_db.partitions[partition_idx]);
1282 int32_t i;
1283
1284 partition->runtime_data.caller_partition_idx = SPM_INVALID_PARTITION_IDX;
Mingyang Sunda01a972019-07-12 17:32:59 +08001285 partition->runtime_data.iovec_args.in_len = 0;
1286 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
1287 partition->runtime_data.iovec_args.in_vec[i].base = 0;
1288 partition->runtime_data.iovec_args.in_vec[i].len = 0;
1289 }
1290 partition->runtime_data.iovec_args.out_len = 0;
1291 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
1292 partition->runtime_data.iovec_args.out_vec[i].base = 0;
1293 partition->runtime_data.iovec_args.out_vec[i].len = 0;
1294 }
1295 partition->runtime_data.orig_outvec = 0;
Summer Qin423dbef2019-08-22 15:59:35 +08001296}