blob: afb0479abe8bd9657d7338114b901e89156255a6 [file] [log] [blame]
Mingyang Sunda01a972019-07-12 17:32:59 +08001/*
Mingyang Sunabb1aab2020-02-18 13:49:08 +08002 * Copyright (c) 2017-2020, Arm Limited. All rights reserved.
Mingyang Sunda01a972019-07-12 17:32:59 +08003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 *
6 */
7
Mingyang Sunabb1aab2020-02-18 13:49:08 +08008#include <stdint.h>
Mingyang Sunda01a972019-07-12 17:32:59 +08009#include <stdbool.h>
Mingyang Sunabb1aab2020-02-18 13:49:08 +080010#include <arm_cmse.h>
11#include "tfm_nspm.h"
12#include "secure_utilities.h"
13#include "tfm_api.h"
14#include "tfm_arch.h"
15#include "tfm_irq_list.h"
16#include "psa/service.h"
17#include "tfm_core_mem_check.h"
18#include "tfm_secure_api.h"
Mingyang Sunda01a972019-07-12 17:32:59 +080019#include "tfm_spm_hal.h"
20#include "spm_api.h"
21#include "spm_db.h"
22#include "region_defs.h"
Mingyang Sunabb1aab2020-02-18 13:49:08 +080023#include "region.h"
24
25#define EXC_RETURN_SECURE_FUNCTION 0xFFFFFFFD
26#define EXC_RETURN_SECURE_HANDLER 0xFFFFFFF1
27
28#ifndef TFM_LVL
29#error TFM_LVL is not defined!
30#endif
31
32REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Base, uint32_t);
33REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Limit, struct iovec_args_t)[];
34
35/*
36 * This is the "Big Lock" on the secure side, to guarantee single entry
37 * to SPE
38 */
39extern int32_t tfm_secure_lock;
40static int32_t tfm_secure_api_initializing = 1;
Mingyang Sunda01a972019-07-12 17:32:59 +080041
42extern struct spm_partition_db_t g_spm_partition_db;
43
Mingyang Sunabb1aab2020-02-18 13:49:08 +080044static uint32_t *prepare_partition_iovec_ctx(
45 const struct tfm_state_context_t *svc_ctx,
46 const struct tfm_sfn_req_s *desc_ptr,
47 const struct iovec_args_t *iovec_args,
48 uint32_t *dst)
49{
50 /* XPSR = as was when called, but make sure it's thread mode */
51 *(--dst) = svc_ctx->xpsr & 0xFFFFFE00U;
52 /* ReturnAddress = resume veneer in new context */
53 *(--dst) = svc_ctx->ra;
54 /* LR = sfn address */
55 *(--dst) = (uint32_t)desc_ptr->sfn;
56 /* R12 = don't care */
57 *(--dst) = 0U;
58
59 /* R0-R3 = sfn arguments */
60 *(--dst) = iovec_args->out_len;
61 *(--dst) = (uint32_t)iovec_args->out_vec;
62 *(--dst) = iovec_args->in_len;
63 *(--dst) = (uint32_t)iovec_args->in_vec;
64
65 return dst;
66}
67
68/**
69 * \brief Create a stack frame that sets the execution environment to thread
70 * mode on exception return.
71 *
72 * \param[in] svc_ctx The stacked SVC context
73 * \param[in] unpriv_handler The unprivileged IRQ handler to be called
74 * \param[in] dst A pointer where the context is to be created. (the
75 * pointer is considered to be a stack pointer, and
76 * the frame is created below it)
77 *
78 * \return A pointer pointing at the created stack frame.
79 */
80static int32_t *prepare_partition_irq_ctx(
81 const struct tfm_state_context_t *svc_ctx,
82 sfn_t unpriv_handler,
83 int32_t *dst)
84{
85 int i;
86
87 /* XPSR = as was when called, but make sure it's thread mode */
88 *(--dst) = svc_ctx->xpsr & 0xFFFFFE00;
89 /* ReturnAddress = resume to the privileged handler code, but execute it
90 * unprivileged.
91 */
92 *(--dst) = svc_ctx->ra;
93 /* LR = start address */
94 *(--dst) = (int32_t)unpriv_handler;
95
96 /* R12, R0-R3 unused arguments */
97 for (i = 0; i < 5; ++i) {
98 *(--dst) = 0;
99 }
100
101 return dst;
102}
103
104static void restore_caller_ctx(const struct tfm_state_context_t *svc_ctx,
105 struct tfm_state_context_t *target_ctx)
106{
107 /* ReturnAddress = resume veneer after second SVC */
108 target_ctx->ra = svc_ctx->ra;
109
110 /* R0 = function return value */
111 target_ctx->r0 = svc_ctx->r0;
112
113 return;
114}
115
/**
 * \brief Check whether the iovec parameters are valid, and the memory ranges
 *        are in the possession of the calling partition.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 *                      (args[0..3] carry in_vec, in_len, out_vec, out_len)
 *
 * \return Return /ref TFM_SUCCESS if the iovec parameters are valid, error code
 *         otherwise as in /ref tfm_status_e
 */
static enum tfm_status_e tfm_core_check_sfn_parameters(
                                           const struct tfm_sfn_req_s *desc_ptr)
{
    struct psa_invec *in_vec = (psa_invec *)desc_ptr->args[0];
    size_t in_len;
    struct psa_outvec *out_vec = (psa_outvec *)desc_ptr->args[2];
    size_t out_len;
    uint32_t i;

    /* The vector counts are passed as signed values; negative counts are
     * rejected before converting to size_t
     */
    if ((desc_ptr->args[1] < 0) || (desc_ptr->args[3] < 0)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    in_len = (size_t)(desc_ptr->args[1]);
    out_len = (size_t)(desc_ptr->args[3]);

    /* The number of vectors are within range. Extra checks to avoid overflow */
    if ((in_len > PSA_MAX_IOVEC) || (out_len > PSA_MAX_IOVEC) ||
        (in_len + out_len > PSA_MAX_IOVEC)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    /* Check whether the caller partition has write access to the iovec
     * structures themselves. Use the TT instruction for this.
     * A non-NULL pointer with a zero count (and vice versa) is rejected
     * as inconsistent.
     */
    if (in_len > 0) {
        if ((in_vec == NULL) ||
            (tfm_core_has_write_access_to_region(in_vec,
                            sizeof(psa_invec)*in_len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (in_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }
    if (out_len > 0) {
        if ((out_vec == NULL) ||
            (tfm_core_has_write_access_to_region(out_vec,
                            sizeof(psa_outvec)*out_len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (out_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }

    /* Check whether the caller partition has access to the data inside the
     * iovecs: read access for inputs, write access for outputs
     */
    for (i = 0; i < in_len; ++i) {
        if (in_vec[i].len > 0) {
            if ((in_vec[i].base == NULL) ||
                (tfm_core_has_read_access_to_region(in_vec[i].base,
                            in_vec[i].len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }
    for (i = 0; i < out_len; ++i) {
        if (out_vec[i].len > 0) {
            if ((out_vec[i].base == NULL) ||
                (tfm_core_has_write_access_to_region(out_vec[i].base,
                            out_vec[i].len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }

    return TFM_SUCCESS;
}
201
202static void tfm_copy_iovec_parameters(struct iovec_args_t *target,
203 const struct iovec_args_t *source)
204{
205 size_t i;
206
207 /* The vectors have been sanity checked already, and since then the
208 * interrupts have been kept disabled. So we can be sure that the
209 * vectors haven't been tampered with since the check. So it is safe to pass
210 * it to the called partition.
211 */
212
213 target->in_len = source->in_len;
214 for (i = 0; i < source->in_len; ++i) {
215 target->in_vec[i].base = source->in_vec[i].base;
216 target->in_vec[i].len = source->in_vec[i].len;
217 }
218 target->out_len = source->out_len;
219 for (i = 0; i < source->out_len; ++i) {
220 target->out_vec[i].base = source->out_vec[i].base;
221 target->out_vec[i].len = source->out_vec[i].len;
222 }
223}
224
225static void tfm_clear_iovec_parameters(struct iovec_args_t *args)
226{
227 int i;
228
229 args->in_len = 0;
230 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
231 args->in_vec[i].base = NULL;
232 args->in_vec[i].len = 0;
233 }
234 args->out_len = 0;
235 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
236 args->out_vec[i].base = NULL;
237 args->out_vec[i].len = 0;
238 }
239}
240
241/**
242 * \brief Check whether the partitions for the secure function call are in a
243 * proper state.
244 *
245 * \param[in] curr_partition_state State of the partition to be called
246 * \param[in] caller_partition_state State of the caller partition
247 *
248 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
249 */
250static enum tfm_status_e check_partition_state(uint32_t curr_partition_state,
251 uint32_t caller_partition_state)
252{
253 if (caller_partition_state != SPM_PARTITION_STATE_RUNNING) {
254 /* Calling partition from non-running state (e.g. during handling IRQ)
255 * is not allowed.
256 */
257 return TFM_ERROR_INVALID_EXC_MODE;
258 }
259
260 if (curr_partition_state == SPM_PARTITION_STATE_RUNNING ||
261 curr_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
262 curr_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
263 curr_partition_state == SPM_PARTITION_STATE_BLOCKED) {
264 /* Active partitions cannot be called! */
265 return TFM_ERROR_PARTITION_NON_REENTRANT;
266 } else if (curr_partition_state != SPM_PARTITION_STATE_IDLE) {
267 /* The partition to be called is not in a proper state */
268 return TFM_SECURE_LOCK_FAILED;
269 }
270 return TFM_SUCCESS;
271}
272
273/**
274 * \brief Check whether the partitions for the secure function call of irq are
275 * in a proper state.
276 *
277 * \param[in] called_partition_state State of the partition to be called
278 *
279 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
280 */
281static enum tfm_status_e check_irq_partition_state(
282 uint32_t called_partition_state)
283{
284 if (called_partition_state == SPM_PARTITION_STATE_IDLE ||
285 called_partition_state == SPM_PARTITION_STATE_RUNNING ||
286 called_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
287 called_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
288 called_partition_state == SPM_PARTITION_STATE_BLOCKED) {
289 return TFM_SUCCESS;
290 }
291 return TFM_SECURE_LOCK_FAILED;
292}
293
/**
 * \brief Calculate the address where the iovec parameters are to be saved for
 *        the called partition.
 *
 * \param[in] partition_idx  The index of the partition to be called.
 *                           (Unused here: in level 1 all partitions share
 *                           the common secure stack.)
 *
 * \return The address where the iovec parameters should be saved.
 */
static struct iovec_args_t *get_iovec_args_stack_address(uint32_t partition_idx)
{
    /* Save the iovecs on the common stack. The linker-exported stack limit
     * symbol is declared as an array of iovec_args_t, so index [-1] is the
     * topmost slot that still lies inside the TFM_SECURE_STACK region.
     */
    return &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
}
307
/**
 * \brief Switch execution to the requested secure partition and record the
 *        switch in the SPM database.
 *
 * \param[in] desc_ptr   The secure function request descriptor
 * \param[in] excReturn  EXC_RETURN value to save for the caller partition
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 *
 * NOTE(review): assumes interrupts are disabled by the caller
 * (tfm_spm_sfn_request_handler does so) while states are checked and the
 * context is switched.
 */
static enum tfm_status_e tfm_start_partition(
                                          const struct tfm_sfn_req_s *desc_ptr,
                                          uint32_t excReturn)
{
    enum tfm_status_e res;
    uint32_t caller_partition_idx = desc_ptr->caller_part_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;
    const struct spm_partition_runtime_data_t *caller_part_data;
    uint32_t caller_flags;
    register uint32_t partition_idx;
    uint32_t psp;
    uint32_t partition_psp, partition_psplim;
    uint32_t partition_state;
    uint32_t caller_partition_state;
    struct tfm_state_context_t *svc_ctx;
    uint32_t caller_partition_id;
    int32_t client_id;
    struct iovec_args_t *iovec_args;

    psp = __get_PSP();
    svc_ctx = (struct tfm_state_context_t *)psp;
    caller_flags = tfm_spm_partition_get_flags(caller_partition_idx);

    /* Check partition state consistency: a secure caller must carry the
     * App RoT flag, a non-secure caller must not
     */
    if (((caller_flags & SPM_PART_FLAG_APP_ROT) != 0)
        != (!desc_ptr->ns_caller)) {
        /* Partition state inconsistency detected */
        return TFM_SECURE_LOCK_FAILED;
    }

    partition_idx = get_partition_idx(desc_ptr->sp_id);

    curr_part_data = tfm_spm_partition_get_runtime_data(partition_idx);
    caller_part_data = tfm_spm_partition_get_runtime_data(caller_partition_idx);
    partition_state = curr_part_data->partition_state;
    caller_partition_state = caller_part_data->partition_state;
    caller_partition_id = tfm_spm_partition_get_partition_id(
                                                          caller_partition_idx);

    /* The regular state rules are not enforced while TF-M is still
     * initializing the partitions
     */
    if (!tfm_secure_api_initializing) {
        res = check_partition_state(partition_state, caller_partition_state);
        if (res != TFM_SUCCESS) {
            return res;
        }
    }

    /* Prepare switch to shared secure partition stack */
    /* In case the call is coming from the non-secure world, we save the iovecs
     * on the top of the stack. So the memory area, that can actually be used
     * as stack by the partitions starts at a lower address
     */
    partition_psp =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
    partition_psplim =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Base);

    /* Store the context for the partition call */
    tfm_spm_partition_set_caller_partition_idx(partition_idx,
                                               caller_partition_idx);
    tfm_spm_partition_store_context(caller_partition_idx, psp, excReturn);

    if ((caller_flags & SPM_PART_FLAG_APP_ROT)) {
        /* Secure caller: its partition id doubles as the client id */
        tfm_spm_partition_set_caller_client_id(partition_idx,
                                               caller_partition_id);
    } else {
        /* Non-secure caller: NS client ids must be negative */
        client_id = tfm_nspm_get_current_client_id();
        if (client_id >= 0) {
            return TFM_SECURE_LOCK_FAILED;
        }
        tfm_spm_partition_set_caller_client_id(partition_idx, client_id);
    }

    /* In level one, only switch context and return from exception if in
     * handler mode
     */
    if ((desc_ptr->ns_caller) || (tfm_secure_api_initializing)) {
        if (tfm_spm_partition_set_iovec(partition_idx, desc_ptr->args) !=
            SPM_ERR_OK) {
            return TFM_ERROR_GENERIC;
        }
        iovec_args = get_iovec_args_stack_address(partition_idx);
        tfm_copy_iovec_parameters(iovec_args, &(curr_part_data->iovec_args));

        /* Prepare the partition context, update stack ptr */
        psp = (uint32_t)prepare_partition_iovec_ctx(svc_ctx, desc_ptr,
                                                    iovec_args,
                                                    (uint32_t *)partition_psp);
        __set_PSP(psp);
        tfm_arch_set_psplim(partition_psplim);
    }

    tfm_spm_partition_set_state(caller_partition_idx,
                                SPM_PARTITION_STATE_BLOCKED);
    tfm_spm_partition_set_state(partition_idx, SPM_PARTITION_STATE_RUNNING);
    tfm_secure_lock++;

    return TFM_SUCCESS;
}
406
/**
 * \brief Set up a thread mode execution environment to run the unprivileged
 *        IRQ handler of a partition, suspending the interrupted partition.
 *
 * \param[in] excReturn  The EXC_RETURN value of the active exception
 * \param[in] svc_ctx    The stacked SVC context; r0: handler partition id,
 *                       r1: unprivileged handler address, r2: IRQ signal,
 *                       r3: IRQ line number
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 */
static enum tfm_status_e tfm_start_partition_for_irq_handling(
                                          uint32_t excReturn,
                                          struct tfm_state_context_t *svc_ctx)
{
    uint32_t handler_partition_id = svc_ctx->r0;
    sfn_t unpriv_handler = (sfn_t)svc_ctx->r1;
    uint32_t irq_signal = svc_ctx->r2;
    IRQn_Type irq_line = (IRQn_Type) svc_ctx->r3;
    enum tfm_status_e res;
    uint32_t psp = __get_PSP();
    uint32_t handler_partition_psp;
    uint32_t handler_partition_state;
    uint32_t interrupted_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t handler_partition_idx;

    handler_partition_idx = get_partition_idx(handler_partition_id);
    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                         handler_partition_idx);
    handler_partition_state = handler_part_data->partition_state;

    res = check_irq_partition_state(handler_partition_state);
    if (res != TFM_SUCCESS) {
        return res;
    }

    /* set mask for the partition */
    tfm_spm_partition_set_signal_mask(
            handler_partition_idx,
            handler_part_data->signal_mask | irq_signal);

    /* Disable the IRQ line while its handler is being run */
    tfm_spm_hal_disable_irq(irq_line);

    /* save the current context of the interrupted partition */
    tfm_spm_partition_push_interrupted_ctx(interrupted_partition_idx);

    /* The handler frame is built at the current process stack position */
    handler_partition_psp = psp;

    /* save the current context of the handler partition */
    tfm_spm_partition_push_handler_ctx(handler_partition_idx);

    /* Store caller for the partition */
    tfm_spm_partition_set_caller_partition_idx(handler_partition_idx,
                                               interrupted_partition_idx);

    psp = (uint32_t)prepare_partition_irq_ctx(svc_ctx, unpriv_handler,
                                              (int32_t *)handler_partition_psp);
    __set_PSP(psp);

    tfm_spm_partition_set_state(interrupted_partition_idx,
                                SPM_PARTITION_STATE_SUSPENDED);
    tfm_spm_partition_set_state(handler_partition_idx,
                                SPM_PARTITION_STATE_HANDLING_IRQ);

    return TFM_SUCCESS;
}
464
/**
 * \brief Restore the caller partition's context after a secure function has
 *        returned, and update the partition states in the SPM database.
 *
 * \param[in,out] excReturn  Where the caller's saved EXC_RETURN value is
 *                           written back. NOTE(review): the thread mode path
 *                           passes NULL here; that appears safe only because
 *                           the restore branch below is skipped for
 *                           secure-to-secure returns after initialization —
 *                           TODO confirm.
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 */
static enum tfm_status_e tfm_return_from_partition(uint32_t *excReturn)
{
    uint32_t current_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *curr_part_data, *ret_part_data;
    uint32_t return_partition_idx;
    uint32_t return_partition_flags;
    uint32_t psp = __get_PSP();
    size_t i;
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;
    struct iovec_args_t *iovec_args;

    if (current_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    curr_part_data = tfm_spm_partition_get_runtime_data(current_partition_idx);
    return_partition_idx = curr_part_data->caller_partition_idx;

    if (return_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    ret_part_data = tfm_spm_partition_get_runtime_data(return_partition_idx);

    return_partition_flags = tfm_spm_partition_get_flags(return_partition_idx);

    tfm_secure_lock--;

    if (!(return_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        (tfm_secure_api_initializing)) {
        /* In TFM level 1 context restore is only done when
         * returning to NS or after initialization
         */
        /* Restore caller context */
        restore_caller_ctx(svc_ctx,
            (struct tfm_state_context_t *)ret_part_data->stack_ptr);
        *excReturn = ret_part_data->lr;
        __set_PSP(ret_part_data->stack_ptr);
        REGION_DECLARE_T(Image$$, ARM_LIB_STACK, $$ZI$$Base, uint32_t)[];
        uint32_t psp_stack_bottom =
            (uint32_t)REGION_NAME(Image$$, ARM_LIB_STACK, $$ZI$$Base);
        tfm_arch_set_psplim(psp_stack_bottom);

        /* The iovecs live in the topmost slot of the shared secure stack */
        iovec_args = &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];

        /* Propagate the updated output lengths back to the caller's
         * original outvec array
         */
        for (i = 0; i < curr_part_data->iovec_args.out_len; ++i) {
            curr_part_data->orig_outvec[i].len = iovec_args->out_vec[i].len;
        }
        /* Scrub the shared iovec area so no stale pointers remain */
        tfm_clear_iovec_parameters(iovec_args);
    }

    tfm_spm_partition_cleanup_context(current_partition_idx);

    tfm_spm_partition_set_state(current_partition_idx,
                                SPM_PARTITION_STATE_IDLE);
    tfm_spm_partition_set_state(return_partition_idx,
                                SPM_PARTITION_STATE_RUNNING);

    return TFM_SUCCESS;
}
526
/**
 * \brief Restore the privileged execution environment after a deprivileged
 *        IRQ handler has finished running.
 *
 * \param[out] excReturn  Receives the stacked return address, so execution
 *                        resumes in the privileged handler code.
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 */
static enum tfm_status_e tfm_return_from_partition_irq_handling(
                                                        uint32_t *excReturn)
{
    uint32_t handler_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t interrupted_partition_idx;
    uint32_t psp = __get_PSP();
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;

    if (handler_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                         handler_partition_idx);
    interrupted_partition_idx = handler_part_data->caller_partition_idx;

    if (interrupted_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    /* For level 1, modify PSP, so that the SVC stack frame disappears,
     * and return to the privileged handler using the stack frame still on the
     * MSP stack.
     */
    *excReturn = svc_ctx->ra;
    psp += sizeof(struct tfm_state_context_t);

    tfm_spm_partition_pop_handler_ctx(handler_partition_idx);
    tfm_spm_partition_pop_interrupted_ctx(interrupted_partition_idx);

    __set_PSP(psp);

    return TFM_SUCCESS;
}
563
564static enum tfm_status_e tfm_check_sfn_req_integrity(
565 const struct tfm_sfn_req_s *desc_ptr)
566{
567 if ((desc_ptr == NULL) ||
568 (desc_ptr->sp_id == 0) ||
569 (desc_ptr->sfn == NULL)) {
570 /* invalid parameter */
571 return TFM_ERROR_INVALID_PARAMETER;
572 }
573 return TFM_SUCCESS;
574}
575
576static enum tfm_status_e tfm_core_check_sfn_req_rules(
577 const struct tfm_sfn_req_s *desc_ptr)
578{
579 /* Check partition idx validity */
580 if (desc_ptr->caller_part_idx == SPM_INVALID_PARTITION_IDX) {
581 return TFM_ERROR_NO_ACTIVE_PARTITION;
582 }
583
584 if ((desc_ptr->ns_caller) && (tfm_secure_lock != 0)) {
585 /* Secure domain is already locked!
586 * This should only happen if caller is secure partition!
587 */
588 /* This scenario is a potential security breach.
589 * Error is handled in caller.
590 */
591 return TFM_ERROR_SECURE_DOMAIN_LOCKED;
592 }
593
594 if (tfm_secure_api_initializing) {
595 int32_t id =
596 tfm_spm_partition_get_partition_id(desc_ptr->caller_part_idx);
597
598 if ((id != TFM_SP_CORE_ID) || (tfm_secure_lock != 0)) {
599 /* Invalid request during system initialization */
600 ERROR_MSG("Invalid service request during initialization!");
601 return TFM_ERROR_NOT_INITIALIZED;
602 }
603 }
604
605 return TFM_SUCCESS;
606}
607
/* Called once partition initialization has completed: clears the
 * initialization flag so the regular partition state rules are enforced
 * from now on.
 */
void tfm_spm_secure_api_init_done(void)
{
    tfm_secure_api_initializing = 0;
}
612
/**
 * \brief Handle a secure function request: validate the descriptor, the
 *        iovecs and the call rules, then switch context to the target
 *        partition.
 *
 * \param[in] desc_ptr   The secure function request descriptor
 * \param[in] excReturn  EXC_RETURN value of the active exception (0 when
 *                       called from thread mode)
 *
 * \return \ref TFM_SUCCESS on success; on any validation failure
 *         tfm_secure_api_error_handler() is invoked instead of returning
 *         an error.
 */
enum tfm_status_e tfm_spm_sfn_request_handler(
                             struct tfm_sfn_req_s *desc_ptr, uint32_t excReturn)
{
    enum tfm_status_e res;

    res = tfm_check_sfn_req_integrity(desc_ptr);
    if (res != TFM_SUCCESS) {
        ERROR_MSG("Invalid service request!");
        tfm_secure_api_error_handler();
    }

    /* Interrupts stay disabled from here until the context switch is done,
     * so the checked iovecs and partition states cannot change underneath
     */
    __disable_irq();

    desc_ptr->caller_part_idx = tfm_spm_partition_get_running_partition_idx();

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        __enable_irq();
        tfm_secure_api_error_handler();
    }

    res = tfm_core_check_sfn_req_rules(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* FixMe: error compartmentalization TBD */
        tfm_spm_partition_set_state(
            desc_ptr->caller_part_idx, SPM_PARTITION_STATE_CLOSED);
        __enable_irq();
        ERROR_MSG("Unauthorized service request!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition(desc_ptr, excReturn);
    if (res != TFM_SUCCESS) {
        /* FixMe: consider possible fault scenarios */
        __enable_irq();
        ERROR_MSG("Failed to process service request!");
        tfm_secure_api_error_handler();
    }

    __enable_irq();

    return res;
}
657
/**
 * \brief Entry point for secure partition to secure partition calls made
 *        from thread mode in TFM level 1: the secure function is invoked
 *        directly, without an exception return.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 *
 * \return The secure function's return value on success, an error code cast
 *         to int32_t otherwise; on unlock failure the error handler is
 *         invoked instead of returning.
 */
int32_t tfm_spm_sfn_request_thread_mode(struct tfm_sfn_req_s *desc_ptr)
{
    enum tfm_status_e res;
    int32_t *args;
    int32_t retVal;

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        return (int32_t)res;
    }

    /* No excReturn value is needed as no exception handling is used */
    res = tfm_spm_sfn_request_handler(desc_ptr, 0);

    if (res != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    /* Secure partition to secure partition call in TFM level 1 */
    args = desc_ptr->args;
    retVal = desc_ptr->sfn(args[0], args[1], args[2], args[3]);

    /* return handler should restore original exc_return value... */
    res = tfm_return_from_partition(NULL);
    if (res == TFM_SUCCESS) {
        /* If unlock successful, pass SS return value to caller */
        return retVal;
    } else {
        /* Unlock errors indicate ctx database corruption or unknown
         * anomalies. Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }
    /* Only reached if the error handler returns */
    return (int32_t)res;
}
695
696void tfm_spm_validate_secure_caller_handler(uint32_t *svc_args)
697{
698
699 enum tfm_status_e res = TFM_ERROR_GENERIC;
700 uint32_t running_partition_idx =
701 tfm_spm_partition_get_running_partition_idx();
702 const struct spm_partition_runtime_data_t *curr_part_data =
703 tfm_spm_partition_get_runtime_data(running_partition_idx);
704 uint32_t running_partition_flags =
705 tfm_spm_partition_get_flags(running_partition_idx);
706 uint32_t caller_partition_flags =
707 tfm_spm_partition_get_flags(curr_part_data->caller_partition_idx);
708
709 if (!(running_partition_flags & SPM_PART_FLAG_APP_ROT) ||
710 curr_part_data->partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
711 curr_part_data->partition_state == SPM_PARTITION_STATE_SUSPENDED) {
712 /* This handler shouldn't be called from outside partition context.
713 * Also if the current partition is handling IRQ, the caller partition
714 * index might not be valid;
715 * Partitions are only allowed to run while S domain is locked.
716 */
717 svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
718 return;
719 }
720
721 /* Store return value in r0 */
722 if (caller_partition_flags & SPM_PART_FLAG_APP_ROT) {
723 res = TFM_SUCCESS;
724 }
725 svc_args[0] = (uint32_t)res;
726}
727
728int32_t tfm_spm_check_buffer_access(uint32_t partition_idx,
729 void *start_addr,
730 size_t len,
731 uint32_t alignment)
732{
733 uintptr_t start_addr_value = (uintptr_t)start_addr;
734 uintptr_t end_addr_value = (uintptr_t)start_addr + len;
735 uintptr_t alignment_mask;
736
737 alignment_mask = (((uintptr_t)1) << alignment) - 1;
738
739 /* Check that the pointer is aligned properly */
740 if (start_addr_value & alignment_mask) {
741 /* not aligned, return error */
742 return 0;
743 }
744
745 /* Protect against overflow (and zero len) */
746 if (end_addr_value <= start_addr_value) {
747 return 0;
748 }
749
750 /* For privileged partition execution, all secure data memory and stack
751 * is accessible
752 */
753 if (start_addr_value >= S_DATA_START &&
754 end_addr_value <= (S_DATA_START + S_DATA_SIZE)) {
755 return 1;
756 }
757
758 return 0;
759}
760
761void tfm_spm_get_caller_client_id_handler(uint32_t *svc_args)
762{
763 uintptr_t result_ptr_value = svc_args[0];
764 uint32_t running_partition_idx =
765 tfm_spm_partition_get_running_partition_idx();
766 const uint32_t running_partition_flags =
767 tfm_spm_partition_get_flags(running_partition_idx);
768 const struct spm_partition_runtime_data_t *curr_part_data =
769 tfm_spm_partition_get_runtime_data(running_partition_idx);
770 int res = 0;
771
772 if (!(running_partition_flags & SPM_PART_FLAG_APP_ROT) ||
773 curr_part_data->partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
774 curr_part_data->partition_state == SPM_PARTITION_STATE_SUSPENDED) {
775 /* This handler shouldn't be called from outside partition context.
776 * Also if the current partition is handling IRQ, the caller partition
777 * index might not be valid;
778 * Partitions are only allowed to run while S domain is locked.
779 */
780 svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
781 return;
782 }
783
784 /* Make sure that the output pointer points to a memory area that is owned
785 * by the partition
786 */
787 res = tfm_spm_check_buffer_access(running_partition_idx,
788 (void *)result_ptr_value,
789 sizeof(curr_part_data->caller_client_id),
790 2);
791 if (!res) {
792 /* Not in accessible range, return error */
793 svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
794 return;
795 }
796
797 *((int32_t *)result_ptr_value) = curr_part_data->caller_client_id;
798
799 /* Store return value in r0 */
800 svc_args[0] = (uint32_t)TFM_SUCCESS;
801}
802
803/* This SVC handler is called if veneer is running in thread mode */
804uint32_t tfm_spm_partition_request_svc_handler(
805 const uint32_t *svc_ctx, uint32_t excReturn)
806{
807 struct tfm_sfn_req_s *desc_ptr;
808
809 if (!(excReturn & EXC_RETURN_STACK_PROCESS)) {
810 /* Service request SVC called with MSP active.
811 * Either invalid configuration for Thread mode or SVC called
812 * from Handler mode, which is not supported.
813 * FixMe: error severity TBD
814 */
815 ERROR_MSG("Service request SVC called with MSP active!");
816 tfm_secure_api_error_handler();
817 }
818
819 desc_ptr = (struct tfm_sfn_req_s *)svc_ctx[0];
820
821 if (tfm_spm_sfn_request_handler(desc_ptr, excReturn) != TFM_SUCCESS) {
822 tfm_secure_api_error_handler();
823 }
824
825 return EXC_RETURN_SECURE_FUNCTION;
826}
827
828/* This SVC handler is called, if a thread mode execution environment is to
829 * be set up, to run an unprivileged IRQ handler
830 */
831uint32_t tfm_spm_depriv_req_handler(uint32_t *svc_args, uint32_t excReturn)
832{
833 struct tfm_state_context_t *svc_ctx =
834 (struct tfm_state_context_t *)svc_args;
835
836 enum tfm_status_e res;
837
838 if (excReturn & EXC_RETURN_STACK_PROCESS) {
839 /* FixMe: error severity TBD */
840 ERROR_MSG("Partition request SVC called with PSP active!");
841 tfm_secure_api_error_handler();
842 }
843
844 res = tfm_start_partition_for_irq_handling(excReturn, svc_ctx);
845 if (res != TFM_SUCCESS) {
846 /* The partition is in an invalid state (UNINIT or CLOSED), so none of
847 * its code can be run
848 */
849 /* FixMe: For now this case is handled with TF-M panic, however it would
850 * be possible to skip the execution of the interrupt handler, and
851 * resume the execution of the interrupted code.
852 */
853 tfm_secure_api_error_handler();
854 }
855 return EXC_RETURN_SECURE_FUNCTION;
856}
857
858/* This SVC handler is called when sfn returns */
859uint32_t tfm_spm_partition_return_handler(uint32_t lr)
860{
861 enum tfm_status_e res;
862
863 if (!(lr & EXC_RETURN_STACK_PROCESS)) {
864 /* Partition return SVC called with MSP active.
865 * This should not happen!
866 */
867 ERROR_MSG("Partition return SVC called with MSP active!");
868 tfm_secure_api_error_handler();
869 }
870
871 res = tfm_return_from_partition(&lr);
872 if (res != TFM_SUCCESS) {
873 /* Unlock errors indicate ctx database corruption or unknown anomalies
874 * Halt execution
875 */
876 ERROR_MSG("Secure API error during unlock!");
877 tfm_secure_api_error_handler();
878 }
879
880 return lr;
881}
882
883/* This SVC handler is called if a deprivileged IRQ handler was executed, and
884 * the execution environment is to be set back for the privileged handler mode
885 */
886uint32_t tfm_spm_depriv_return_handler(uint32_t *irq_svc_args, uint32_t lr)
887{
888 enum tfm_status_e res;
889 struct tfm_state_context_t *irq_svc_ctx =
890 (struct tfm_state_context_t *)irq_svc_args;
891
892 if (!(lr & EXC_RETURN_STACK_PROCESS)) {
893 /* Partition request SVC called with MSP active.
894 * FixMe: error severity TBD
895 */
896 ERROR_MSG("Partition request SVC called with MSP active!");
897 tfm_secure_api_error_handler();
898 }
899
900 res = tfm_return_from_partition_irq_handling(&lr);
901 if (res != TFM_SUCCESS) {
902 /* Unlock errors indicate ctx database corruption or unknown anomalies
903 * Halt execution
904 */
905 ERROR_MSG("Secure API error during unlock!");
906 tfm_secure_api_error_handler();
907 }
908
909 irq_svc_ctx->ra = lr;
910
911 return EXC_RETURN_SECURE_HANDLER;
912}
913
914/* FIXME: get_irq_line_for_signal is also implemented in the ipc folder. */
915/**
916 * \brief Return the IRQ line number associated with a signal
917 *
918 * \param[in] partition_id The ID of the partition in which we look for the
919 * signal
920 * \param[in] signal The signal we do the query for
921 *
922 * \retval >=0 The IRQ line number associated with a signal in the partition
923 * \retval <0 error
924 */
TTornblomfaf74f52020-03-04 17:56:27 +0100925static IRQn_Type get_irq_line_for_signal(int32_t partition_id,
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800926 psa_signal_t signal)
927{
928 size_t i;
929
930 for (i = 0; i < tfm_core_irq_signals_count; ++i) {
931 if (tfm_core_irq_signals[i].partition_id == partition_id &&
932 tfm_core_irq_signals[i].signal_value == signal) {
933 return tfm_core_irq_signals[i].irq_line;
934 }
935 }
TTornblomfaf74f52020-03-04 17:56:27 +0100936 return (IRQn_Type) -1;
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800937}
938
939void tfm_spm_enable_irq_handler(uint32_t *svc_args)
940{
941 struct tfm_state_context_t *svc_ctx =
942 (struct tfm_state_context_t *)svc_args;
943 psa_signal_t irq_signal = svc_ctx->r0;
944 uint32_t running_partition_idx =
945 tfm_spm_partition_get_running_partition_idx();
946 uint32_t running_partition_id =
947 tfm_spm_partition_get_partition_id(running_partition_idx);
TTornblomfaf74f52020-03-04 17:56:27 +0100948 IRQn_Type irq_line;
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800949
950 /* Only a single signal is allowed */
951 if (!tfm_is_one_bit_set(irq_signal)) {
952 /* FixMe: error severity TBD */
953 tfm_secure_api_error_handler();
954 }
955
956 irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
957
958 if (irq_line < 0) {
959 /* FixMe: error severity TBD */
960 tfm_secure_api_error_handler();
961 }
962
963 tfm_spm_hal_enable_irq(irq_line);
964}
965
966void tfm_spm_disable_irq_handler(uint32_t *svc_args)
967{
968 struct tfm_state_context_t *svc_ctx =
969 (struct tfm_state_context_t *)svc_args;
970 psa_signal_t irq_signal = svc_ctx->r0;
971 uint32_t running_partition_idx =
972 tfm_spm_partition_get_running_partition_idx();
973 uint32_t running_partition_id =
974 tfm_spm_partition_get_partition_id(running_partition_idx);
TTornblomfaf74f52020-03-04 17:56:27 +0100975 IRQn_Type irq_line;
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800976
977 /* Only a single signal is allowed */
978 if (!tfm_is_one_bit_set(irq_signal)) {
979 /* FixMe: error severity TBD */
980 tfm_secure_api_error_handler();
981 }
982
983 irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
984
985 if (irq_line < 0) {
986 /* FixMe: error severity TBD */
987 tfm_secure_api_error_handler();
988 }
989
990 tfm_spm_hal_disable_irq(irq_line);
991}
992
993void tfm_spm_psa_wait(uint32_t *svc_args)
994{
995 /* Look for partition that is ready for run */
996 struct tfm_state_context_t *svc_ctx =
997 (struct tfm_state_context_t *)svc_args;
998 uint32_t running_partition_idx;
999 const struct spm_partition_runtime_data_t *curr_part_data;
1000
1001 psa_signal_t signal_mask = svc_ctx->r0;
1002 uint32_t timeout = svc_ctx->r1;
1003
1004 /*
1005 * Timeout[30:0] are reserved for future use.
1006 * SPM must ignore the value of RES.
1007 */
1008 timeout &= PSA_TIMEOUT_MASK;
1009
1010 running_partition_idx = tfm_spm_partition_get_running_partition_idx();
1011 curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);
1012
1013 if (timeout == PSA_BLOCK) {
1014 /* FIXME: Scheduling is not available in library model, and busy wait is
1015 * also not possible as this code is running in SVC context, and it
1016 * cannot be pre-empted by interrupts. So do nothing here for now
1017 */
1018 (void) signal_mask;
1019 }
1020
1021 svc_ctx->r0 = curr_part_data->signal_mask;
1022}
1023
1024void tfm_spm_psa_eoi(uint32_t *svc_args)
1025{
1026 struct tfm_state_context_t *svc_ctx =
1027 (struct tfm_state_context_t *)svc_args;
1028 psa_signal_t irq_signal = svc_ctx->r0;
1029 uint32_t signal_mask;
1030 uint32_t running_partition_idx;
1031 uint32_t running_partition_id;
1032 const struct spm_partition_runtime_data_t *curr_part_data;
TTornblomfaf74f52020-03-04 17:56:27 +01001033 IRQn_Type irq_line;
Mingyang Sunabb1aab2020-02-18 13:49:08 +08001034
1035 running_partition_idx = tfm_spm_partition_get_running_partition_idx();
1036 running_partition_id =
1037 tfm_spm_partition_get_partition_id(running_partition_idx);
1038 curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);
1039
1040 /* Only a single signal is allowed */
1041 if (!tfm_is_one_bit_set(irq_signal)) {
1042 tfm_secure_api_error_handler();
1043 }
1044
1045 irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
1046
1047 if (irq_line < 0) {
1048 /* FixMe: error severity TBD */
1049 tfm_secure_api_error_handler();
1050 }
1051
1052 tfm_spm_hal_clear_pending_irq(irq_line);
1053 tfm_spm_hal_enable_irq(irq_line);
1054
1055 signal_mask = curr_part_data->signal_mask & ~irq_signal;
1056 tfm_spm_partition_set_signal_mask(running_partition_idx, signal_mask);
1057}
Mingyang Sunda01a972019-07-12 17:32:59 +08001058
1059/*
1060 * This function is called when a secure partition causes an error.
1061 * In case of an error in the error handling, a non-zero value have to be
1062 * returned.
1063 */
1064static void tfm_spm_partition_err_handler(
1065 const struct spm_partition_desc_t *partition,
Mingyang Sunda01a972019-07-12 17:32:59 +08001066 int32_t err_code)
1067{
Mingyang Sunda01a972019-07-12 17:32:59 +08001068 (void)err_code;
Ken Liuf250b8b2019-12-27 16:31:24 +08001069
Summer Qin423dbef2019-08-22 15:59:35 +08001070 tfm_spm_partition_set_state(partition->static_data->partition_id,
Mingyang Sunda01a972019-07-12 17:32:59 +08001071 SPM_PARTITION_STATE_CLOSED);
1072}
1073
1074enum spm_err_t tfm_spm_partition_init(void)
1075{
1076 struct spm_partition_desc_t *part;
1077 struct tfm_sfn_req_s desc;
1078 int32_t args[4] = {0};
1079 int32_t fail_cnt = 0;
1080 uint32_t idx;
Mate Toth-Pal8ac98a72019-11-21 17:30:10 +01001081 const struct tfm_spm_partition_platform_data_t **platform_data_p;
Mingyang Sunda01a972019-07-12 17:32:59 +08001082
1083 /* Call the init function for each partition */
1084 for (idx = 0; idx < g_spm_partition_db.partition_count; ++idx) {
1085 part = &g_spm_partition_db.partitions[idx];
Mate Toth-Pal8ac98a72019-11-21 17:30:10 +01001086 platform_data_p = part->platform_data_list;
1087 if (platform_data_p != NULL) {
1088 while ((*platform_data_p) != NULL) {
Edison Ai6be3df12020-02-14 22:14:33 +08001089 if (tfm_spm_hal_configure_default_isolation(idx,
1090 *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
1091 fail_cnt++;
1092 }
Mate Toth-Pal8ac98a72019-11-21 17:30:10 +01001093 ++platform_data_p;
1094 }
1095 }
Summer Qin423dbef2019-08-22 15:59:35 +08001096 if (part->static_data->partition_init == NULL) {
Mingyang Sunda01a972019-07-12 17:32:59 +08001097 tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
1098 tfm_spm_partition_set_caller_partition_idx(idx,
Mingyang Sunabb1aab2020-02-18 13:49:08 +08001099 SPM_INVALID_PARTITION_IDX);
Mingyang Sunda01a972019-07-12 17:32:59 +08001100 } else {
1101 int32_t res;
1102
1103 desc.args = args;
Summer Qin43c185d2019-10-10 15:44:42 +08001104 desc.ns_caller = false;
Summer Qin423dbef2019-08-22 15:59:35 +08001105 desc.sfn = (sfn_t)part->static_data->partition_init;
1106 desc.sp_id = part->static_data->partition_id;
Mingyang Sunda01a972019-07-12 17:32:59 +08001107 res = tfm_core_sfn_request(&desc);
1108 if (res == TFM_SUCCESS) {
1109 tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
1110 } else {
Mingyang Sunabb1aab2020-02-18 13:49:08 +08001111 tfm_spm_partition_err_handler(part, res);
Mingyang Sunda01a972019-07-12 17:32:59 +08001112 fail_cnt++;
1113 }
1114 }
1115 }
1116
Mingyang Sunabb1aab2020-02-18 13:49:08 +08001117 tfm_spm_secure_api_init_done();
Mingyang Sunda01a972019-07-12 17:32:59 +08001118
1119 if (fail_cnt == 0) {
1120 return SPM_ERR_OK;
1121 } else {
1122 return SPM_ERR_PARTITION_NOT_AVAILABLE;
1123 }
1124}
1125
1126void tfm_spm_partition_push_interrupted_ctx(uint32_t partition_idx)
1127{
1128 struct spm_partition_runtime_data_t *runtime_data =
1129 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1130 struct interrupted_ctx_stack_frame_t *stack_frame =
Edison Ai7aff9e82019-07-11 14:56:46 +08001131 (struct interrupted_ctx_stack_frame_t *)runtime_data->ctx_stack_ptr;
Mingyang Sunda01a972019-07-12 17:32:59 +08001132
1133 stack_frame->partition_state = runtime_data->partition_state;
Matt463ed582019-12-20 12:31:25 +08001134
1135 runtime_data->ctx_stack_ptr +=
1136 sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
Mingyang Sunda01a972019-07-12 17:32:59 +08001137}
1138
1139void tfm_spm_partition_pop_interrupted_ctx(uint32_t partition_idx)
1140{
1141 struct spm_partition_runtime_data_t *runtime_data =
1142 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1143 struct interrupted_ctx_stack_frame_t *stack_frame;
1144
Matt463ed582019-12-20 12:31:25 +08001145 runtime_data->ctx_stack_ptr -=
1146 sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
1147
Mingyang Sunda01a972019-07-12 17:32:59 +08001148 stack_frame = (struct interrupted_ctx_stack_frame_t *)
1149 runtime_data->ctx_stack_ptr;
1150 tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
1151 stack_frame->partition_state = 0;
Mingyang Sunda01a972019-07-12 17:32:59 +08001152}
1153
1154void tfm_spm_partition_push_handler_ctx(uint32_t partition_idx)
1155{
1156 struct spm_partition_runtime_data_t *runtime_data =
1157 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1158 struct handler_ctx_stack_frame_t *stack_frame =
1159 (struct handler_ctx_stack_frame_t *)
1160 runtime_data->ctx_stack_ptr;
1161
1162 stack_frame->partition_state = runtime_data->partition_state;
1163 stack_frame->caller_partition_idx = runtime_data->caller_partition_idx;
1164
1165 runtime_data->ctx_stack_ptr +=
1166 sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
1167}
1168
1169void tfm_spm_partition_pop_handler_ctx(uint32_t partition_idx)
1170{
1171 struct spm_partition_runtime_data_t *runtime_data =
1172 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1173 struct handler_ctx_stack_frame_t *stack_frame;
1174
1175 runtime_data->ctx_stack_ptr -=
1176 sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
1177
1178 stack_frame = (struct handler_ctx_stack_frame_t *)
1179 runtime_data->ctx_stack_ptr;
1180
1181 tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
1182 stack_frame->partition_state = 0;
1183 tfm_spm_partition_set_caller_partition_idx(
1184 partition_idx, stack_frame->caller_partition_idx);
1185 stack_frame->caller_partition_idx = 0;
1186}
1187
Mingyang Sunda01a972019-07-12 17:32:59 +08001188void tfm_spm_partition_store_context(uint32_t partition_idx,
1189 uint32_t stack_ptr, uint32_t lr)
1190{
1191 g_spm_partition_db.partitions[partition_idx].
1192 runtime_data.stack_ptr = stack_ptr;
1193 g_spm_partition_db.partitions[partition_idx].
1194 runtime_data.lr = lr;
1195}
1196
1197const struct spm_partition_runtime_data_t *
1198 tfm_spm_partition_get_runtime_data(uint32_t partition_idx)
1199{
1200 return &(g_spm_partition_db.partitions[partition_idx].runtime_data);
1201}
1202
1203void tfm_spm_partition_set_state(uint32_t partition_idx, uint32_t state)
1204{
1205 g_spm_partition_db.partitions[partition_idx].runtime_data.partition_state =
1206 state;
1207 if (state == SPM_PARTITION_STATE_RUNNING ||
1208 state == SPM_PARTITION_STATE_HANDLING_IRQ) {
1209 g_spm_partition_db.running_partition_idx = partition_idx;
1210 }
1211}
1212
1213void tfm_spm_partition_set_caller_partition_idx(uint32_t partition_idx,
1214 uint32_t caller_partition_idx)
1215{
1216 g_spm_partition_db.partitions[partition_idx].runtime_data.
1217 caller_partition_idx = caller_partition_idx;
1218}
1219
1220void tfm_spm_partition_set_signal_mask(uint32_t partition_idx,
1221 uint32_t signal_mask)
1222{
1223 g_spm_partition_db.partitions[partition_idx].runtime_data.
1224 signal_mask = signal_mask;
1225}
1226
1227void tfm_spm_partition_set_caller_client_id(uint32_t partition_idx,
1228 int32_t caller_client_id)
1229{
1230 g_spm_partition_db.partitions[partition_idx].runtime_data.
1231 caller_client_id = caller_client_id;
1232}
1233
Mingyang Sunda01a972019-07-12 17:32:59 +08001234enum spm_err_t tfm_spm_partition_set_iovec(uint32_t partition_idx,
1235 const int32_t *args)
1236{
1237 struct spm_partition_runtime_data_t *runtime_data =
1238 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1239 size_t i;
1240
1241 if ((args[1] < 0) || (args[3] < 0)) {
1242 return SPM_ERR_INVALID_PARAMETER;
1243 }
1244
1245 runtime_data->iovec_args.in_len = (size_t)args[1];
1246 for (i = 0U; i < runtime_data->iovec_args.in_len; ++i) {
1247 runtime_data->iovec_args.in_vec[i].base =
1248 ((psa_invec *)args[0])[i].base;
1249 runtime_data->iovec_args.in_vec[i].len = ((psa_invec *)args[0])[i].len;
1250 }
1251 runtime_data->iovec_args.out_len = (size_t)args[3];
1252 for (i = 0U; i < runtime_data->iovec_args.out_len; ++i) {
1253 runtime_data->iovec_args.out_vec[i].base =
1254 ((psa_outvec *)args[2])[i].base;
1255 runtime_data->iovec_args.out_vec[i].len =
1256 ((psa_outvec *)args[2])[i].len;
1257 }
1258 runtime_data->orig_outvec = (psa_outvec *)args[2];
Mingyang Sunda01a972019-07-12 17:32:59 +08001259
1260 return SPM_ERR_OK;
1261}
1262
1263uint32_t tfm_spm_partition_get_running_partition_idx(void)
1264{
1265 return g_spm_partition_db.running_partition_idx;
1266}
1267
1268void tfm_spm_partition_cleanup_context(uint32_t partition_idx)
1269{
1270 struct spm_partition_desc_t *partition =
1271 &(g_spm_partition_db.partitions[partition_idx]);
1272 int32_t i;
1273
1274 partition->runtime_data.caller_partition_idx = SPM_INVALID_PARTITION_IDX;
Mingyang Sunda01a972019-07-12 17:32:59 +08001275 partition->runtime_data.iovec_args.in_len = 0;
1276 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
1277 partition->runtime_data.iovec_args.in_vec[i].base = 0;
1278 partition->runtime_data.iovec_args.in_vec[i].len = 0;
1279 }
1280 partition->runtime_data.iovec_args.out_len = 0;
1281 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
1282 partition->runtime_data.iovec_args.out_vec[i].base = 0;
1283 partition->runtime_data.iovec_args.out_vec[i].len = 0;
1284 }
1285 partition->runtime_data.orig_outvec = 0;
Summer Qin423dbef2019-08-22 15:59:35 +08001286}