/*
 * Copyright (c) 2017-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include <stdbool.h>
#include <arm_cmse.h>
#include "tfm_nspm.h"
#include "tfm_api.h"
#include "tfm_arch.h"
#include "tfm_irq_list.h"
#include "psa/service.h"
#include "tfm_core_mem_check.h"
#include "tfm_peripherals_def.h"
#include "tfm_secure_api.h"
#include "tfm_spm_hal.h"
#include "spm_func.h"
#include "spm_db.h"
#include "region_defs.h"
#include "region.h"
#include "spm_partition_defs.h"
#include "psa_manifest/pid.h"
#include "tfm/tfm_spm_services.h"
#include "tfm_spm_db_func.inc"

#define EXC_RETURN_SECURE_FUNCTION 0xFFFFFFFD
#define EXC_RETURN_SECURE_HANDLER 0xFFFFFFF1
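
/*
 * EXC_RETURN payloads used when leaving the SVC handlers below:
 * 0xFFFFFFFD returns to Thread mode using the process stack (PSP), which is
 * how control is handed to secure partition code, while 0xFFFFFFF1 returns
 * to Handler mode using the main stack (MSP).
 */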

#ifndef TFM_LVL
#error TFM_LVL is not defined!
#endif

REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Base, uint32_t);
REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Limit, struct iovec_args_t)[];

/*
 * This is the "Big Lock" on the secure side, to guarantee single entry
 * to SPE
 */
static int32_t tfm_secure_lock;
static int32_t tfm_secure_api_initializing = 1;

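/**
 * \brief Create a stack frame that transfers control to a secure partition
 *        function with the iovec parameters loaded into r0-r3 on exception
 *        return.
 *
 * \param[in] svc_ctx     The stacked SVC context
 * \param[in] desc_ptr    The secure function request descriptor
 * \param[in] iovec_args  The iovec parameters to pass to the function
 * \param[in] dst         A pointer below which the frame is created (the
 *                        pointer is treated as a stack pointer)
 *
 * \return A pointer pointing at the created stack frame.
 */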
static uint32_t *prepare_partition_iovec_ctx(
                                  const struct tfm_state_context_t *svc_ctx,
                                  const struct tfm_sfn_req_s *desc_ptr,
                                  const struct iovec_args_t *iovec_args,
                                  uint32_t *dst)
{
    /* XPSR = as was when called, but make sure it's thread mode */
    *(--dst) = svc_ctx->xpsr & 0xFFFFFE00U;
    /* ReturnAddress = resume veneer in new context */
    *(--dst) = svc_ctx->ra;
    /* LR = sfn address */
    *(--dst) = (uint32_t)desc_ptr->sfn;
    /* R12 = don't care */
    *(--dst) = 0U;

    /* R0-R3 = sfn arguments */
    *(--dst) = iovec_args->out_len;
    *(--dst) = (uint32_t)iovec_args->out_vec;
    *(--dst) = iovec_args->in_len;
    *(--dst) = (uint32_t)iovec_args->in_vec;

    return dst;
}

/**
 * \brief Create a stack frame that sets the execution environment to thread
 *        mode on exception return.
 *
 * \param[in] svc_ctx         The stacked SVC context
 * \param[in] unpriv_handler  The unprivileged IRQ handler to be called
 * \param[in] dst             A pointer where the context is to be created.
 *                            (the pointer is considered to be a stack pointer,
 *                            and the frame is created below it)
 *
 * \return A pointer pointing at the created stack frame.
 */
static int32_t *prepare_partition_irq_ctx(
                                  const struct tfm_state_context_t *svc_ctx,
                                  sfn_t unpriv_handler,
                                  int32_t *dst)
{
    int i;

    /* XPSR = as was when called, but make sure it's thread mode */
    *(--dst) = svc_ctx->xpsr & 0xFFFFFE00;
    /* ReturnAddress = resume to the privileged handler code, but execute it
     * unprivileged.
     */
    *(--dst) = svc_ctx->ra;
    /* LR = start address */
    *(--dst) = (int32_t)unpriv_handler;

    /* R12, R0-R3 unused arguments */
    for (i = 0; i < 5; ++i) {
        *(--dst) = 0;
    }

    return dst;
}

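/**
 * \brief Restore the caller's return address and return value into the
 *        context that execution resumes on after the secure function call.
 *
 * \param[in]  svc_ctx     The stacked SVC context of the return SVC
 * \param[out] target_ctx  The caller's stacked context to be updated
 */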
static void restore_caller_ctx(const struct tfm_state_context_t *svc_ctx,
                               struct tfm_state_context_t *target_ctx)
{
    /* ReturnAddress = resume veneer after second SVC */
    target_ctx->ra = svc_ctx->ra;

    /* R0 = function return value */
    target_ctx->r0 = svc_ctx->r0;

    return;
}

/**
 * \brief Check whether the iovec parameters are valid, and the memory ranges
 *        are in the possession of the calling partition.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 *
 * \return \ref TFM_SUCCESS if the iovec parameters are valid, an error code
 *         from \ref tfm_status_e otherwise
 */
static enum tfm_status_e tfm_core_check_sfn_parameters(
                                          const struct tfm_sfn_req_s *desc_ptr)
{
    struct psa_invec *in_vec = (psa_invec *)desc_ptr->args[0];
    size_t in_len;
    struct psa_outvec *out_vec = (psa_outvec *)desc_ptr->args[2];
    size_t out_len;
    uint32_t i;

    if ((desc_ptr->args[1] < 0) || (desc_ptr->args[3] < 0)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    in_len = (size_t)(desc_ptr->args[1]);
    out_len = (size_t)(desc_ptr->args[3]);

    /* Check that the number of vectors is within range; the extra check on the
     * sum also guards against overflow.
     */
    if ((in_len > PSA_MAX_IOVEC) || (out_len > PSA_MAX_IOVEC) ||
        (in_len + out_len > PSA_MAX_IOVEC)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    /* Check whether the caller partition has write access to the iovec
     * structures themselves. Use the TT instruction for this.
     */
    if (in_len > 0) {
        if ((in_vec == NULL) ||
            (tfm_core_has_write_access_to_region(in_vec,
                            sizeof(psa_invec)*in_len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (in_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }
    if (out_len > 0) {
        if ((out_vec == NULL) ||
            (tfm_core_has_write_access_to_region(out_vec,
                            sizeof(psa_outvec)*out_len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (out_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }

    /* Check whether the caller partition has access to the data inside the
     * iovecs
     */
    for (i = 0; i < in_len; ++i) {
        if (in_vec[i].len > 0) {
            if ((in_vec[i].base == NULL) ||
                (tfm_core_has_read_access_to_region(in_vec[i].base,
                            in_vec[i].len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }
    for (i = 0; i < out_len; ++i) {
        if (out_vec[i].len > 0) {
            if ((out_vec[i].base == NULL) ||
                (tfm_core_has_write_access_to_region(out_vec[i].base,
                            out_vec[i].len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }

    return TFM_SUCCESS;
}
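
/*
 * For reference: the secure function request carries the iovec call
 * parameters packed into desc_ptr->args[] as
 *   args[0] = (psa_invec *)in_vec,   args[1] = in_len,
 *   args[2] = (psa_outvec *)out_vec, args[3] = out_len,
 * which is the order in which they are checked above and later loaded into
 * r0-r3 by prepare_partition_iovec_ctx().
 */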

static void tfm_copy_iovec_parameters(struct iovec_args_t *target,
                                      const struct iovec_args_t *source)
{
    size_t i;

    /* The vectors have already been sanity checked, and interrupts have been
     * kept disabled since then, so the vectors cannot have been tampered with
     * after the check. It is therefore safe to pass them to the called
     * partition.
     */

    target->in_len = source->in_len;
    for (i = 0; i < source->in_len; ++i) {
        target->in_vec[i].base = source->in_vec[i].base;
        target->in_vec[i].len = source->in_vec[i].len;
    }
    target->out_len = source->out_len;
    for (i = 0; i < source->out_len; ++i) {
        target->out_vec[i].base = source->out_vec[i].base;
        target->out_vec[i].len = source->out_vec[i].len;
    }
}

static void tfm_clear_iovec_parameters(struct iovec_args_t *args)
{
    int i;

    args->in_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        args->in_vec[i].base = NULL;
        args->in_vec[i].len = 0;
    }
    args->out_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        args->out_vec[i].base = NULL;
        args->out_vec[i].len = 0;
    }
}

/**
 * \brief Check whether the partitions for the secure function call are in a
 *        proper state.
 *
 * \param[in] curr_partition_state    State of the partition to be called
 * \param[in] caller_partition_state  State of the caller partition
 *
 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
 */
static enum tfm_status_e check_partition_state(uint32_t curr_partition_state,
                                               uint32_t caller_partition_state)
{
    if (caller_partition_state != SPM_PARTITION_STATE_RUNNING) {
        /* Calling a partition from a non-running state (e.g. while handling
         * an IRQ) is not allowed.
         */
        return TFM_ERROR_INVALID_EXC_MODE;
    }

    if (curr_partition_state == SPM_PARTITION_STATE_RUNNING ||
        curr_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        curr_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
        curr_partition_state == SPM_PARTITION_STATE_BLOCKED) {
        /* Active partitions cannot be called! */
        return TFM_ERROR_PARTITION_NON_REENTRANT;
    } else if (curr_partition_state != SPM_PARTITION_STATE_IDLE) {
        /* The partition to be called is not in a proper state */
        return TFM_SECURE_LOCK_FAILED;
    }
    return TFM_SUCCESS;
}
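
/*
 * Note on partition states in the library model: a partition is IDLE until it
 * is called, RUNNING while its secure function executes, and BLOCKED while a
 * nested call it made to another partition is in progress. When an IRQ handler
 * is deprivileged, the interrupted partition is marked SUSPENDED and the
 * handler partition HANDLING_IRQ (see the IRQ entry/return paths below).
 */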

/**
 * \brief Check whether the partition that is to handle an IRQ-triggered
 *        secure function call is in a proper state.
 *
 * \param[in] called_partition_state  State of the partition to be called
 *
 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
 */
static enum tfm_status_e check_irq_partition_state(
                                               uint32_t called_partition_state)
{
    if (called_partition_state == SPM_PARTITION_STATE_IDLE ||
        called_partition_state == SPM_PARTITION_STATE_RUNNING ||
        called_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        called_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
        called_partition_state == SPM_PARTITION_STATE_BLOCKED) {
        return TFM_SUCCESS;
    }
    return TFM_SECURE_LOCK_FAILED;
}

/**
 * \brief Calculate the address where the iovec parameters are to be saved for
 *        the called partition.
 *
 * \param[in] partition_idx  The index of the partition to be called.
 *
 * \return The address where the iovec parameters should be saved.
 */
static struct iovec_args_t *get_iovec_args_stack_address(uint32_t partition_idx)
{
    /* Save the iovecs on the common stack. */
    return &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
}
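
/*
 * Because the TFM_SECURE_STACK $$ZI$$Limit symbol is declared above as an
 * array of struct iovec_args_t, indexing it with [-1] yields the address of
 * the last whole iovec_args_t that fits below the top of the shared secure
 * stack region; that slot is reserved for the iovec copy handed to the called
 * partition.
 */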

/**
 * \brief Returns the index of the partition with the given partition ID.
 *
 * \param[in] partition_id  Partition id
 *
 * \return the partition idx if partition_id is valid,
 *         \ref SPM_INVALID_PARTITION_IDX otherwise
 */
static uint32_t get_partition_idx(uint32_t partition_id)
{
    uint32_t i;

    if (partition_id == INVALID_PARTITION_ID) {
        return SPM_INVALID_PARTITION_IDX;
    }

    for (i = 0; i < g_spm_partition_db.partition_count; ++i) {
        if (g_spm_partition_db.partitions[i].static_data->partition_id ==
            partition_id) {
            return i;
        }
    }
    return SPM_INVALID_PARTITION_IDX;
}

/**
 * \brief Get the flags associated with a partition
 *
 * \param[in] partition_idx  Partition index
 *
 * \return Flags associated with the partition
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_flags(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
           partition_flags;
}

static enum tfm_status_e tfm_start_partition(
                                         const struct tfm_sfn_req_s *desc_ptr,
                                         uint32_t excReturn)
{
    enum tfm_status_e res;
    uint32_t caller_partition_idx = desc_ptr->caller_part_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;
    const struct spm_partition_runtime_data_t *caller_part_data;
    uint32_t caller_flags;
    register uint32_t partition_idx;
    uint32_t psp;
    uint32_t partition_psp, partition_psplim;
    uint32_t partition_state;
    uint32_t caller_partition_state;
    struct tfm_state_context_t *svc_ctx;
    uint32_t caller_partition_id;
    int32_t client_id;
    struct iovec_args_t *iovec_args;

    psp = __get_PSP();
    svc_ctx = (struct tfm_state_context_t *)psp;
    caller_flags = tfm_spm_partition_get_flags(caller_partition_idx);

    /* Check partition state consistency */
    if (((caller_flags & SPM_PART_FLAG_APP_ROT) != 0)
        != (!desc_ptr->ns_caller)) {
        /* Partition state inconsistency detected */
        return TFM_SECURE_LOCK_FAILED;
    }

    partition_idx = get_partition_idx(desc_ptr->sp_id);

    curr_part_data = tfm_spm_partition_get_runtime_data(partition_idx);
    caller_part_data = tfm_spm_partition_get_runtime_data(caller_partition_idx);
    partition_state = curr_part_data->partition_state;
    caller_partition_state = caller_part_data->partition_state;
    caller_partition_id = tfm_spm_partition_get_partition_id(
                                                         caller_partition_idx);

    if (!tfm_secure_api_initializing) {
        res = check_partition_state(partition_state, caller_partition_state);
        if (res != TFM_SUCCESS) {
            return res;
        }
    }

    /* Prepare switch to shared secure partition stack */
    /* In case the call is coming from the non-secure world, we save the iovecs
     * at the top of the stack. So the memory area that can actually be used as
     * stack by the partitions starts at a lower address.
     */
    partition_psp =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
    partition_psplim =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Base);

    /* Store the context for the partition call */
    tfm_spm_partition_set_caller_partition_idx(partition_idx,
                                               caller_partition_idx);
    tfm_spm_partition_store_context(caller_partition_idx, psp, excReturn);

    if ((caller_flags & SPM_PART_FLAG_APP_ROT)) {
        tfm_spm_partition_set_caller_client_id(partition_idx,
                                               caller_partition_id);
    } else {
        client_id = tfm_nspm_get_current_client_id();
        if (client_id >= 0) {
            return TFM_SECURE_LOCK_FAILED;
        }
        tfm_spm_partition_set_caller_client_id(partition_idx, client_id);
    }

    /* In level one, only switch context and return from exception if in
     * handler mode
     */
    if ((desc_ptr->ns_caller) || (tfm_secure_api_initializing)) {
        if (tfm_spm_partition_set_iovec(partition_idx, desc_ptr->args) !=
            SPM_ERR_OK) {
            return TFM_ERROR_GENERIC;
        }
        iovec_args = get_iovec_args_stack_address(partition_idx);
        tfm_copy_iovec_parameters(iovec_args, &(curr_part_data->iovec_args));

        /* Prepare the partition context, update stack ptr */
        psp = (uint32_t)prepare_partition_iovec_ctx(svc_ctx, desc_ptr,
                                                    iovec_args,
                                                    (uint32_t *)partition_psp);
        __set_PSP(psp);
        tfm_arch_set_psplim(partition_psplim);
    }

    tfm_spm_partition_set_state(caller_partition_idx,
                                SPM_PARTITION_STATE_BLOCKED);
    tfm_spm_partition_set_state(partition_idx, SPM_PARTITION_STATE_RUNNING);
    tfm_secure_lock++;

    return TFM_SUCCESS;
}
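
/*
 * Rough layout of the shared secure partition stack set up by
 * tfm_start_partition() for a call entering from the non-secure world:
 *
 *   Image$$TFM_SECURE_STACK$$ZI$$Limit -> +------------------------------+
 *                                         | struct iovec_args_t (copy)   |
 *   partition_psp ----------------------> +------------------------------+
 *                                         | exception return frame       |
 *                                         | (r0-r3, r12, lr, ra, xpsr)   |
 *                                         +------------------------------+
 *                                         | partition stack grows down   |
 *   Image$$TFM_SECURE_STACK$$ZI$$Base --> +------------------------------+
 *                                           (PSPLIM is set to the Base)
 */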

static enum tfm_status_e tfm_start_partition_for_irq_handling(
                                          uint32_t excReturn,
                                          struct tfm_state_context_t *svc_ctx)
{
    uint32_t handler_partition_id = svc_ctx->r0;
    sfn_t unpriv_handler = (sfn_t)svc_ctx->r1;
    uint32_t irq_signal = svc_ctx->r2;
    IRQn_Type irq_line = (IRQn_Type) svc_ctx->r3;
    enum tfm_status_e res;
    uint32_t psp = __get_PSP();
    uint32_t handler_partition_psp;
    uint32_t handler_partition_state;
    uint32_t interrupted_partition_idx =
        tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t handler_partition_idx;

    handler_partition_idx = get_partition_idx(handler_partition_id);
    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                        handler_partition_idx);
    handler_partition_state = handler_part_data->partition_state;

    res = check_irq_partition_state(handler_partition_state);
    if (res != TFM_SUCCESS) {
        return res;
    }

    /* set mask for the partition */
    tfm_spm_partition_set_signal_mask(
                                  handler_partition_idx,
                                  handler_part_data->signal_mask | irq_signal);

    tfm_spm_hal_disable_irq(irq_line);

    /* save the current context of the interrupted partition */
    tfm_spm_partition_push_interrupted_ctx(interrupted_partition_idx);

    handler_partition_psp = psp;

    /* save the current context of the handler partition */
    tfm_spm_partition_push_handler_ctx(handler_partition_idx);

    /* Store caller for the partition */
    tfm_spm_partition_set_caller_partition_idx(handler_partition_idx,
                                               interrupted_partition_idx);

    psp = (uint32_t)prepare_partition_irq_ctx(svc_ctx, unpriv_handler,
                                             (int32_t *)handler_partition_psp);
    __set_PSP(psp);

    tfm_spm_partition_set_state(interrupted_partition_idx,
                                SPM_PARTITION_STATE_SUSPENDED);
    tfm_spm_partition_set_state(handler_partition_idx,
                                SPM_PARTITION_STATE_HANDLING_IRQ);

    return TFM_SUCCESS;
}

static enum tfm_status_e tfm_return_from_partition(uint32_t *excReturn)
{
    uint32_t current_partition_idx =
        tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *curr_part_data, *ret_part_data;
    uint32_t return_partition_idx;
    uint32_t return_partition_flags;
    uint32_t psp = __get_PSP();
    size_t i;
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;
    struct iovec_args_t *iovec_args;

    if (current_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    curr_part_data = tfm_spm_partition_get_runtime_data(current_partition_idx);
    return_partition_idx = curr_part_data->caller_partition_idx;

    if (return_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    ret_part_data = tfm_spm_partition_get_runtime_data(return_partition_idx);

    return_partition_flags = tfm_spm_partition_get_flags(return_partition_idx);

    tfm_secure_lock--;

    if (!(return_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        (tfm_secure_api_initializing)) {
        /* In TFM level 1 context restore is only done when
         * returning to NS or after initialization
         */
        /* Restore caller context */
        restore_caller_ctx(svc_ctx,
            (struct tfm_state_context_t *)ret_part_data->stack_ptr);
        *excReturn = ret_part_data->lr;
        __set_PSP(ret_part_data->stack_ptr);
        REGION_DECLARE_T(Image$$, ARM_LIB_STACK, $$ZI$$Base, uint32_t)[];
        uint32_t psp_stack_bottom =
            (uint32_t)REGION_NAME(Image$$, ARM_LIB_STACK, $$ZI$$Base);
        tfm_arch_set_psplim(psp_stack_bottom);

        iovec_args = &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];

        for (i = 0; i < curr_part_data->iovec_args.out_len; ++i) {
            curr_part_data->orig_outvec[i].len = iovec_args->out_vec[i].len;
        }
        tfm_clear_iovec_parameters(iovec_args);
    }

    tfm_spm_partition_cleanup_context(current_partition_idx);

    tfm_spm_partition_set_state(current_partition_idx,
                                SPM_PARTITION_STATE_IDLE);
    tfm_spm_partition_set_state(return_partition_idx,
                                SPM_PARTITION_STATE_RUNNING);

    return TFM_SUCCESS;
}

static enum tfm_status_e tfm_return_from_partition_irq_handling(
                                                           uint32_t *excReturn)
{
    uint32_t handler_partition_idx =
        tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t interrupted_partition_idx;
    uint32_t psp = __get_PSP();
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;

    if (handler_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                        handler_partition_idx);
    interrupted_partition_idx = handler_part_data->caller_partition_idx;

    if (interrupted_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    /* For level 1, modify PSP, so that the SVC stack frame disappears,
     * and return to the privileged handler using the stack frame still on the
     * MSP stack.
     */
    *excReturn = svc_ctx->ra;
    psp += sizeof(struct tfm_state_context_t);

    tfm_spm_partition_pop_handler_ctx(handler_partition_idx);
    tfm_spm_partition_pop_interrupted_ctx(interrupted_partition_idx);

    __set_PSP(psp);

    return TFM_SUCCESS;
}

static enum tfm_status_e tfm_check_sfn_req_integrity(
                                          const struct tfm_sfn_req_s *desc_ptr)
{
    if ((desc_ptr == NULL) ||
        (desc_ptr->sp_id == 0) ||
        (desc_ptr->sfn == NULL)) {
        /* invalid parameter */
        return TFM_ERROR_INVALID_PARAMETER;
    }
    return TFM_SUCCESS;
}

static enum tfm_status_e tfm_core_check_sfn_req_rules(
                                          const struct tfm_sfn_req_s *desc_ptr)
{
    /* Check partition idx validity */
    if (desc_ptr->caller_part_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_ERROR_NO_ACTIVE_PARTITION;
    }

    if ((desc_ptr->ns_caller) && (tfm_secure_lock != 0)) {
        /* Secure domain is already locked!
         * This should only happen if caller is secure partition!
         */
        /* This scenario is a potential security breach.
         * Error is handled in caller.
         */
        return TFM_ERROR_SECURE_DOMAIN_LOCKED;
    }

    if (tfm_secure_api_initializing) {
        int32_t id =
            tfm_spm_partition_get_partition_id(desc_ptr->caller_part_idx);

        if ((id != TFM_SP_CORE_ID) || (tfm_secure_lock != 0)) {
            /* Invalid request during system initialization */
            ERROR_MSG("Invalid service request during initialization!");
            return TFM_ERROR_NOT_INITIALIZED;
        }
    }

    return TFM_SUCCESS;
}

uint32_t tfm_spm_partition_get_partition_id(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
           partition_id;
}

uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
{
    if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
        return TFM_PARTITION_PRIVILEGED_MODE;
    } else {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
}

bool tfm_is_partition_privileged(uint32_t partition_idx)
{
    uint32_t flags = tfm_spm_partition_get_flags(partition_idx);

    return tfm_spm_partition_get_privileged_mode(flags) ==
           TFM_PARTITION_PRIVILEGED_MODE;
}

void tfm_spm_secure_api_init_done(void)
{
    tfm_secure_api_initializing = 0;
}

enum tfm_status_e tfm_spm_sfn_request_handler(
                            struct tfm_sfn_req_s *desc_ptr, uint32_t excReturn)
{
    enum tfm_status_e res;

    res = tfm_check_sfn_req_integrity(desc_ptr);
    if (res != TFM_SUCCESS) {
        ERROR_MSG("Invalid service request!");
        tfm_secure_api_error_handler();
    }

    __disable_irq();

    desc_ptr->caller_part_idx = tfm_spm_partition_get_running_partition_idx();

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        __enable_irq();
        tfm_secure_api_error_handler();
    }

    res = tfm_core_check_sfn_req_rules(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* FixMe: error compartmentalization TBD */
        tfm_spm_partition_set_state(
            desc_ptr->caller_part_idx, SPM_PARTITION_STATE_CLOSED);
        __enable_irq();
        ERROR_MSG("Unauthorized service request!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition(desc_ptr, excReturn);
    if (res != TFM_SUCCESS) {
        /* FixMe: consider possible fault scenarios */
        __enable_irq();
        ERROR_MSG("Failed to process service request!");
        tfm_secure_api_error_handler();
    }

    __enable_irq();

    return res;
}
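
/*
 * Typical flow of a secure service call in this (library model) code path:
 * the veneer issues an SVC, tfm_spm_partition_request_svc_handler() forwards
 * the request descriptor to tfm_spm_sfn_request_handler() above, which
 * validates the request and uses tfm_start_partition() to switch to the
 * shared partition stack; the SVC return then lands in the partition's secure
 * function. When that function returns, a second SVC triggers
 * tfm_spm_partition_return_handler(), which unwinds the call via
 * tfm_return_from_partition().
 */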

int32_t tfm_spm_sfn_request_thread_mode(struct tfm_sfn_req_s *desc_ptr)
{
    enum tfm_status_e res;
    int32_t *args;
    int32_t retVal;

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        return (int32_t)res;
    }

    /* No excReturn value is needed as no exception handling is used */
    res = tfm_spm_sfn_request_handler(desc_ptr, 0);

    if (res != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    /* Secure partition to secure partition call in TFM level 1 */
    args = desc_ptr->args;
    retVal = desc_ptr->sfn(args[0], args[1], args[2], args[3]);

    /* return handler should restore original exc_return value... */
    res = tfm_return_from_partition(NULL);
    if (res == TFM_SUCCESS) {
        /* If unlock successful, pass SS return value to caller */
        return retVal;
    } else {
        /* Unlock errors indicate ctx database corruption or unknown
         * anomalies. Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }
    return (int32_t)res;
}

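/**
 * \brief Check whether a buffer lies entirely within secure data memory and
 *        meets an alignment requirement.
 *
 * \param[in] partition_idx  Partition index of the caller (not used by the
 *                           current check, which only tests against the
 *                           secure data memory bounds)
 * \param[in] start_addr     Start address of the buffer
 * \param[in] len            Length of the buffer in bytes
 * \param[in] alignment      Required alignment, expressed as a power of two
 *                           (e.g. 2 means 4-byte alignment)
 *
 * \return 1 if the buffer is accessible, 0 otherwise.
 */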
int32_t tfm_spm_check_buffer_access(uint32_t partition_idx,
                                    void *start_addr,
                                    size_t len,
                                    uint32_t alignment)
{
    uintptr_t start_addr_value = (uintptr_t)start_addr;
    uintptr_t end_addr_value = (uintptr_t)start_addr + len;
    uintptr_t alignment_mask;

    alignment_mask = (((uintptr_t)1) << alignment) - 1;

    /* Check that the pointer is aligned properly */
    if (start_addr_value & alignment_mask) {
        /* not aligned, return error */
        return 0;
    }

    /* Protect against overflow (and zero len) */
    if (end_addr_value <= start_addr_value) {
        return 0;
    }

    /* For privileged partition execution, all secure data memory and stack
     * is accessible
     */
    if (start_addr_value >= S_DATA_START &&
        end_addr_value <= (S_DATA_START + S_DATA_SIZE)) {
        return 1;
    }

    return 0;
}

void tfm_spm_get_caller_client_id_handler(uint32_t *svc_args)
{
    uintptr_t result_ptr_value = svc_args[0];
    uint32_t running_partition_idx =
        tfm_spm_partition_get_running_partition_idx();
    const uint32_t running_partition_flags =
        tfm_spm_partition_get_flags(running_partition_idx);
    const struct spm_partition_runtime_data_t *curr_part_data =
        tfm_spm_partition_get_runtime_data(running_partition_idx);
    int res = 0;

    if (!(running_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_SUSPENDED) {
        /* This handler shouldn't be called from outside partition context.
         * Also if the current partition is handling IRQ, the caller partition
         * index might not be valid;
         * Partitions are only allowed to run while S domain is locked.
         */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    /* Make sure that the output pointer points to a memory area that is owned
     * by the partition
     */
    res = tfm_spm_check_buffer_access(running_partition_idx,
                                      (void *)result_ptr_value,
                                      sizeof(curr_part_data->caller_client_id),
                                      2);
    if (!res) {
        /* Not in accessible range, return error */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    *((int32_t *)result_ptr_value) = curr_part_data->caller_client_id;

    /* Store return value in r0 */
    svc_args[0] = (uint32_t)TFM_SUCCESS;
}

/* This SVC handler is called if veneer is running in thread mode */
uint32_t tfm_spm_partition_request_svc_handler(
        const uint32_t *svc_ctx, uint32_t excReturn)
{
    struct tfm_sfn_req_s *desc_ptr;

    if (!(excReturn & EXC_RETURN_STACK_PROCESS)) {
        /* Service request SVC called with MSP active.
         * Either invalid configuration for Thread mode or SVC called
         * from Handler mode, which is not supported.
         * FixMe: error severity TBD
         */
        ERROR_MSG("Service request SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    desc_ptr = (struct tfm_sfn_req_s *)svc_ctx[0];

    if (tfm_spm_sfn_request_handler(desc_ptr, excReturn) != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    return EXC_RETURN_SECURE_FUNCTION;
}

/* This SVC handler is called, if a thread mode execution environment is to
 * be set up, to run an unprivileged IRQ handler
 */
uint32_t tfm_spm_depriv_req_handler(uint32_t *svc_args, uint32_t excReturn)
{
    struct tfm_state_context_t *svc_ctx =
        (struct tfm_state_context_t *)svc_args;

    enum tfm_status_e res;

    if (excReturn & EXC_RETURN_STACK_PROCESS) {
        /* FixMe: error severity TBD */
        ERROR_MSG("Partition request SVC called with PSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition_for_irq_handling(excReturn, svc_ctx);
    if (res != TFM_SUCCESS) {
        /* The partition is in an invalid state (UNINIT or CLOSED), so none of
         * its code can be run
         */
        /* FixMe: For now this case is handled with TF-M panic, however it
         * would be possible to skip the execution of the interrupt handler,
         * and resume the execution of the interrupted code.
         */
        tfm_secure_api_error_handler();
    }
    return EXC_RETURN_SECURE_FUNCTION;
}

/* This SVC handler is called when sfn returns */
uint32_t tfm_spm_partition_return_handler(uint32_t lr)
{
    enum tfm_status_e res;

    if (!(lr & EXC_RETURN_STACK_PROCESS)) {
        /* Partition return SVC called with MSP active.
         * This should not happen!
         */
        ERROR_MSG("Partition return SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_return_from_partition(&lr);
    if (res != TFM_SUCCESS) {
        /* Unlock errors indicate ctx database corruption or unknown anomalies
         * Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }

    return lr;
}

/* This SVC handler is called if a deprivileged IRQ handler was executed, and
 * the execution environment is to be set back for the privileged handler mode
 */
uint32_t tfm_spm_depriv_return_handler(uint32_t *irq_svc_args, uint32_t lr)
{
    enum tfm_status_e res;
    struct tfm_state_context_t *irq_svc_ctx =
        (struct tfm_state_context_t *)irq_svc_args;

    if (!(lr & EXC_RETURN_STACK_PROCESS)) {
        /* Partition request SVC called with MSP active.
         * FixMe: error severity TBD
         */
        ERROR_MSG("Partition request SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_return_from_partition_irq_handling(&lr);
    if (res != TFM_SUCCESS) {
        /* Unlock errors indicate ctx database corruption or unknown anomalies
         * Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }

    irq_svc_ctx->ra = lr;

    return EXC_RETURN_SECURE_HANDLER;
}

/* FIXME: get_irq_line_for_signal is also implemented in the ipc folder. */
/**
 * \brief Return the IRQ line number associated with a signal
 *
 * \param[in] partition_id  The ID of the partition in which we look for the
 *                          signal
 * \param[in] signal        The signal we do the query for
 *
 * \retval >=0  The IRQ line number associated with a signal in the partition
 * \retval <0   error
 */
static IRQn_Type get_irq_line_for_signal(int32_t partition_id,
                                         psa_signal_t signal)
{
    size_t i;

    for (i = 0; i < tfm_core_irq_signals_count; ++i) {
        if (tfm_core_irq_signals[i].partition_id == partition_id &&
            tfm_core_irq_signals[i].signal_value == signal) {
            return tfm_core_irq_signals[i].irq_line;
        }
    }
    return (IRQn_Type) -1;
}

void tfm_spm_enable_irq_handler(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
        (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t running_partition_idx =
        tfm_spm_partition_get_running_partition_idx();
    uint32_t running_partition_id =
        tfm_spm_partition_get_partition_id(running_partition_idx);
    IRQn_Type irq_line;

    /* Only a single signal is allowed */
    if (!tfm_is_one_bit_set(irq_signal)) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    tfm_spm_hal_enable_irq(irq_line);
}

void tfm_spm_disable_irq_handler(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
        (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t running_partition_idx =
        tfm_spm_partition_get_running_partition_idx();
    uint32_t running_partition_id =
        tfm_spm_partition_get_partition_id(running_partition_idx);
    IRQn_Type irq_line;

    /* Only a single signal is allowed */
    if (!tfm_is_one_bit_set(irq_signal)) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    tfm_spm_hal_disable_irq(irq_line);
}

void tfm_spm_psa_wait(uint32_t *svc_args)
{
    /* Look for partition that is ready for run */
    struct tfm_state_context_t *svc_ctx =
        (struct tfm_state_context_t *)svc_args;
    uint32_t running_partition_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;

    psa_signal_t signal_mask = svc_ctx->r0;
    uint32_t timeout = svc_ctx->r1;

    /*
     * Timeout[30:0] are reserved for future use.
     * SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    running_partition_idx = tfm_spm_partition_get_running_partition_idx();
    curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);

    if (timeout == PSA_BLOCK) {
        /* FIXME: Scheduling is not available in library model, and busy wait
         * is also not possible as this code is running in SVC context, and it
         * cannot be pre-empted by interrupts. So do nothing here for now
         */
        (void) signal_mask;
    }

    svc_ctx->r0 = curr_part_data->signal_mask;
}

void tfm_spm_psa_eoi(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
        (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t signal_mask;
    uint32_t running_partition_idx;
    uint32_t running_partition_id;
    const struct spm_partition_runtime_data_t *curr_part_data;
    IRQn_Type irq_line;

    running_partition_idx = tfm_spm_partition_get_running_partition_idx();
    running_partition_id =
        tfm_spm_partition_get_partition_id(running_partition_idx);
    curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);

    /* Only a single signal is allowed */
    if (!tfm_is_one_bit_set(irq_signal)) {
        tfm_secure_api_error_handler();
    }

    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    tfm_spm_hal_clear_pending_irq(irq_line);
    tfm_spm_hal_enable_irq(irq_line);

    signal_mask = curr_part_data->signal_mask & ~irq_signal;
    tfm_spm_partition_set_signal_mask(running_partition_idx, signal_mask);
}

/*
 * This function is called when a secure partition causes an error.
 * In case of an error in the error handling, a non-zero value has to be
 * returned.
 */
static void tfm_spm_partition_err_handler(uint32_t partition_idx,
                                          int32_t err_code)
{
    (void)err_code;

    tfm_spm_partition_set_state(partition_idx,
                                SPM_PARTITION_STATE_CLOSED);
}

enum spm_err_t tfm_spm_partition_init(void)
{
    struct spm_partition_desc_t *part;
    struct tfm_sfn_req_s desc;
    int32_t args[4] = {0};
    int32_t fail_cnt = 0;
    uint32_t idx;
    const struct tfm_spm_partition_platform_data_t **platform_data_p;

    /* Call the init function for each partition */
    for (idx = 0; idx < g_spm_partition_db.partition_count; ++idx) {
        part = &g_spm_partition_db.partitions[idx];
        platform_data_p = part->platform_data_list;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(idx,
                        *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    fail_cnt++;
                }
                ++platform_data_p;
            }
        }
        if (part->static_data->partition_init == NULL) {
            tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            tfm_spm_partition_set_caller_partition_idx(idx,
                                                    SPM_INVALID_PARTITION_IDX);
        } else {
            int32_t res;

            desc.args = args;
            desc.ns_caller = false;
            desc.sfn = (sfn_t)part->static_data->partition_init;
            desc.sp_id = part->static_data->partition_id;
            res = tfm_core_sfn_request(&desc);
            if (res == TFM_SUCCESS) {
                tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            } else {
                tfm_spm_partition_err_handler(idx, res);
                fail_cnt++;
            }
        }
    }

    tfm_spm_secure_api_init_done();

    if (fail_cnt == 0) {
        return SPM_ERR_OK;
    } else {
        return SPM_ERR_PARTITION_NOT_AVAILABLE;
    }
}

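/*
 * Each partition has a small per-partition context stack (ctx_stack_ptr,
 * assigned from ctx_stack_list[] in tfm_spm_db_init()). The push/pop helpers
 * below store and restore interrupted-partition and handler-partition frames
 * on it; ctx_stack_ptr is a uint32_t pointer, so the frame sizes are converted
 * to uint32_t units when the pointer is advanced or rewound.
 */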
void tfm_spm_partition_push_interrupted_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
        &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct interrupted_ctx_stack_frame_t *stack_frame =
        (struct interrupted_ctx_stack_frame_t *)runtime_data->ctx_stack_ptr;

    stack_frame->partition_state = runtime_data->partition_state;

    runtime_data->ctx_stack_ptr +=
        sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
}

void tfm_spm_partition_pop_interrupted_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
        &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct interrupted_ctx_stack_frame_t *stack_frame;

    runtime_data->ctx_stack_ptr -=
        sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);

    stack_frame = (struct interrupted_ctx_stack_frame_t *)
                      runtime_data->ctx_stack_ptr;
    tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
    stack_frame->partition_state = 0;
}

void tfm_spm_partition_push_handler_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
        &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct handler_ctx_stack_frame_t *stack_frame =
        (struct handler_ctx_stack_frame_t *)
            runtime_data->ctx_stack_ptr;

    stack_frame->partition_state = runtime_data->partition_state;
    stack_frame->caller_partition_idx = runtime_data->caller_partition_idx;

    runtime_data->ctx_stack_ptr +=
        sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
}

void tfm_spm_partition_pop_handler_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
        &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct handler_ctx_stack_frame_t *stack_frame;

    runtime_data->ctx_stack_ptr -=
        sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);

    stack_frame = (struct handler_ctx_stack_frame_t *)
                      runtime_data->ctx_stack_ptr;

    tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
    stack_frame->partition_state = 0;
    tfm_spm_partition_set_caller_partition_idx(
        partition_idx, stack_frame->caller_partition_idx);
    stack_frame->caller_partition_idx = 0;
}

void tfm_spm_partition_store_context(uint32_t partition_idx,
        uint32_t stack_ptr, uint32_t lr)
{
    g_spm_partition_db.partitions[partition_idx].
            runtime_data.stack_ptr = stack_ptr;
    g_spm_partition_db.partitions[partition_idx].
            runtime_data.lr = lr;
}

const struct spm_partition_runtime_data_t *
             tfm_spm_partition_get_runtime_data(uint32_t partition_idx)
{
    return &(g_spm_partition_db.partitions[partition_idx].runtime_data);
}

void tfm_spm_partition_set_state(uint32_t partition_idx, uint32_t state)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.partition_state =
        state;
    if (state == SPM_PARTITION_STATE_RUNNING ||
        state == SPM_PARTITION_STATE_HANDLING_IRQ) {
        g_spm_partition_db.running_partition_idx = partition_idx;
    }
}

void tfm_spm_partition_set_caller_partition_idx(uint32_t partition_idx,
                                                uint32_t caller_partition_idx)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.
            caller_partition_idx = caller_partition_idx;
}

void tfm_spm_partition_set_signal_mask(uint32_t partition_idx,
                                       uint32_t signal_mask)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.
            signal_mask = signal_mask;
}

void tfm_spm_partition_set_caller_client_id(uint32_t partition_idx,
                                            int32_t caller_client_id)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.
            caller_client_id = caller_client_id;
}

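/**
 * \brief Copy the iovec parameters of a call into the runtime data of the
 *        partition being called.
 *
 * The original outvec pointer is also recorded, so that the lengths actually
 * written by the partition can be copied back to the caller's outvecs in
 * tfm_return_from_partition().
 *
 * \param[in] partition_idx  Partition index
 * \param[in] args           The args array of the secure function request
 *
 * \return \ref SPM_ERR_OK on success, \ref SPM_ERR_INVALID_PARAMETER otherwise
 */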
enum spm_err_t tfm_spm_partition_set_iovec(uint32_t partition_idx,
                                           const int32_t *args)
{
    struct spm_partition_runtime_data_t *runtime_data =
        &g_spm_partition_db.partitions[partition_idx].runtime_data;
    size_t i;

    if ((args[1] < 0) || (args[3] < 0)) {
        return SPM_ERR_INVALID_PARAMETER;
    }

    runtime_data->iovec_args.in_len = (size_t)args[1];
    for (i = 0U; i < runtime_data->iovec_args.in_len; ++i) {
        runtime_data->iovec_args.in_vec[i].base =
            ((psa_invec *)args[0])[i].base;
        runtime_data->iovec_args.in_vec[i].len = ((psa_invec *)args[0])[i].len;
    }
    runtime_data->iovec_args.out_len = (size_t)args[3];
    for (i = 0U; i < runtime_data->iovec_args.out_len; ++i) {
        runtime_data->iovec_args.out_vec[i].base =
            ((psa_outvec *)args[2])[i].base;
        runtime_data->iovec_args.out_vec[i].len =
            ((psa_outvec *)args[2])[i].len;
    }
    runtime_data->orig_outvec = (psa_outvec *)args[2];

    return SPM_ERR_OK;
}

uint32_t tfm_spm_partition_get_running_partition_idx(void)
{
    return g_spm_partition_db.running_partition_idx;
}

void tfm_spm_partition_cleanup_context(uint32_t partition_idx)
{
    struct spm_partition_desc_t *partition =
        &(g_spm_partition_db.partitions[partition_idx]);
    int32_t i;

    partition->runtime_data.caller_partition_idx = SPM_INVALID_PARTITION_IDX;
    partition->runtime_data.iovec_args.in_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        partition->runtime_data.iovec_args.in_vec[i].base = 0;
        partition->runtime_data.iovec_args.in_vec[i].len = 0;
    }
    partition->runtime_data.iovec_args.out_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        partition->runtime_data.iovec_args.out_vec[i].base = 0;
        partition->runtime_data.iovec_args.out_vec[i].len = 0;
    }
    partition->runtime_data.orig_outvec = 0;
}

void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
{
    uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
    uint32_t running_partition_flags = 0;
    uint32_t running_partition_idx;

    /* Check permissions on request type basis */

    switch (svc_ctx->r0) {
    case TFM_SPM_REQUEST_RESET_VOTE:
        running_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
        running_partition_flags = tfm_spm_partition_get_flags(
                                                        running_partition_idx);

        /* Currently only PSA Root of Trust services are allowed to make Reset
         * vote request
         */
        if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
            *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
            break;
        }

        /* FixMe: this is a placeholder for checks to be performed before
         * allowing execution of reset
         */
        *res_ptr = (uint32_t)TFM_SUCCESS;

        break;
    default:
        *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
    }
}

enum spm_err_t tfm_spm_db_init(void)
{
    uint32_t i;

    /* This function initialises partition db */

    /* For the non secure Execution environment */
    tfm_nspm_configure_clients();

    for (i = 0; i < g_spm_partition_db.partition_count; i++) {
        g_spm_partition_db.partitions[i].runtime_data.partition_state =
            SPM_PARTITION_STATE_UNINIT;
        g_spm_partition_db.partitions[i].runtime_data.caller_partition_idx =
            SPM_INVALID_PARTITION_IDX;
        g_spm_partition_db.partitions[i].runtime_data.caller_client_id =
            TFM_INVALID_CLIENT_ID;
        g_spm_partition_db.partitions[i].runtime_data.ctx_stack_ptr =
            ctx_stack_list[i];
        g_spm_partition_db.partitions[i].static_data = &static_data_list[i];
        g_spm_partition_db.partitions[i].platform_data_list =
            platform_data_list_list[i];
    }
    g_spm_partition_db.is_init = 1;

    return SPM_ERR_OK;
}