/*
 * Copyright (c) 2017-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include <stdbool.h>
#include <arm_cmse.h>
#include "tfm_nspm.h"
#include "tfm_api.h"
#include "tfm_arch.h"
#include "tfm_irq_list.h"
#include "psa/service.h"
#include "tfm_core_mem_check.h"
#include "tfm_peripherals_def.h"
#include "tfm_secure_api.h"
#include "tfm_spm_hal.h"
#include "tfm_core_trustzone.h"
#include "spm_func.h"
#include "region_defs.h"
#include "region.h"
#include "spm_partition_defs.h"
#include "psa_manifest/pid.h"
#include "tfm/tfm_spm_services.h"
#include "tfm_spm_db_func.inc"

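/*
 * EXC_RETURN values used by the SVC handlers below: 0xFFFFFFFD returns to
 * Thread mode using the process stack, 0xFFFFFFF1 returns to Handler mode
 * using the main stack.
 */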
#define EXC_RETURN_SECURE_FUNCTION 0xFFFFFFFD
#define EXC_RETURN_SECURE_HANDLER 0xFFFFFFF1

#ifndef TFM_LVL
#error TFM_LVL is not defined!
#endif

#ifdef TFM_MULTI_CORE_TOPOLOGY
#error Multi core is not supported by Function mode
#endif

REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Base, uint32_t);
REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Limit, struct iovec_args_t)[];

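/*
 * Pointer to the two-word stack seal of the shared secure partition stack:
 * the seal sits immediately below the iovec parameter area that occupies the
 * top sizeof(struct iovec_args_t) bytes of TFM_SECURE_STACK (see the diagram
 * in tfm_spm_seal_psp_stacks() below).
 */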
static uint32_t *tfm_secure_stack_seal =
    ((uint32_t *)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1]) - 2;

REGION_DECLARE_T(Image$$, ARM_LIB_STACK_SEAL, $$ZI$$Base, uint32_t);

/*
 * Function to seal the psp stacks for the Function model of TF-M.
 */
void tfm_spm_seal_psp_stacks(void)
{
    /*
     * The top of TFM_SECURE_STACK is used for iovec parameters, so the seal
     * is placed between the iovec parameters and the partition stack.
     *
     * Image$$TFM_SECURE_STACK$$ZI$$Limit-> +-------------------------+
     *                                      |                         |
     *                                      |  iovec parameters for   |
     *                                      |  partition              |
     * (Image$$TFM_SECURE_STACK$$ZI$$Limit -|                         |
     *  sizeof(iovec_args_t))            -> +-------------------------+
     *                                      |       Stack Seal        |
     *                                      +-------------------------+
     *                                      |                         |
     *                                      |     Partition stack     |
     *                                      |                         |
     * Image$$TFM_SECURE_STACK$$ZI$$Base->  +-------------------------+
     */
    *(tfm_secure_stack_seal) = TFM_STACK_SEAL_VALUE;
    *(tfm_secure_stack_seal + 1) = TFM_STACK_SEAL_VALUE;

    /*
     * Seal the ARM_LIB_STACK by writing the seal value to the reserved
     * region.
     */
    uint32_t *arm_lib_stck_seal_base = (uint32_t *)&REGION_NAME(Image$$,
                                           ARM_LIB_STACK_SEAL, $$ZI$$Base);

    *(arm_lib_stck_seal_base) = TFM_STACK_SEAL_VALUE;
    *(arm_lib_stck_seal_base + 1) = TFM_STACK_SEAL_VALUE;
}

/*
 * This is the "Big Lock" on the secure side, to guarantee single entry
 * to SPE
 */
static int32_t tfm_secure_lock;
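/*
 * Non-zero while the SPM is still initialising the secure partitions; cleared
 * by tfm_spm_secure_api_init_done(). While it is set, the partition state
 * checks in tfm_start_partition() are bypassed.
 */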
static int32_t tfm_secure_api_initializing = 1;

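/**
 * \brief Create a stack frame that, on exception return, resumes the veneer
 *        in thread mode with the secure function address in LR and the iovec
 *        parameters in r0-r3.
 *
 * \param[in] svc_ctx     The stacked SVC context
 * \param[in] desc_ptr    The secure function request descriptor
 * \param[in] iovec_args  The iovec parameters of the request
 * \param[in] dst         A pointer where the context is to be created. (the
 *                        pointer is considered to be a stack pointer, and
 *                        the frame is created below it)
 *
 * \return A pointer pointing at the created stack frame.
 */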
static uint32_t *prepare_partition_iovec_ctx(
                                     const struct tfm_state_context_t *svc_ctx,
                                     const struct tfm_sfn_req_s *desc_ptr,
                                     const struct iovec_args_t *iovec_args,
                                     uint32_t *dst)
{
    /* XPSR = as was when called, but make sure it's thread mode */
    *(--dst) = svc_ctx->xpsr & 0xFFFFFE00U;
    /* ReturnAddress = resume veneer in new context */
    *(--dst) = svc_ctx->ra;
    /* LR = sfn address */
    *(--dst) = (uint32_t)desc_ptr->sfn;
    /* R12 = don't care */
    *(--dst) = 0U;

    /* R0-R3 = sfn arguments */
    *(--dst) = iovec_args->out_len;
    *(--dst) = (uint32_t)iovec_args->out_vec;
    *(--dst) = iovec_args->in_len;
    *(--dst) = (uint32_t)iovec_args->in_vec;

    return dst;
}

/**
 * \brief Create a stack frame that sets the execution environment to thread
 *        mode on exception return.
 *
 * \param[in] svc_ctx         The stacked SVC context
 * \param[in] unpriv_handler  The unprivileged IRQ handler to be called
 * \param[in] dst             A pointer where the context is to be created.
 *                            (the pointer is considered to be a stack pointer,
 *                            and the frame is created below it)
 *
 * \return A pointer pointing at the created stack frame.
 */
static int32_t *prepare_partition_irq_ctx(
                                     const struct tfm_state_context_t *svc_ctx,
                                     sfn_t unpriv_handler,
                                     int32_t *dst)
{
    int i;

    /* XPSR = as was when called, but make sure it's thread mode */
    *(--dst) = svc_ctx->xpsr & 0xFFFFFE00;
    /* ReturnAddress = resume to the privileged handler code, but execute it
     * unprivileged.
     */
    *(--dst) = svc_ctx->ra;
    /* LR = start address */
    *(--dst) = (int32_t)unpriv_handler;

    /* R12, R0-R3 unused arguments */
    for (i = 0; i < 5; ++i) {
        *(--dst) = 0;
    }

    return dst;
}

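/**
 * \brief Restore the caller's context: propagate the return address and the
 *        function return value from the returning SVC context into the
 *        caller's saved state context.
 *
 * \param[in]  svc_ctx     The stacked SVC context
 * \param[out] target_ctx  The caller's state context to be updated
 */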
static void restore_caller_ctx(const struct tfm_state_context_t *svc_ctx,
                               struct tfm_state_context_t *target_ctx)
{
    /* ReturnAddress = resume veneer after second SVC */
    target_ctx->ra = svc_ctx->ra;

    /* R0 = function return value */
    target_ctx->r0 = svc_ctx->r0;

    return;
}

/**
 * \brief Check whether the iovec parameters are valid, and the memory ranges
 *        are in the possession of the calling partition.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 *
 * \return Return \ref TFM_SUCCESS if the iovec parameters are valid, error
 *         code otherwise as in \ref tfm_status_e
 */
static enum tfm_status_e tfm_core_check_sfn_parameters(
                                          const struct tfm_sfn_req_s *desc_ptr)
{
    struct psa_invec *in_vec = (psa_invec *)desc_ptr->args[0];
    size_t in_len;
    struct psa_outvec *out_vec = (psa_outvec *)desc_ptr->args[2];
    size_t out_len;
    uint32_t i;
    uint32_t privileged_mode = TFM_PARTITION_UNPRIVILEGED_MODE;

    if ((desc_ptr->args[1] < 0) || (desc_ptr->args[3] < 0)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    in_len = (size_t)(desc_ptr->args[1]);
    out_len = (size_t)(desc_ptr->args[3]);

    /*
     * Get caller's privileged mode:
     * The privileged mode of an NS Secure Service caller is decided by the
     * tfm_core_has_xxx_access_to_region functions.
     * A secure caller can only be in privileged mode because the whole SPE is
     * running in privileged mode.
     */
    if (!desc_ptr->ns_caller) {
        privileged_mode = TFM_PARTITION_PRIVILEGED_MODE;
    }

    /* The number of vectors is within range. Extra checks to avoid overflow */
    if ((in_len > PSA_MAX_IOVEC) || (out_len > PSA_MAX_IOVEC) ||
        (in_len + out_len > PSA_MAX_IOVEC)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    /* Check whether the caller partition has write access to the iovec
     * structures themselves. Use the TT instruction for this.
     */
    if (in_len > 0) {
        if ((in_vec == NULL) ||
            (tfm_core_has_write_access_to_region(in_vec,
                            sizeof(psa_invec)*in_len, desc_ptr->ns_caller,
                            privileged_mode) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (in_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }
    if (out_len > 0) {
        if ((out_vec == NULL) ||
            (tfm_core_has_write_access_to_region(out_vec,
                            sizeof(psa_outvec)*out_len, desc_ptr->ns_caller,
                            privileged_mode) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (out_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }

    /* Check whether the caller partition has access to the data inside the
     * iovecs
     */
    for (i = 0; i < in_len; ++i) {
        if (in_vec[i].len > 0) {
            if ((in_vec[i].base == NULL) ||
                (tfm_core_has_read_access_to_region(in_vec[i].base,
                                in_vec[i].len, desc_ptr->ns_caller,
                                privileged_mode) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }
    for (i = 0; i < out_len; ++i) {
        if (out_vec[i].len > 0) {
            if ((out_vec[i].base == NULL) ||
                (tfm_core_has_write_access_to_region(out_vec[i].base,
                                out_vec[i].len, desc_ptr->ns_caller,
                                privileged_mode) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }

    return TFM_SUCCESS;
}

static void tfm_copy_iovec_parameters(struct iovec_args_t *target,
                                      const struct iovec_args_t *source)
{
    size_t i;

    /* The vectors have been sanity checked already, and since then the
     * interrupts have been kept disabled. So we can be sure that the
     * vectors haven't been tampered with since the check. So it is safe to
     * pass them to the called partition.
     */

    target->in_len = source->in_len;
    for (i = 0; i < source->in_len; ++i) {
        target->in_vec[i].base = source->in_vec[i].base;
        target->in_vec[i].len = source->in_vec[i].len;
    }
    target->out_len = source->out_len;
    for (i = 0; i < source->out_len; ++i) {
        target->out_vec[i].base = source->out_vec[i].base;
        target->out_vec[i].len = source->out_vec[i].len;
    }
}

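/**
 * \brief Clear out all the iovec parameters in the given structure.
 *
 * \param[out] args  The iovec structure to be cleared
 */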
static void tfm_clear_iovec_parameters(struct iovec_args_t *args)
{
    int i;

    args->in_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        args->in_vec[i].base = NULL;
        args->in_vec[i].len = 0;
    }
    args->out_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        args->out_vec[i].base = NULL;
        args->out_vec[i].len = 0;
    }
}

/**
 * \brief Check whether the partitions for the secure function call are in a
 *        proper state.
 *
 * \param[in] curr_partition_state    State of the partition to be called
 * \param[in] caller_partition_state  State of the caller partition
 *
 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
 */
static enum tfm_status_e check_partition_state(uint32_t curr_partition_state,
                                               uint32_t caller_partition_state)
{
    if (caller_partition_state != SPM_PARTITION_STATE_RUNNING) {
        /* Calling a partition from a non-running state (e.g. while handling
         * an IRQ) is not allowed.
         */
        return TFM_ERROR_INVALID_EXC_MODE;
    }

    if (curr_partition_state == SPM_PARTITION_STATE_RUNNING ||
        curr_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        curr_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
        curr_partition_state == SPM_PARTITION_STATE_BLOCKED) {
        /* Active partitions cannot be called! */
        return TFM_ERROR_PARTITION_NON_REENTRANT;
    } else if (curr_partition_state != SPM_PARTITION_STATE_IDLE) {
        /* The partition to be called is not in a proper state */
        return TFM_SECURE_LOCK_FAILED;
    }
    return TFM_SUCCESS;
}

/**
 * \brief Check whether the partition to be called for IRQ handling is in a
 *        proper state.
 *
 * \param[in] called_partition_state  State of the partition to be called
 *
 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
 */
static enum tfm_status_e check_irq_partition_state(
                                               uint32_t called_partition_state)
{
    if (called_partition_state == SPM_PARTITION_STATE_IDLE ||
        called_partition_state == SPM_PARTITION_STATE_RUNNING ||
        called_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        called_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
        called_partition_state == SPM_PARTITION_STATE_BLOCKED) {
        return TFM_SUCCESS;
    }
    return TFM_SECURE_LOCK_FAILED;
}

/**
 * \brief Calculate the address where the iovec parameters are to be saved for
 *        the called partition.
 *
 * \param[in] partition_idx  The index of the partition to be called.
 *
 * \return The address where the iovec parameters should be saved.
 */
static struct iovec_args_t *get_iovec_args_stack_address(uint32_t partition_idx)
{
    /* Save the iovecs on the common stack. */
    return &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
}

/**
 * \brief Returns the index of the partition with the given partition ID.
 *
 * \param[in] partition_id  Partition id
 *
 * \return the partition idx if partition_id is valid,
 *         \ref SPM_INVALID_PARTITION_IDX otherwise
 */
static uint32_t get_partition_idx(uint32_t partition_id)
{
    uint32_t i;

    if (partition_id == INVALID_PARTITION_ID) {
        return SPM_INVALID_PARTITION_IDX;
    }

    for (i = 0; i < g_spm_partition_db.partition_count; ++i) {
        if (g_spm_partition_db.partitions[i].static_data->partition_id ==
            partition_id) {
            return i;
        }
    }
    return SPM_INVALID_PARTITION_IDX;
}

/**
 * \brief Get the flags associated with a partition
 *
 * \param[in] partition_idx  Partition index
 *
 * \return Flags associated with the partition
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_flags(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
           partition_flags;
}

static enum tfm_status_e tfm_start_partition(
                                         const struct tfm_sfn_req_s *desc_ptr,
                                         uint32_t excReturn)
{
    enum tfm_status_e res;
    uint32_t caller_partition_idx = desc_ptr->caller_part_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;
    const struct spm_partition_runtime_data_t *caller_part_data;
    uint32_t caller_flags;
    register uint32_t partition_idx;
    uint32_t psp;
    uint32_t partition_psp, partition_psplim;
    uint32_t partition_state;
    uint32_t caller_partition_state;
    struct tfm_state_context_t *svc_ctx;
    uint32_t caller_partition_id;
    int32_t client_id;
    struct iovec_args_t *iovec_args;

    psp = __get_PSP();
    svc_ctx = (struct tfm_state_context_t *)psp;
    caller_flags = tfm_spm_partition_get_flags(caller_partition_idx);

    /* Check partition state consistency */
    if (((caller_flags & SPM_PART_FLAG_APP_ROT) != 0)
        != (!desc_ptr->ns_caller)) {
        /* Partition state inconsistency detected */
        return TFM_SECURE_LOCK_FAILED;
    }

    partition_idx = get_partition_idx(desc_ptr->sp_id);

    curr_part_data = tfm_spm_partition_get_runtime_data(partition_idx);
    caller_part_data = tfm_spm_partition_get_runtime_data(caller_partition_idx);
    partition_state = curr_part_data->partition_state;
    caller_partition_state = caller_part_data->partition_state;
    caller_partition_id = tfm_spm_partition_get_partition_id(
                                                         caller_partition_idx);

    if (!tfm_secure_api_initializing) {
        res = check_partition_state(partition_state, caller_partition_state);
        if (res != TFM_SUCCESS) {
            return res;
        }
    }

    /* Prepare switch to shared secure partition stack */
    /* In case the call is coming from the non-secure world, we save the iovecs
     * on the top of the stack. Also the stack seal is present below this
     * region. So the memory area that can actually be used as stack by the
     * partitions starts at a lower address.
     */
    partition_psp = (uint32_t)tfm_secure_stack_seal;
    partition_psplim =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Base);

    /* Store the context for the partition call */
    tfm_spm_partition_set_caller_partition_idx(partition_idx,
                                               caller_partition_idx);
    tfm_spm_partition_store_context(caller_partition_idx, psp, excReturn);

    if ((caller_flags & SPM_PART_FLAG_APP_ROT)) {
        tfm_spm_partition_set_caller_client_id(partition_idx,
                                               caller_partition_id);
    } else {
        client_id = tfm_nspm_get_current_client_id();
        if (client_id >= 0) {
            return TFM_SECURE_LOCK_FAILED;
        }
        tfm_spm_partition_set_caller_client_id(partition_idx, client_id);
    }

    /* In level one, only switch context and return from exception if in
     * handler mode
     */
    if ((desc_ptr->ns_caller) || (tfm_secure_api_initializing)) {
        if (tfm_spm_partition_set_iovec(partition_idx, desc_ptr->args) !=
            SPM_ERR_OK) {
            return TFM_ERROR_GENERIC;
        }
        iovec_args = get_iovec_args_stack_address(partition_idx);
        tfm_copy_iovec_parameters(iovec_args, &(curr_part_data->iovec_args));

        /* Prepare the partition context, update stack ptr */
        psp = (uint32_t)prepare_partition_iovec_ctx(svc_ctx, desc_ptr,
                                                    iovec_args,
                                                    (uint32_t *)partition_psp);
        __set_PSP(psp);
        tfm_arch_set_psplim(partition_psplim);
    }

    tfm_spm_partition_set_state(caller_partition_idx,
                                SPM_PARTITION_STATE_BLOCKED);
    tfm_spm_partition_set_state(partition_idx, SPM_PARTITION_STATE_RUNNING);
    tfm_secure_lock++;

    return TFM_SUCCESS;
}

static enum tfm_status_e tfm_start_partition_for_irq_handling(
                                          uint32_t excReturn,
                                          struct tfm_state_context_t *svc_ctx)
{
    uint32_t handler_partition_id = svc_ctx->r0;
    sfn_t unpriv_handler = (sfn_t)svc_ctx->r1;
    uint32_t irq_signal = svc_ctx->r2;
    IRQn_Type irq_line = (IRQn_Type)svc_ctx->r3;
    enum tfm_status_e res;
    uint32_t psp = __get_PSP();
    uint32_t handler_partition_psp;
    uint32_t handler_partition_state;
    uint32_t interrupted_partition_idx =
        tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t handler_partition_idx;

    handler_partition_idx = get_partition_idx(handler_partition_id);
    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                        handler_partition_idx);
    handler_partition_state = handler_part_data->partition_state;

    res = check_irq_partition_state(handler_partition_state);
    if (res != TFM_SUCCESS) {
        return res;
    }

    /* set mask for the partition */
    tfm_spm_partition_set_signal_mask(
                              handler_partition_idx,
                              handler_part_data->signal_mask | irq_signal);

    tfm_spm_hal_disable_irq(irq_line);

    /* save the current context of the interrupted partition */
    tfm_spm_partition_push_interrupted_ctx(interrupted_partition_idx);

    handler_partition_psp = psp;

    /* save the current context of the handler partition */
    tfm_spm_partition_push_handler_ctx(handler_partition_idx);

    /* Store caller for the partition */
    tfm_spm_partition_set_caller_partition_idx(handler_partition_idx,
                                               interrupted_partition_idx);

    psp = (uint32_t)prepare_partition_irq_ctx(svc_ctx, unpriv_handler,
                                             (int32_t *)handler_partition_psp);
    __set_PSP(psp);

    tfm_spm_partition_set_state(interrupted_partition_idx,
                                SPM_PARTITION_STATE_SUSPENDED);
    tfm_spm_partition_set_state(handler_partition_idx,
                                SPM_PARTITION_STATE_HANDLING_IRQ);

    return TFM_SUCCESS;
}

static enum tfm_status_e tfm_return_from_partition(uint32_t *excReturn)
{
    uint32_t current_partition_idx =
        tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *curr_part_data, *ret_part_data;
    uint32_t return_partition_idx;
    uint32_t return_partition_flags;
    uint32_t psp = __get_PSP();
    size_t i;
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;
    struct iovec_args_t *iovec_args;

    if (current_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    curr_part_data = tfm_spm_partition_get_runtime_data(current_partition_idx);
    return_partition_idx = curr_part_data->caller_partition_idx;

    if (return_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    ret_part_data = tfm_spm_partition_get_runtime_data(return_partition_idx);

    return_partition_flags = tfm_spm_partition_get_flags(return_partition_idx);

    tfm_secure_lock--;

    if (!(return_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        (tfm_secure_api_initializing)) {
        /* In TFM level 1 context restore is only done when
         * returning to NS or after initialization
         */
        /* Restore caller context */
        restore_caller_ctx(svc_ctx,
            (struct tfm_state_context_t *)ret_part_data->stack_ptr);
        *excReturn = ret_part_data->lr;
        __set_PSP(ret_part_data->stack_ptr);
        REGION_DECLARE_T(Image$$, ARM_LIB_STACK, $$ZI$$Base, uint32_t)[];
        uint32_t psp_stack_bottom =
            (uint32_t)REGION_NAME(Image$$, ARM_LIB_STACK, $$ZI$$Base);
        tfm_arch_set_psplim(psp_stack_bottom);

        iovec_args = &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];

        for (i = 0; i < curr_part_data->iovec_args.out_len; ++i) {
            curr_part_data->orig_outvec[i].len = iovec_args->out_vec[i].len;
        }
        tfm_clear_iovec_parameters(iovec_args);
    }

    tfm_spm_partition_cleanup_context(current_partition_idx);

    tfm_spm_partition_set_state(current_partition_idx,
                                SPM_PARTITION_STATE_IDLE);
    tfm_spm_partition_set_state(return_partition_idx,
                                SPM_PARTITION_STATE_RUNNING);

    return TFM_SUCCESS;
}

static enum tfm_status_e tfm_return_from_partition_irq_handling(
                                                        uint32_t *excReturn)
{
    uint32_t handler_partition_idx =
        tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t interrupted_partition_idx;
    uint32_t psp = __get_PSP();
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;

    if (handler_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                        handler_partition_idx);
    interrupted_partition_idx = handler_part_data->caller_partition_idx;

    if (interrupted_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    /* For level 1, modify PSP, so that the SVC stack frame disappears,
     * and return to the privileged handler using the stack frame still on the
     * MSP stack.
     */
    *excReturn = svc_ctx->ra;
    psp += sizeof(struct tfm_state_context_t);

    tfm_spm_partition_pop_handler_ctx(handler_partition_idx);
    tfm_spm_partition_pop_interrupted_ctx(interrupted_partition_idx);

    __set_PSP(psp);

    return TFM_SUCCESS;
}

static enum tfm_status_e tfm_check_sfn_req_integrity(
                                          const struct tfm_sfn_req_s *desc_ptr)
{
    if ((desc_ptr == NULL) ||
        (desc_ptr->sp_id == 0) ||
        (desc_ptr->sfn == NULL)) {
        /* invalid parameter */
        return TFM_ERROR_INVALID_PARAMETER;
    }
    return TFM_SUCCESS;
}

static enum tfm_status_e tfm_core_check_sfn_req_rules(
                                          const struct tfm_sfn_req_s *desc_ptr)
{
    /* Check partition idx validity */
    if (desc_ptr->caller_part_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_ERROR_NO_ACTIVE_PARTITION;
    }

    if ((desc_ptr->ns_caller) && (tfm_secure_lock != 0)) {
        /* Secure domain is already locked!
         * This should only happen if caller is secure partition!
         */
        /* This scenario is a potential security breach.
         * Error is handled in caller.
         */
        return TFM_ERROR_SECURE_DOMAIN_LOCKED;
    }

    if (tfm_secure_api_initializing) {
        int32_t id =
            tfm_spm_partition_get_partition_id(desc_ptr->caller_part_idx);

        if ((id != TFM_SP_CORE_ID) || (tfm_secure_lock != 0)) {
            /* Invalid request during system initialization */
            ERROR_MSG("Invalid service request during initialization!");
            return TFM_ERROR_NOT_INITIALIZED;
        }
    }

    return TFM_SUCCESS;
}

uint32_t tfm_spm_partition_get_partition_id(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
           partition_id;
}

uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
{
    if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
        return TFM_PARTITION_PRIVILEGED_MODE;
    } else {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
}

bool tfm_is_partition_privileged(uint32_t partition_idx)
{
    uint32_t flags = tfm_spm_partition_get_flags(partition_idx);

    return tfm_spm_partition_get_privileged_mode(flags) ==
           TFM_PARTITION_PRIVILEGED_MODE;
}

void tfm_spm_secure_api_init_done(void)
{
    tfm_secure_api_initializing = 0;
}

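/*
 * Handle a secure function request: validate the request descriptor, the
 * iovec parameters and the SPM call rules, then switch to the target
 * partition's context. Interrupts are kept disabled while the partition
 * database is updated.
 */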
enum tfm_status_e tfm_spm_sfn_request_handler(
                            struct tfm_sfn_req_s *desc_ptr, uint32_t excReturn)
{
    enum tfm_status_e res;

    res = tfm_check_sfn_req_integrity(desc_ptr);
    if (res != TFM_SUCCESS) {
        ERROR_MSG("Invalid service request!");
        tfm_secure_api_error_handler();
    }

    __disable_irq();

    desc_ptr->caller_part_idx = tfm_spm_partition_get_running_partition_idx();

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        __enable_irq();
        tfm_secure_api_error_handler();
    }

    res = tfm_core_check_sfn_req_rules(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* FixMe: error compartmentalization TBD */
        tfm_spm_partition_set_state(
            desc_ptr->caller_part_idx, SPM_PARTITION_STATE_CLOSED);
        __enable_irq();
        ERROR_MSG("Unauthorized service request!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition(desc_ptr, excReturn);
    if (res != TFM_SUCCESS) {
        /* FixMe: consider possible fault scenarios */
        __enable_irq();
        ERROR_MSG("Failed to process service request!");
        tfm_secure_api_error_handler();
    }

    __enable_irq();

    return res;
}

int32_t tfm_spm_sfn_request_thread_mode(struct tfm_sfn_req_s *desc_ptr)
{
    enum tfm_status_e res;
    int32_t *args;
    int32_t retVal;

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        return (int32_t)res;
    }

    /* No excReturn value is needed as no exception handling is used */
    res = tfm_spm_sfn_request_handler(desc_ptr, 0);

    if (res != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    /* Secure partition to secure partition call in TFM level 1 */
    args = desc_ptr->args;
    retVal = desc_ptr->sfn(args[0], args[1], args[2], args[3]);

    /* return handler should restore original exc_return value... */
    res = tfm_return_from_partition(NULL);
    if (res == TFM_SUCCESS) {
        /* If unlock successful, pass SS return value to caller */
        return retVal;
    } else {
        /* Unlock errors indicate ctx database corruption or unknown
         * anomalies. Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }
    return (int32_t)res;
}

int32_t tfm_spm_check_buffer_access(uint32_t partition_idx,
                                    void *start_addr,
                                    size_t len,
                                    uint32_t alignment)
{
    uintptr_t start_addr_value = (uintptr_t)start_addr;
    uintptr_t end_addr_value = (uintptr_t)start_addr + len;
    uintptr_t alignment_mask;

    alignment_mask = (((uintptr_t)1) << alignment) - 1;

    /* Check that the pointer is aligned properly */
    if (start_addr_value & alignment_mask) {
        /* not aligned, return error */
        return 0;
    }

    /* Protect against overflow (and zero len) */
    if (end_addr_value <= start_addr_value) {
        return 0;
    }

    /* For privileged partition execution, all secure data memory and stack
     * is accessible
     */
    if (start_addr_value >= S_DATA_START &&
        end_addr_value <= (S_DATA_START + S_DATA_SIZE)) {
        return 1;
    }

    return 0;
}

void tfm_spm_get_caller_client_id_handler(uint32_t *svc_args)
{
    uintptr_t result_ptr_value = svc_args[0];
    uint32_t running_partition_idx =
        tfm_spm_partition_get_running_partition_idx();
    const uint32_t running_partition_flags =
        tfm_spm_partition_get_flags(running_partition_idx);
    const struct spm_partition_runtime_data_t *curr_part_data =
        tfm_spm_partition_get_runtime_data(running_partition_idx);
    int res = 0;

    if (!(running_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_SUSPENDED) {
        /* This handler shouldn't be called from outside partition context.
         * Also if the current partition is handling IRQ, the caller partition
         * index might not be valid;
         * Partitions are only allowed to run while S domain is locked.
         */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    /* Make sure that the output pointer points to a memory area that is owned
     * by the partition
     */
    res = tfm_spm_check_buffer_access(running_partition_idx,
                                      (void *)result_ptr_value,
                                      sizeof(curr_part_data->caller_client_id),
                                      2);
    if (!res) {
        /* Not in accessible range, return error */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    *((int32_t *)result_ptr_value) = curr_part_data->caller_client_id;

    /* Store return value in r0 */
    svc_args[0] = (uint32_t)TFM_SUCCESS;
}

/* This SVC handler is called if veneer is running in thread mode */
uint32_t tfm_spm_partition_request_svc_handler(
        const uint32_t *svc_ctx, uint32_t excReturn)
{
    struct tfm_sfn_req_s *desc_ptr;

    if (!(excReturn & EXC_RETURN_STACK_PROCESS)) {
        /* Service request SVC called with MSP active.
         * Either invalid configuration for Thread mode or SVC called
         * from Handler mode, which is not supported.
         * FixMe: error severity TBD
         */
        ERROR_MSG("Service request SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    desc_ptr = (struct tfm_sfn_req_s *)svc_ctx[0];

    if (tfm_spm_sfn_request_handler(desc_ptr, excReturn) != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    return EXC_RETURN_SECURE_FUNCTION;
}

/* This SVC handler is called, if a thread mode execution environment is to
 * be set up, to run an unprivileged IRQ handler
 */
uint32_t tfm_spm_depriv_req_handler(uint32_t *svc_args, uint32_t excReturn)
{
    struct tfm_state_context_t *svc_ctx =
        (struct tfm_state_context_t *)svc_args;

    enum tfm_status_e res;

    if (excReturn & EXC_RETURN_STACK_PROCESS) {
        /* FixMe: error severity TBD */
        ERROR_MSG("Partition request SVC called with PSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition_for_irq_handling(excReturn, svc_ctx);
    if (res != TFM_SUCCESS) {
        /* The partition is in an invalid state (UNINIT or CLOSED), so none of
         * its code can be run
         */
        /* FixMe: For now this case is handled with TF-M panic, however it would
         * be possible to skip the execution of the interrupt handler, and
         * resume the execution of the interrupted code.
         */
        tfm_secure_api_error_handler();
    }
    return EXC_RETURN_SECURE_FUNCTION;
}

/* This SVC handler is called when sfn returns */
uint32_t tfm_spm_partition_return_handler(uint32_t lr)
{
    enum tfm_status_e res;

    if (!(lr & EXC_RETURN_STACK_PROCESS)) {
        /* Partition return SVC called with MSP active.
         * This should not happen!
         */
        ERROR_MSG("Partition return SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_return_from_partition(&lr);
    if (res != TFM_SUCCESS) {
        /* Unlock errors indicate ctx database corruption or unknown anomalies
         * Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }

    return lr;
}

/* This SVC handler is called if a deprivileged IRQ handler was executed, and
 * the execution environment is to be set back for the privileged handler mode
 */
uint32_t tfm_spm_depriv_return_handler(uint32_t *irq_svc_args, uint32_t lr)
{
    enum tfm_status_e res;
    struct tfm_state_context_t *irq_svc_ctx;

    /* Take into account the sealed stack */
    irq_svc_args += 2;

    irq_svc_ctx = (struct tfm_state_context_t *)irq_svc_args;

    if (!(lr & EXC_RETURN_STACK_PROCESS)) {
        /* Partition request SVC called with MSP active.
         * FixMe: error severity TBD
         */
        ERROR_MSG("Partition request SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_return_from_partition_irq_handling(&lr);
    if (res != TFM_SUCCESS) {
        /* Unlock errors indicate ctx database corruption or unknown anomalies
         * Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }

    irq_svc_ctx->ra = lr;

    return EXC_RETURN_SECURE_HANDLER;
}

/* FIXME: get_irq_line_for_signal is also implemented in the ipc folder. */
/**
 * \brief Return the IRQ line number associated with a signal
 *
 * \param[in] partition_id  The ID of the partition in which we look for the
 *                          signal
 * \param[in] signal        The signal we do the query for
 *
 * \retval >=0  The IRQ line number associated with a signal in the partition
 * \retval <0   error
 */
static IRQn_Type get_irq_line_for_signal(int32_t partition_id,
                                         psa_signal_t signal)
{
    size_t i;

    for (i = 0; i < tfm_core_irq_signals_count; ++i) {
        if (tfm_core_irq_signals[i].partition_id == partition_id &&
            tfm_core_irq_signals[i].signal_value == signal) {
            return tfm_core_irq_signals[i].irq_line;
        }
    }
    return (IRQn_Type) -1;
}

void tfm_spm_enable_irq_handler(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
        (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t running_partition_idx =
        tfm_spm_partition_get_running_partition_idx();
    uint32_t running_partition_id =
        tfm_spm_partition_get_partition_id(running_partition_idx);
    IRQn_Type irq_line;

    /* Only a single signal is allowed */
    if (!tfm_is_one_bit_set(irq_signal)) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    tfm_spm_hal_enable_irq(irq_line);
}

void tfm_spm_disable_irq_handler(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
        (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t running_partition_idx =
        tfm_spm_partition_get_running_partition_idx();
    uint32_t running_partition_id =
        tfm_spm_partition_get_partition_id(running_partition_idx);
    IRQn_Type irq_line;

    /* Only a single signal is allowed */
    if (!tfm_is_one_bit_set(irq_signal)) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    tfm_spm_hal_disable_irq(irq_line);
}

void tfm_spm_psa_wait(uint32_t *svc_args)
{
    /* Look for partition that is ready for run */
    struct tfm_state_context_t *svc_ctx =
        (struct tfm_state_context_t *)svc_args;
    uint32_t running_partition_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;

    psa_signal_t signal_mask = svc_ctx->r0;
    uint32_t timeout = svc_ctx->r1;

    /*
     * Timeout[30:0] are reserved for future use.
     * SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    running_partition_idx = tfm_spm_partition_get_running_partition_idx();
    curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);

    if (timeout == PSA_BLOCK) {
        /* FIXME: Scheduling is not available in library model, and busy wait is
         * also not possible as this code is running in SVC context, and it
         * cannot be pre-empted by interrupts. So do nothing here for now
         */
        (void) signal_mask;
    }

    svc_ctx->r0 = curr_part_data->signal_mask;
}

void tfm_spm_psa_eoi(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
        (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t signal_mask;
    uint32_t running_partition_idx;
    uint32_t running_partition_id;
    const struct spm_partition_runtime_data_t *curr_part_data;
    IRQn_Type irq_line;

    running_partition_idx = tfm_spm_partition_get_running_partition_idx();
    running_partition_id =
        tfm_spm_partition_get_partition_id(running_partition_idx);
    curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);

    /* Only a single signal is allowed */
    if (!tfm_is_one_bit_set(irq_signal)) {
        tfm_secure_api_error_handler();
    }

    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    tfm_spm_hal_clear_pending_irq(irq_line);
    tfm_spm_hal_enable_irq(irq_line);

    signal_mask = curr_part_data->signal_mask & ~irq_signal;
    tfm_spm_partition_set_signal_mask(running_partition_idx, signal_mask);
}

/*
 * This function is called when a secure partition causes an error.
 * In case of an error in the error handling, a non-zero value has to be
 * returned.
 */
static void tfm_spm_partition_err_handler(
    const struct spm_partition_desc_t *partition,
    int32_t err_code)
{
    (void)err_code;

    tfm_spm_partition_set_state(partition->static_data->partition_id,
                                SPM_PARTITION_STATE_CLOSED);
}

enum spm_err_t tfm_spm_partition_init(void)
{
    struct spm_partition_desc_t *part;
    struct tfm_sfn_req_s desc;
    int32_t args[4] = {0};
    int32_t fail_cnt = 0;
    uint32_t idx;
    const struct tfm_spm_partition_platform_data_t **platform_data_p;

    /* Call the init function for each partition */
    for (idx = 0; idx < g_spm_partition_db.partition_count; ++idx) {
        part = &g_spm_partition_db.partitions[idx];
        platform_data_p = part->platform_data_list;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(idx,
                        *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    fail_cnt++;
                }
                ++platform_data_p;
            }
        }
        if (part->static_data->partition_init == NULL) {
            tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            tfm_spm_partition_set_caller_partition_idx(idx,
                                                   SPM_INVALID_PARTITION_IDX);
        } else {
            int32_t res;

            desc.args = args;
            desc.ns_caller = false;
            desc.sfn = (sfn_t)part->static_data->partition_init;
            desc.sp_id = part->static_data->partition_id;
            res = tfm_core_sfn_request(&desc);
            if (res == TFM_SUCCESS) {
                tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            } else {
                tfm_spm_partition_err_handler(part, res);
                fail_cnt++;
            }
        }
    }

    tfm_spm_secure_api_init_done();

    if (fail_cnt == 0) {
        return SPM_ERR_OK;
    } else {
        return SPM_ERR_PARTITION_NOT_AVAILABLE;
    }
}

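/*
 * The following helpers maintain a per-partition context stack
 * (runtime_data.ctx_stack_ptr): the partition state (and, for handler
 * frames, the caller partition index) is pushed when an IRQ handler is
 * entered and popped again when it returns.
 */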
void tfm_spm_partition_push_interrupted_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
        &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct interrupted_ctx_stack_frame_t *stack_frame =
        (struct interrupted_ctx_stack_frame_t *)runtime_data->ctx_stack_ptr;

    stack_frame->partition_state = runtime_data->partition_state;

    runtime_data->ctx_stack_ptr +=
        sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
}

void tfm_spm_partition_pop_interrupted_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
        &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct interrupted_ctx_stack_frame_t *stack_frame;

    runtime_data->ctx_stack_ptr -=
        sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);

    stack_frame = (struct interrupted_ctx_stack_frame_t *)
                  runtime_data->ctx_stack_ptr;
    tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
    stack_frame->partition_state = 0;
}

void tfm_spm_partition_push_handler_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
        &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct handler_ctx_stack_frame_t *stack_frame =
        (struct handler_ctx_stack_frame_t *)
        runtime_data->ctx_stack_ptr;

    stack_frame->partition_state = runtime_data->partition_state;
    stack_frame->caller_partition_idx = runtime_data->caller_partition_idx;

    runtime_data->ctx_stack_ptr +=
        sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
}

void tfm_spm_partition_pop_handler_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
        &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct handler_ctx_stack_frame_t *stack_frame;

    runtime_data->ctx_stack_ptr -=
        sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);

    stack_frame = (struct handler_ctx_stack_frame_t *)
                  runtime_data->ctx_stack_ptr;

    tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
    stack_frame->partition_state = 0;
    tfm_spm_partition_set_caller_partition_idx(
        partition_idx, stack_frame->caller_partition_idx);
    stack_frame->caller_partition_idx = 0;
}

void tfm_spm_partition_store_context(uint32_t partition_idx,
    uint32_t stack_ptr, uint32_t lr)
{
    g_spm_partition_db.partitions[partition_idx].
            runtime_data.stack_ptr = stack_ptr;
    g_spm_partition_db.partitions[partition_idx].
            runtime_data.lr = lr;
}

const struct spm_partition_runtime_data_t *
     tfm_spm_partition_get_runtime_data(uint32_t partition_idx)
{
    return &(g_spm_partition_db.partitions[partition_idx].runtime_data);
}

void tfm_spm_partition_set_state(uint32_t partition_idx, uint32_t state)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.partition_state =
        state;
    if (state == SPM_PARTITION_STATE_RUNNING ||
        state == SPM_PARTITION_STATE_HANDLING_IRQ) {
        g_spm_partition_db.running_partition_idx = partition_idx;
    }
}

void tfm_spm_partition_set_caller_partition_idx(uint32_t partition_idx,
                                                uint32_t caller_partition_idx)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.
            caller_partition_idx = caller_partition_idx;
}

void tfm_spm_partition_set_signal_mask(uint32_t partition_idx,
                                       uint32_t signal_mask)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.
            signal_mask = signal_mask;
}

void tfm_spm_partition_set_caller_client_id(uint32_t partition_idx,
                                            int32_t caller_client_id)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.
            caller_client_id = caller_client_id;
}

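/**
 * \brief Copy the caller's iovec parameters (psa_invec/psa_outvec arrays and
 *        their lengths) from the request arguments into the runtime data of
 *        the partition, and remember the caller's original outvec array so
 *        that the output lengths can be updated on return.
 *
 * \param[in] partition_idx  Partition index
 * \param[in] args           The request arguments (in_vec, in_len, out_vec,
 *                           out_len)
 *
 * \return \ref SPM_ERR_OK if the parameters are valid, error code otherwise.
 */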
enum spm_err_t tfm_spm_partition_set_iovec(uint32_t partition_idx,
                                           const int32_t *args)
{
    struct spm_partition_runtime_data_t *runtime_data =
        &g_spm_partition_db.partitions[partition_idx].runtime_data;
    size_t i;

    if ((args[1] < 0) || (args[3] < 0)) {
        return SPM_ERR_INVALID_PARAMETER;
    }

    runtime_data->iovec_args.in_len = (size_t)args[1];
    for (i = 0U; i < runtime_data->iovec_args.in_len; ++i) {
        runtime_data->iovec_args.in_vec[i].base =
            ((psa_invec *)args[0])[i].base;
        runtime_data->iovec_args.in_vec[i].len = ((psa_invec *)args[0])[i].len;
    }
    runtime_data->iovec_args.out_len = (size_t)args[3];
    for (i = 0U; i < runtime_data->iovec_args.out_len; ++i) {
        runtime_data->iovec_args.out_vec[i].base =
            ((psa_outvec *)args[2])[i].base;
        runtime_data->iovec_args.out_vec[i].len =
            ((psa_outvec *)args[2])[i].len;
    }
    runtime_data->orig_outvec = (psa_outvec *)args[2];

    return SPM_ERR_OK;
}

uint32_t tfm_spm_partition_get_running_partition_idx(void)
{
    return g_spm_partition_db.running_partition_idx;
}

void tfm_spm_partition_cleanup_context(uint32_t partition_idx)
{
    struct spm_partition_desc_t *partition =
        &(g_spm_partition_db.partitions[partition_idx]);
    int32_t i;

    partition->runtime_data.caller_partition_idx = SPM_INVALID_PARTITION_IDX;
    partition->runtime_data.iovec_args.in_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        partition->runtime_data.iovec_args.in_vec[i].base = 0;
        partition->runtime_data.iovec_args.in_vec[i].len = 0;
    }
    partition->runtime_data.iovec_args.out_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        partition->runtime_data.iovec_args.out_vec[i].base = 0;
        partition->runtime_data.iovec_args.out_vec[i].len = 0;
    }
    partition->runtime_data.orig_outvec = 0;
}

void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
{
    uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
    uint32_t running_partition_flags = 0;
    uint32_t running_partition_idx;

    /* Check permissions on request type basis */

    switch (svc_ctx->r0) {
    case TFM_SPM_REQUEST_RESET_VOTE:
        running_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
        running_partition_flags = tfm_spm_partition_get_flags(
                                                        running_partition_idx);

        /* Currently only PSA Root of Trust services are allowed to make Reset
         * vote request
         */
        if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
            *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
        }

        /* FixMe: this is a placeholder for checks to be performed before
         * allowing execution of reset
         */
        *res_ptr = (uint32_t)TFM_SUCCESS;

        break;
    default:
        *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
    }
}

enum spm_err_t tfm_spm_db_init(void)
{
    uint32_t i;

    /* This function initialises partition db */

    /* For the non-secure Execution environment */
    tfm_nspm_configure_clients();

    for (i = 0; i < g_spm_partition_db.partition_count; i++) {
        g_spm_partition_db.partitions[i].runtime_data.partition_state =
            SPM_PARTITION_STATE_UNINIT;
        g_spm_partition_db.partitions[i].runtime_data.caller_partition_idx =
            SPM_INVALID_PARTITION_IDX;
        g_spm_partition_db.partitions[i].runtime_data.caller_client_id =
            TFM_INVALID_CLIENT_ID;
        g_spm_partition_db.partitions[i].runtime_data.ctx_stack_ptr =
            ctx_stack_list[i];
        g_spm_partition_db.partitions[i].static_data = &static_data_list[i];
        g_spm_partition_db.partitions[i].platform_data_list =
            platform_data_list_list[i];
    }
    g_spm_partition_db.is_init = 1;

    return SPM_ERR_OK;
}