blob: 17f0136ec43702d6ddbde2b3ffe74b3aad81c445 [file] [log] [blame]
Mingyang Sunda01a972019-07-12 17:32:59 +08001/*
Kevin Penga20b5af2021-01-11 11:20:52 +08002 * Copyright (c) 2017-2021, Arm Limited. All rights reserved.
Mingyang Sunda01a972019-07-12 17:32:59 +08003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 *
6 */
7
Mingyang Sunabb1aab2020-02-18 13:49:08 +08008#include <stdint.h>
Mingyang Sunda01a972019-07-12 17:32:59 +08009#include <stdbool.h>
Mingyang Sunabb1aab2020-02-18 13:49:08 +080010#include <arm_cmse.h>
11#include "tfm_nspm.h"
Mingyang Sunabb1aab2020-02-18 13:49:08 +080012#include "tfm_api.h"
13#include "tfm_arch.h"
14#include "tfm_irq_list.h"
15#include "psa/service.h"
16#include "tfm_core_mem_check.h"
Mingyang Sunbd7ceb52020-06-11 16:53:03 +080017#include "tfm_peripherals_def.h"
Mingyang Sunabb1aab2020-02-18 13:49:08 +080018#include "tfm_secure_api.h"
Mingyang Sunda01a972019-07-12 17:32:59 +080019#include "tfm_spm_hal.h"
Soby Mathew960521a2020-09-29 12:48:50 +010020#include "tfm_core_trustzone.h"
Mingyang Sun7397b4f2020-06-17 15:07:45 +080021#include "spm_func.h"
Mingyang Sunda01a972019-07-12 17:32:59 +080022#include "region_defs.h"
Mingyang Sunabb1aab2020-02-18 13:49:08 +080023#include "region.h"
Mingyang Sun7397b4f2020-06-17 15:07:45 +080024#include "spm_partition_defs.h"
25#include "psa_manifest/pid.h"
Summer Qin5fdcf632020-06-22 16:49:24 +080026#include "tfm/tfm_spm_services.h"
Mingyang Sunbd7ceb52020-06-11 16:53:03 +080027#include "tfm_spm_db_func.inc"
Mingyang Sunabb1aab2020-02-18 13:49:08 +080028
/* Structure to temporarily save iovec parameters from PSA client.
 * The client-owned vectors are copied in here before validation so the
 * checked values cannot be modified by the client afterwards (protects
 * against time-of-check/time-of-use races).
 */
struct iovec_params_t {
    psa_invec in_vec[PSA_MAX_IOVEC];   /* Local copy of client input vectors */
    size_t in_len;                     /* Number of valid entries in in_vec */
    psa_outvec out_vec[PSA_MAX_IOVEC]; /* Local copy of client output vectors */
    size_t out_len;                    /* Number of valid entries in out_vec */

    /* Original client outvec pointer, kept so that the produced lengths can
     * be written back to the caller when the service returns */
    psa_outvec *orig_outvec;
};
38
Mingyang Sunabb1aab2020-02-18 13:49:08 +080039#define EXC_RETURN_SECURE_FUNCTION 0xFFFFFFFD
40#define EXC_RETURN_SECURE_HANDLER 0xFFFFFFF1
41
42#ifndef TFM_LVL
43#error TFM_LVL is not defined!
44#endif
45
Soby Mathew960521a2020-09-29 12:48:50 +010046#ifdef TFM_MULTI_CORE_TOPOLOGY
47#error Multi core is not supported by Function mode
48#endif
49
Mingyang Sunabb1aab2020-02-18 13:49:08 +080050REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Base, uint32_t);
51REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Limit, struct iovec_args_t)[];
52
Soby Mathew960521a2020-09-29 12:48:50 +010053static uint32_t *tfm_secure_stack_seal =
54 ((uint32_t *)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1]) - 2;
55
56REGION_DECLARE_T(Image$$, ARM_LIB_STACK_SEAL, $$ZI$$Base, uint32_t);
57
58/*
59 * Function to seal the psp stacks for Function model of TF-M.
60 */
61void tfm_spm_seal_psp_stacks(void)
62{
63 /*
64 * The top of TFM_SECURE_STACK is used for iovec parameters, we need to
65 * place the seal between iovec parameters and partition stack.
66 *
67 * Image$$TFM_SECURE_STACK$$ZI$$Limit-> +-------------------------+
68 * | |
69 * | iovec parameters for |
70 * | partition |
71 * (Image$$TFM_SECURE_STACK$$ZI$$Limit -| |
72 * sizeof(iovec_args_t)) -> +-------------------------+
73 * | Stack Seal |
74 * +-------------------------+
75 * | |
76 * | Partition stack |
77 * | |
78 * Image$$TFM_SECURE_STACK$$ZI$$Base-> +-------------------------+
79 */
80 *(tfm_secure_stack_seal) = TFM_STACK_SEAL_VALUE;
81 *(tfm_secure_stack_seal + 1) = TFM_STACK_SEAL_VALUE;
82
83 /*
84 * Seal the ARM_LIB_STACK by writing the seal value to the reserved
85 * region.
86 */
87 uint32_t *arm_lib_stck_seal_base = (uint32_t *)&REGION_NAME(Image$$,
88 ARM_LIB_STACK_SEAL, $$ZI$$Base);
89
90 *(arm_lib_stck_seal_base) = TFM_STACK_SEAL_VALUE;
91 *(arm_lib_stck_seal_base + 1) = TFM_STACK_SEAL_VALUE;
92}
93
Mingyang Sunabb1aab2020-02-18 13:49:08 +080094/*
95 * This is the "Big Lock" on the secure side, to guarantee single entry
96 * to SPE
97 */
Summer Qin5fdcf632020-06-22 16:49:24 +080098static int32_t tfm_secure_lock;
Mingyang Sunabb1aab2020-02-18 13:49:08 +080099static int32_t tfm_secure_api_initializing = 1;
Mingyang Sunda01a972019-07-12 17:32:59 +0800100
/**
 * \brief Build an exception-return stack frame that enters the partition's
 *        secure function in thread mode, with the iovec arguments in R0-R3.
 *
 * \param[in] svc_ctx    The stacked SVC context of the caller
 * \param[in] desc_ptr   The secure function request descriptor
 * \param[in] iovec_args The iovec arguments to pass to the secure function
 * \param[in] dst        Stack pointer below which the frame is created
 *
 * \return Pointer to the created frame (the new stack top).
 *
 * \note The stores below must remain in exactly this order: they lay out a
 *       hardware basic exception frame (xPSR, PC, LR, R12, R3-R0).
 */
static uint32_t *prepare_partition_iovec_ctx(
                             const struct tfm_state_context_t *svc_ctx,
                             const struct tfm_sfn_req_s *desc_ptr,
                             const struct iovec_args_t *iovec_args,
                             uint32_t *dst)
{
    /* XPSR = as was when called, but make sure it's thread mode */
    *(--dst) = svc_ctx->xpsr & 0xFFFFFE00U;
    /* ReturnAddress = resume veneer in new context */
    *(--dst) = svc_ctx->ra;
    /* LR = sfn address */
    *(--dst) = (uint32_t)desc_ptr->sfn;
    /* R12 = don't care */
    *(--dst) = 0U;

    /* R0-R3 = sfn arguments: (in_vec, in_len, out_vec, out_len) */
    *(--dst) = iovec_args->out_len;
    *(--dst) = (uint32_t)iovec_args->out_vec;
    *(--dst) = iovec_args->in_len;
    *(--dst) = (uint32_t)iovec_args->in_vec;

    return dst;
}
124
/**
 * \brief Create a stack frame that sets the execution environment to thread
 *        mode on exception return.
 *
 * \param[in] svc_ctx        The stacked SVC context
 * \param[in] unpriv_handler The unprivileged IRQ handler to be called
 * \param[in] dst            A pointer where the context is to be created. (the
 *                           pointer is considered to be a stack pointer, and
 *                           the frame is created below it)
 *
 * \return A pointer pointing at the created stack frame.
 *
 * \note The stores lay out a hardware basic exception frame; their order
 *       must not change.
 */
static int32_t *prepare_partition_irq_ctx(
                             const struct tfm_state_context_t *svc_ctx,
                             sfn_t unpriv_handler,
                             int32_t *dst)
{
    int i;

    /* XPSR = as was when called, but make sure it's thread mode */
    *(--dst) = svc_ctx->xpsr & 0xFFFFFE00;
    /* ReturnAddress = resume to the privileged handler code, but execute it
     * unprivileged.
     */
    *(--dst) = svc_ctx->ra;
    /* LR = start address */
    *(--dst) = (int32_t)unpriv_handler;

    /* R12, R0-R3 unused arguments */
    for (i = 0; i < 5; ++i) {
        *(--dst) = 0;
    }

    return dst;
}
160
161static void restore_caller_ctx(const struct tfm_state_context_t *svc_ctx,
162 struct tfm_state_context_t *target_ctx)
163{
164 /* ReturnAddress = resume veneer after second SVC */
165 target_ctx->ra = svc_ctx->ra;
166
167 /* R0 = function return value */
168 target_ctx->r0 = svc_ctx->r0;
169
170 return;
171}
172
/**
 * \brief Check whether the iovec parameters are valid, and the memory ranges
 *        are in the possession of the calling partition.
 *
 * \param[in]  desc_ptr  The secure function request descriptor
 * \param[out] iovec_ptr The local buffer to store iovec arguments
 *
 * \return Return /ref TFM_SUCCESS if the iovec parameters are valid, error code
 *         otherwise as in /ref tfm_status_e
 */
static enum tfm_status_e tfm_core_check_sfn_parameters(
                                           const struct tfm_sfn_req_s *desc_ptr,
                                           struct iovec_params_t *iovec_ptr)
{
    struct psa_invec *in_vec = (psa_invec *)desc_ptr->args[0];
    size_t in_len;
    struct psa_outvec *out_vec = (psa_outvec *)desc_ptr->args[2];
    size_t out_len;
    uint32_t i;
    uint32_t privileged_mode = TFM_PARTITION_UNPRIVILEGED_MODE;

    /* The vector counts arrive as signed argument words; reject negative
     * values before the casts to size_t below. */
    if ((desc_ptr->args[1] < 0) || (desc_ptr->args[3] < 0)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    in_len = (size_t)(desc_ptr->args[1]);
    out_len = (size_t)(desc_ptr->args[3]);

    /*
     * Get caller's privileged mode:
     * The privileged mode of NS Secure Service caller will be decided by the
     * tfm_core_has_xxx_access_to_region functions.
     * Secure caller can be only privileged mode because the whole SPE is
     * running under privileged mode
     */
    if (!desc_ptr->ns_caller) {
        privileged_mode = TFM_PARTITION_PRIVILEGED_MODE;
    }

    /* The number of vectors are within range. Extra checks to avoid overflow */
    if ((in_len > PSA_MAX_IOVEC) || (out_len > PSA_MAX_IOVEC) ||
        (in_len + out_len > PSA_MAX_IOVEC)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    /* Check whether the caller partition has write access to the iovec
     * structures themselves. Use the TT instruction for this.
     */
    if (in_len > 0) {
        if ((in_vec == NULL) ||
            (tfm_core_has_write_access_to_region(in_vec,
                            sizeof(psa_invec)*in_len, desc_ptr->ns_caller,
                            privileged_mode) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        /* A non-NULL vector pointer with a zero count is inconsistent */
        if (in_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }
    if (out_len > 0) {
        if ((out_vec == NULL) ||
            (tfm_core_has_write_access_to_region(out_vec,
                            sizeof(psa_outvec)*out_len, desc_ptr->ns_caller,
                            privileged_mode) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (out_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }

    /* Copy iovec parameters into a local buffer before validating them, so
     * the values checked below cannot be changed by the client afterwards
     * (time-of-check/time-of-use protection) */
    iovec_ptr->in_len = in_len;
    for (i = 0; i < in_len; ++i) {
        iovec_ptr->in_vec[i].base = in_vec[i].base;
        iovec_ptr->in_vec[i].len = in_vec[i].len;
    }
    iovec_ptr->out_len = out_len;
    for (i = 0; i < out_len; ++i) {
        iovec_ptr->out_vec[i].base = out_vec[i].base;
        iovec_ptr->out_vec[i].len = out_vec[i].len;
    }
    /* Keep the original outvec pointer so result lengths can be written back
     * to the caller on return from the partition */
    iovec_ptr->orig_outvec = out_vec;

    /* Check whether the caller partition has access to the data inside the
     * iovecs
     */
    for (i = 0; i < in_len; ++i) {
        if (iovec_ptr->in_vec[i].len > 0) {
            if ((iovec_ptr->in_vec[i].base == NULL) ||
                (tfm_core_has_read_access_to_region(iovec_ptr->in_vec[i].base,
                            iovec_ptr->in_vec[i].len, desc_ptr->ns_caller,
                            privileged_mode) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }
    for (i = 0; i < out_len; ++i) {
        if (iovec_ptr->out_vec[i].len > 0) {
            if ((iovec_ptr->out_vec[i].base == NULL) ||
                (tfm_core_has_write_access_to_region(iovec_ptr->out_vec[i].base,
                            iovec_ptr->out_vec[i].len, desc_ptr->ns_caller,
                            privileged_mode) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }

    return TFM_SUCCESS;
}
285
286static void tfm_copy_iovec_parameters(struct iovec_args_t *target,
287 const struct iovec_args_t *source)
288{
289 size_t i;
290
291 /* The vectors have been sanity checked already, and since then the
292 * interrupts have been kept disabled. So we can be sure that the
293 * vectors haven't been tampered with since the check. So it is safe to pass
294 * it to the called partition.
295 */
296
297 target->in_len = source->in_len;
298 for (i = 0; i < source->in_len; ++i) {
299 target->in_vec[i].base = source->in_vec[i].base;
300 target->in_vec[i].len = source->in_vec[i].len;
301 }
302 target->out_len = source->out_len;
303 for (i = 0; i < source->out_len; ++i) {
304 target->out_vec[i].base = source->out_vec[i].base;
305 target->out_vec[i].len = source->out_vec[i].len;
306 }
307}
308
309static void tfm_clear_iovec_parameters(struct iovec_args_t *args)
310{
311 int i;
312
313 args->in_len = 0;
314 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
315 args->in_vec[i].base = NULL;
316 args->in_vec[i].len = 0;
317 }
318 args->out_len = 0;
319 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
320 args->out_vec[i].base = NULL;
321 args->out_vec[i].len = 0;
322 }
323}
324
325/**
326 * \brief Check whether the partitions for the secure function call are in a
327 * proper state.
328 *
329 * \param[in] curr_partition_state State of the partition to be called
330 * \param[in] caller_partition_state State of the caller partition
331 *
332 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
333 */
334static enum tfm_status_e check_partition_state(uint32_t curr_partition_state,
335 uint32_t caller_partition_state)
336{
337 if (caller_partition_state != SPM_PARTITION_STATE_RUNNING) {
338 /* Calling partition from non-running state (e.g. during handling IRQ)
339 * is not allowed.
340 */
341 return TFM_ERROR_INVALID_EXC_MODE;
342 }
343
344 if (curr_partition_state == SPM_PARTITION_STATE_RUNNING ||
345 curr_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
346 curr_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
347 curr_partition_state == SPM_PARTITION_STATE_BLOCKED) {
348 /* Active partitions cannot be called! */
349 return TFM_ERROR_PARTITION_NON_REENTRANT;
350 } else if (curr_partition_state != SPM_PARTITION_STATE_IDLE) {
351 /* The partition to be called is not in a proper state */
352 return TFM_SECURE_LOCK_FAILED;
353 }
354 return TFM_SUCCESS;
355}
356
357/**
358 * \brief Check whether the partitions for the secure function call of irq are
359 * in a proper state.
360 *
361 * \param[in] called_partition_state State of the partition to be called
362 *
363 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
364 */
365static enum tfm_status_e check_irq_partition_state(
366 uint32_t called_partition_state)
367{
368 if (called_partition_state == SPM_PARTITION_STATE_IDLE ||
369 called_partition_state == SPM_PARTITION_STATE_RUNNING ||
370 called_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
371 called_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
372 called_partition_state == SPM_PARTITION_STATE_BLOCKED) {
373 return TFM_SUCCESS;
374 }
375 return TFM_SECURE_LOCK_FAILED;
376}
377
/**
 * \brief Calculate the address where the iovec parameters are to be saved for
 *        the called partition.
 *
 * \param[in] partition_idx The index of the partition to be called.
 *                          (Unused in this isolation level: all partitions
 *                          share the common secure stack.)
 *
 * \return The address where the iovec parameters should be saved.
 */
static struct iovec_args_t *get_iovec_args_stack_address(uint32_t partition_idx)
{
    /* Save the iovecs on the common stack. The area is the reserved region at
     * the top (Limit) of TFM_SECURE_STACK, just above the stack seal. */
    return &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
}
391
Mingyang Sunda30f1e2020-07-13 17:20:32 +0800392/**
393 * \brief Returns the index of the partition with the given partition ID.
394 *
395 * \param[in] partition_id Partition id
396 *
397 * \return the partition idx if partition_id is valid,
398 * \ref SPM_INVALID_PARTITION_IDX othervise
399 */
400static uint32_t get_partition_idx(uint32_t partition_id)
401{
402 uint32_t i;
403
404 if (partition_id == INVALID_PARTITION_ID) {
405 return SPM_INVALID_PARTITION_IDX;
406 }
407
408 for (i = 0; i < g_spm_partition_db.partition_count; ++i) {
409 if (g_spm_partition_db.partitions[i].static_data->partition_id ==
410 partition_id) {
411 return i;
412 }
413 }
414 return SPM_INVALID_PARTITION_IDX;
415}
416
417/**
David Hu5da82de2020-12-02 16:41:30 +0800418 * \brief Set the iovec parameters for the partition
419 *
420 * \param[in] partition_idx Partition index
421 * \param[in] iovec_ptr The arguments of the secure function
422 *
423 * \return Error code \ref spm_err_t
424 *
425 * \note This function doesn't check if partition_idx is valid.
426 * \note This function assumes that the iovecs that are passed in iovec_ptr are
427 * valid, and does no sanity check on them at all.
428 */
429static enum spm_err_t tfm_spm_partition_set_iovec(uint32_t partition_idx,
430 const struct iovec_params_t *iovec_ptr)
431{
432 struct spm_partition_runtime_data_t *runtime_data =
433 &g_spm_partition_db.partitions[partition_idx].runtime_data;
434 size_t i;
435
436 if ((iovec_ptr->in_len < 0) || (iovec_ptr->out_len < 0)) {
437 return SPM_ERR_INVALID_PARAMETER;
438 }
439
440 runtime_data->iovec_args.in_len = iovec_ptr->in_len;
441 for (i = 0U; i < runtime_data->iovec_args.in_len; ++i) {
442 runtime_data->iovec_args.in_vec[i].base = iovec_ptr->in_vec[i].base;
443 runtime_data->iovec_args.in_vec[i].len = iovec_ptr->in_vec[i].len;
444 }
445 runtime_data->iovec_args.out_len = iovec_ptr->out_len;
446 for (i = 0U; i < runtime_data->iovec_args.out_len; ++i) {
447 runtime_data->iovec_args.out_vec[i].base = iovec_ptr->out_vec[i].base;
448 runtime_data->iovec_args.out_vec[i].len = iovec_ptr->out_vec[i].len;
449 }
450 runtime_data->orig_outvec = iovec_ptr->orig_outvec;
451
452 return SPM_ERR_OK;
453}
454
/**
 * \brief Get the flags associated with a partition
 *
 * \param[in] partition_idx Partition index
 *
 * \return Flags associated with the partition
 *
 * \note This function doesn't check if partition_idx is valid; the caller is
 *       responsible for passing an index within the partition database.
 */
static uint32_t tfm_spm_partition_get_flags(uint32_t partition_idx)
{
    /* Flags live in the static (build-time) partition data */
    return g_spm_partition_db.partitions[partition_idx].static_data->
            partition_flags;
}
469
/**
 * \brief Validate a secure function request and switch execution context to
 *        the target partition.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 * \param[in] iovec_ptr Sanitized iovec arguments for the call
 * \param[in] excReturn EXC_RETURN value of the caller, stored so it can be
 *                      restored on return
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 *
 * \note Must be called with interrupts disabled (see
 *       tfm_spm_sfn_request_handler()).
 */
static enum tfm_status_e tfm_start_partition(
                                         const struct tfm_sfn_req_s *desc_ptr,
                                         const struct iovec_params_t *iovec_ptr,
                                         uint32_t excReturn)
{
    enum tfm_status_e res;
    uint32_t caller_partition_idx = desc_ptr->caller_part_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;
    const struct spm_partition_runtime_data_t *caller_part_data;
    uint32_t caller_flags;
    register uint32_t partition_idx;
    uint32_t psp;
    uint32_t partition_psp, partition_psplim;
    uint32_t partition_state;
    uint32_t caller_partition_state;
    struct tfm_state_context_t *svc_ctx;
    uint32_t caller_partition_id;
    int32_t client_id;
    struct iovec_args_t *iovec_args;

    psp = __get_PSP();
    svc_ctx = (struct tfm_state_context_t *)psp;
    caller_flags = tfm_spm_partition_get_flags(caller_partition_idx);

    /* Check partition state consistency: an App RoT caller and an NS caller
     * are mutually exclusive */
    if (((caller_flags & SPM_PART_FLAG_APP_ROT) != 0)
        != (!desc_ptr->ns_caller)) {
        /* Partition state inconsistency detected */
        return TFM_SECURE_LOCK_FAILED;
    }

    partition_idx = get_partition_idx(desc_ptr->sp_id);

    curr_part_data = tfm_spm_partition_get_runtime_data(partition_idx);
    caller_part_data = tfm_spm_partition_get_runtime_data(caller_partition_idx);
    partition_state = curr_part_data->partition_state;
    caller_partition_state = caller_part_data->partition_state;
    caller_partition_id = tfm_spm_partition_get_partition_id(
                                                          caller_partition_idx);

    /* During initialization the state machine is not yet fully set up, so the
     * reentrancy/state check is skipped */
    if (!tfm_secure_api_initializing) {
        res = check_partition_state(partition_state, caller_partition_state);
        if (res != TFM_SUCCESS) {
            return res;
        }
    }

    /* Prepare switch to shared secure partition stack */
    /* In case the call is coming from the non-secure world, we save the iovecs
     * on the top of the stack. Also the stack seal is present below this
     * region. So the memory area that can actually be used as stack by the
     * partitions starts at a lower address.
     */
    partition_psp = (uint32_t) tfm_secure_stack_seal;
    partition_psplim =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Base);

    /* Store the context for the partition call */
    tfm_spm_partition_set_caller_partition_idx(partition_idx,
                                               caller_partition_idx);
    tfm_spm_partition_store_context(caller_partition_idx, psp, excReturn);

    if ((caller_flags & SPM_PART_FLAG_APP_ROT)) {
        /* Secure-to-secure call: the caller's partition ID is the client ID */
        tfm_spm_partition_set_caller_client_id(partition_idx,
                                               caller_partition_id);
    } else {
        /* NS caller: the NSPM supplies the client ID, which must be negative
         * for non-secure clients; a non-negative value indicates an error */
        client_id = tfm_nspm_get_current_client_id();
        if (client_id >= 0) {
            return TFM_SECURE_LOCK_FAILED;
        }
        tfm_spm_partition_set_caller_client_id(partition_idx, client_id);
    }

    /* In level one, only switch context and return from exception if in
     * handler mode
     */
    if ((desc_ptr->ns_caller) || (tfm_secure_api_initializing)) {
        if (tfm_spm_partition_set_iovec(partition_idx, iovec_ptr) !=
            SPM_ERR_OK) {
            return TFM_ERROR_GENERIC;
        }
        iovec_args = get_iovec_args_stack_address(partition_idx);
        tfm_copy_iovec_parameters(iovec_args, &(curr_part_data->iovec_args));

        /* Prepare the partition context, update stack ptr */
        psp = (uint32_t)prepare_partition_iovec_ctx(svc_ctx, desc_ptr,
                                                    iovec_args,
                                                 (uint32_t *)partition_psp);
        __set_PSP(psp);
        tfm_arch_set_psplim(partition_psplim);
    }

    tfm_spm_partition_set_state(caller_partition_idx,
                                SPM_PARTITION_STATE_BLOCKED);
    tfm_spm_partition_set_state(partition_idx, SPM_PARTITION_STATE_RUNNING);
    tfm_secure_lock++;

    return TFM_SUCCESS;
}
569
/**
 * \brief Prepare execution of an unprivileged IRQ handler inside the
 *        partition that owns the interrupt.
 *
 * \param[in] excReturn EXC_RETURN value of the SVC (currently unused here)
 * \param[in] svc_ctx   The stacked SVC context; R0-R3 carry the handler
 *                      partition id, the unprivileged handler address, the
 *                      IRQ signal and the IRQ line respectively
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 */
static enum tfm_status_e tfm_start_partition_for_irq_handling(
                                          uint32_t excReturn,
                                          struct tfm_state_context_t *svc_ctx)
{
    uint32_t handler_partition_id = svc_ctx->r0;
    sfn_t unpriv_handler = (sfn_t)svc_ctx->r1;
    uint32_t irq_signal = svc_ctx->r2;
    uint32_t irq_line = svc_ctx->r3;
    enum tfm_status_e res;
    uint32_t psp = __get_PSP();
    uint32_t handler_partition_psp;
    uint32_t handler_partition_state;
    uint32_t interrupted_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t handler_partition_idx;

    handler_partition_idx = get_partition_idx(handler_partition_id);
    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                         handler_partition_idx);
    handler_partition_state = handler_part_data->partition_state;

    res = check_irq_partition_state(handler_partition_state);
    if (res != TFM_SUCCESS) {
        return res;
    }

    /* set mask for the partition */
    tfm_spm_partition_set_signal_mask(
                                 handler_partition_idx,
                                 handler_part_data->signal_mask | irq_signal);

    /* Keep the IRQ disabled until the handler completes */
    tfm_spm_hal_disable_irq(irq_line);

    /* save the current context of the interrupted partition */
    tfm_spm_partition_push_interrupted_ctx(interrupted_partition_idx);

    /* The handler runs on the interrupted partition's current stack */
    handler_partition_psp = psp;

    /* save the current context of the handler partition */
    tfm_spm_partition_push_handler_ctx(handler_partition_idx);

    /* Store caller for the partition */
    tfm_spm_partition_set_caller_partition_idx(handler_partition_idx,
                                               interrupted_partition_idx);

    psp = (uint32_t)prepare_partition_irq_ctx(svc_ctx, unpriv_handler,
                                          (int32_t *)handler_partition_psp);
    __set_PSP(psp);

    tfm_spm_partition_set_state(interrupted_partition_idx,
                                SPM_PARTITION_STATE_SUSPENDED);
    tfm_spm_partition_set_state(handler_partition_idx,
                                SPM_PARTITION_STATE_HANDLING_IRQ);

    return TFM_SUCCESS;
}
627
/**
 * \brief Restore the caller partition's context when a secure function call
 *        returns.
 *
 * \param[out] excReturn Where to restore the caller's EXC_RETURN value.
 *                       May be NULL for the thread-mode (secure-to-secure)
 *                       return path, where the restore branch is skipped.
 *
 * \return \ref TFM_SUCCESS on success, \ref TFM_SECURE_UNLOCK_FAILED if the
 *         partition context chain is inconsistent.
 */
static enum tfm_status_e tfm_return_from_partition(uint32_t *excReturn)
{
    uint32_t current_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *curr_part_data, *ret_part_data;
    uint32_t return_partition_idx;
    uint32_t return_partition_flags;
    uint32_t psp = __get_PSP();
    size_t i;
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;
    struct iovec_args_t *iovec_args;

    if (current_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    curr_part_data = tfm_spm_partition_get_runtime_data(current_partition_idx);
    return_partition_idx = curr_part_data->caller_partition_idx;

    if (return_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    ret_part_data = tfm_spm_partition_get_runtime_data(return_partition_idx);

    return_partition_flags = tfm_spm_partition_get_flags(return_partition_idx);

    tfm_secure_lock--;

    if (!(return_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        (tfm_secure_api_initializing)) {
        /* In TFM level 1 context restore is only done when
         * returning to NS or after initialization
         */
        /* Restore caller context */
        restore_caller_ctx(svc_ctx,
            (struct tfm_state_context_t *)ret_part_data->stack_ptr);
        *excReturn = ret_part_data->lr;
        __set_PSP(ret_part_data->stack_ptr);
        REGION_DECLARE_T(Image$$, ARM_LIB_STACK, $$ZI$$Base, uint32_t)[];
        uint32_t psp_stack_bottom =
            (uint32_t)REGION_NAME(Image$$, ARM_LIB_STACK, $$ZI$$Base);
        tfm_arch_set_psplim(psp_stack_bottom);

        /* iovec parameters live at the top (Limit) of TFM_SECURE_STACK */
        iovec_args = &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];

        /* Write the produced output lengths back to the caller's outvec */
        for (i = 0; i < curr_part_data->iovec_args.out_len; ++i) {
            curr_part_data->orig_outvec[i].len = iovec_args->out_vec[i].len;
        }
        /* Scrub the shared iovec area so no client pointers remain */
        tfm_clear_iovec_parameters(iovec_args);
    }

    tfm_spm_partition_cleanup_context(current_partition_idx);

    tfm_spm_partition_set_state(current_partition_idx,
                                SPM_PARTITION_STATE_IDLE);
    tfm_spm_partition_set_state(return_partition_idx,
                                SPM_PARTITION_STATE_RUNNING);

    return TFM_SUCCESS;
}
689
/**
 * \brief Restore the interrupted context when an unprivileged IRQ handler
 *        completes.
 *
 * \param[out] excReturn Where to place the EXC_RETURN value for resuming the
 *                       privileged handler code.
 *
 * \return \ref TFM_SUCCESS on success, \ref TFM_SECURE_UNLOCK_FAILED if the
 *         partition context chain is inconsistent.
 */
static enum tfm_status_e tfm_return_from_partition_irq_handling(
                                                        uint32_t *excReturn)
{
    uint32_t handler_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t interrupted_partition_idx;
    uint32_t psp = __get_PSP();
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;

    if (handler_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                         handler_partition_idx);
    interrupted_partition_idx = handler_part_data->caller_partition_idx;

    if (interrupted_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    /* For level 1, modify PSP, so that the SVC stack frame disappears,
     * and return to the privileged handler using the stack frame still on the
     * MSP stack.
     */
    *excReturn = svc_ctx->ra;
    psp += sizeof(struct tfm_state_context_t);

    tfm_spm_partition_pop_handler_ctx(handler_partition_idx);
    tfm_spm_partition_pop_interrupted_ctx(interrupted_partition_idx);

    __set_PSP(psp);

    return TFM_SUCCESS;
}
726
727static enum tfm_status_e tfm_check_sfn_req_integrity(
728 const struct tfm_sfn_req_s *desc_ptr)
729{
730 if ((desc_ptr == NULL) ||
731 (desc_ptr->sp_id == 0) ||
732 (desc_ptr->sfn == NULL)) {
733 /* invalid parameter */
734 return TFM_ERROR_INVALID_PARAMETER;
735 }
736 return TFM_SUCCESS;
737}
738
/**
 * \brief Check the runtime rules that gate a secure function request: a valid
 *        active caller, the secure "big lock" not already taken by another NS
 *        call, and (during init) that only the TF-M core may issue requests.
 *
 * \param[in] desc_ptr The secure function request descriptor
 *
 * \return \ref TFM_SUCCESS if the request is allowed, error code otherwise.
 */
static enum tfm_status_e tfm_core_check_sfn_req_rules(
        const struct tfm_sfn_req_s *desc_ptr)
{
    /* Check partition idx validity */
    if (desc_ptr->caller_part_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_ERROR_NO_ACTIVE_PARTITION;
    }

    if ((desc_ptr->ns_caller) && (tfm_secure_lock != 0)) {
        /* Secure domain is already locked!
         * This should only happen if caller is secure partition!
         */
        /* This scenario is a potential security breach.
         * Error is handled in caller.
         */
        return TFM_ERROR_SECURE_DOMAIN_LOCKED;
    }

    if (tfm_secure_api_initializing) {
        int32_t id =
            tfm_spm_partition_get_partition_id(desc_ptr->caller_part_idx);

        if ((id != TFM_SP_CORE_ID) || (tfm_secure_lock != 0)) {
            /* Invalid request during system initialization */
            ERROR_MSG("Invalid service request during initialization!");
            return TFM_ERROR_NOT_INITIALIZED;
        }
    }

    return TFM_SUCCESS;
}
770
/**
 * \brief Get the partition ID of the partition at the given index.
 *
 * \param[in] partition_idx Partition index
 *
 * \return The partition ID from the partition's static (build-time) data.
 *
 * \note This function doesn't check if partition_idx is valid.
 */
uint32_t tfm_spm_partition_get_partition_id(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
            partition_id;
}
776
777uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
778{
779 if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
780 return TFM_PARTITION_PRIVILEGED_MODE;
781 } else {
782 return TFM_PARTITION_UNPRIVILEGED_MODE;
783 }
784}
785
786bool tfm_is_partition_privileged(uint32_t partition_idx)
787{
788 uint32_t flags = tfm_spm_partition_get_flags(partition_idx);
789
790 return tfm_spm_partition_get_privileged_mode(flags) ==
791 TFM_PARTITION_PRIVILEGED_MODE;
792}
793
/**
 * \brief Mark the end of TF-M secure initialization.
 *
 * Clears the init flag, which enables the runtime partition-state and
 * caller-ID checks in the request path.
 */
void tfm_spm_secure_api_init_done(void)
{
    tfm_secure_api_initializing = 0;
}
798
/**
 * \brief Handle a secure function request: validate the descriptor, the iovec
 *        parameters and the locking rules, then switch execution to the
 *        target partition.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 * \param[in] excReturn EXC_RETURN value to store for the caller
 *
 * \return \ref TFM_SUCCESS on success; on any failure
 *         tfm_secure_api_error_handler() is invoked.
 *
 * \note Interrupts are kept disabled from validation through the context
 *       switch, so the checked state cannot change mid-operation.
 */
enum tfm_status_e tfm_spm_sfn_request_handler(
                             struct tfm_sfn_req_s *desc_ptr, uint32_t excReturn)
{
    enum tfm_status_e res;
    struct iovec_params_t iovecs;

    res = tfm_check_sfn_req_integrity(desc_ptr);
    if (res != TFM_SUCCESS) {
        ERROR_MSG("Invalid service request!");
        tfm_secure_api_error_handler();
    }

    __disable_irq();

    desc_ptr->caller_part_idx = tfm_spm_partition_get_running_partition_idx();

    /* Validate and copy the iovecs into the local buffer */
    res = tfm_core_check_sfn_parameters(desc_ptr, &iovecs);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        __enable_irq();
        tfm_secure_api_error_handler();
    }

    res = tfm_core_check_sfn_req_rules(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* FixMe: error compartmentalization TBD */
        tfm_spm_partition_set_state(
            desc_ptr->caller_part_idx, SPM_PARTITION_STATE_CLOSED);
        __enable_irq();
        ERROR_MSG("Unauthorized service request!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition(desc_ptr, &iovecs, excReturn);
    if (res != TFM_SUCCESS) {
        /* FixMe: consider possible fault scenarios */
        __enable_irq();
        ERROR_MSG("Failed to process service request!");
        tfm_secure_api_error_handler();
    }

    __enable_irq();

    return res;
}
844
/**
 * \brief Handle a secure function request issued from thread mode
 *        (secure-to-secure call in TF-M level 1): run the service function
 *        directly and unwind the partition context afterwards.
 *
 * \param[in] desc_ptr The secure function request descriptor
 *
 * \return The service function's return value on success, or an error code /
 *         no return via tfm_secure_api_error_handler() on failure.
 */
int32_t tfm_spm_sfn_request_thread_mode(struct tfm_sfn_req_s *desc_ptr)
{
    enum tfm_status_e res;
    int32_t *args;
    int32_t retVal;
    struct iovec_params_t iovecs;

    /* Pre-validate the iovecs; the handler below re-validates them with
     * interrupts disabled */
    res = tfm_core_check_sfn_parameters(desc_ptr, &iovecs);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        return (int32_t)res;
    }

    /* No excReturn value is needed as no exception handling is used */
    res = tfm_spm_sfn_request_handler(desc_ptr, 0);

    if (res != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    /* Secure partition to secure partition call in TFM level 1 */
    args = desc_ptr->args;
    retVal = desc_ptr->sfn(args[0], args[1], args[2], args[3]);

    /* return handler should restore original exc_return value... */
    res = tfm_return_from_partition(NULL);
    if (res == TFM_SUCCESS) {
        /* If unlock successful, pass SS return value to caller */
        return retVal;
    } else {
        /* Unlock errors indicate ctx database corruption or unknown
         * anomalies. Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }
    return (int32_t)res;
}
883
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800884int32_t tfm_spm_check_buffer_access(uint32_t partition_idx,
885 void *start_addr,
886 size_t len,
887 uint32_t alignment)
888{
889 uintptr_t start_addr_value = (uintptr_t)start_addr;
890 uintptr_t end_addr_value = (uintptr_t)start_addr + len;
891 uintptr_t alignment_mask;
892
893 alignment_mask = (((uintptr_t)1) << alignment) - 1;
894
895 /* Check that the pointer is aligned properly */
896 if (start_addr_value & alignment_mask) {
897 /* not aligned, return error */
898 return 0;
899 }
900
901 /* Protect against overflow (and zero len) */
902 if (end_addr_value <= start_addr_value) {
903 return 0;
904 }
905
906 /* For privileged partition execution, all secure data memory and stack
907 * is accessible
908 */
909 if (start_addr_value >= S_DATA_START &&
910 end_addr_value <= (S_DATA_START + S_DATA_SIZE)) {
911 return 1;
912 }
913
914 return 0;
915}
916
917void tfm_spm_get_caller_client_id_handler(uint32_t *svc_args)
918{
919 uintptr_t result_ptr_value = svc_args[0];
920 uint32_t running_partition_idx =
921 tfm_spm_partition_get_running_partition_idx();
922 const uint32_t running_partition_flags =
923 tfm_spm_partition_get_flags(running_partition_idx);
924 const struct spm_partition_runtime_data_t *curr_part_data =
925 tfm_spm_partition_get_runtime_data(running_partition_idx);
926 int res = 0;
927
928 if (!(running_partition_flags & SPM_PART_FLAG_APP_ROT) ||
929 curr_part_data->partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
930 curr_part_data->partition_state == SPM_PARTITION_STATE_SUSPENDED) {
931 /* This handler shouldn't be called from outside partition context.
932 * Also if the current partition is handling IRQ, the caller partition
933 * index might not be valid;
934 * Partitions are only allowed to run while S domain is locked.
935 */
936 svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
937 return;
938 }
939
940 /* Make sure that the output pointer points to a memory area that is owned
941 * by the partition
942 */
943 res = tfm_spm_check_buffer_access(running_partition_idx,
944 (void *)result_ptr_value,
945 sizeof(curr_part_data->caller_client_id),
946 2);
947 if (!res) {
948 /* Not in accessible range, return error */
949 svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
950 return;
951 }
952
953 *((int32_t *)result_ptr_value) = curr_part_data->caller_client_id;
954
955 /* Store return value in r0 */
956 svc_args[0] = (uint32_t)TFM_SUCCESS;
957}
958
959/* This SVC handler is called if veneer is running in thread mode */
960uint32_t tfm_spm_partition_request_svc_handler(
961 const uint32_t *svc_ctx, uint32_t excReturn)
962{
963 struct tfm_sfn_req_s *desc_ptr;
964
965 if (!(excReturn & EXC_RETURN_STACK_PROCESS)) {
966 /* Service request SVC called with MSP active.
967 * Either invalid configuration for Thread mode or SVC called
968 * from Handler mode, which is not supported.
969 * FixMe: error severity TBD
970 */
971 ERROR_MSG("Service request SVC called with MSP active!");
972 tfm_secure_api_error_handler();
973 }
974
975 desc_ptr = (struct tfm_sfn_req_s *)svc_ctx[0];
976
977 if (tfm_spm_sfn_request_handler(desc_ptr, excReturn) != TFM_SUCCESS) {
978 tfm_secure_api_error_handler();
979 }
980
981 return EXC_RETURN_SECURE_FUNCTION;
982}
983
984/* This SVC handler is called, if a thread mode execution environment is to
985 * be set up, to run an unprivileged IRQ handler
986 */
987uint32_t tfm_spm_depriv_req_handler(uint32_t *svc_args, uint32_t excReturn)
988{
989 struct tfm_state_context_t *svc_ctx =
990 (struct tfm_state_context_t *)svc_args;
991
992 enum tfm_status_e res;
993
994 if (excReturn & EXC_RETURN_STACK_PROCESS) {
995 /* FixMe: error severity TBD */
996 ERROR_MSG("Partition request SVC called with PSP active!");
997 tfm_secure_api_error_handler();
998 }
999
1000 res = tfm_start_partition_for_irq_handling(excReturn, svc_ctx);
1001 if (res != TFM_SUCCESS) {
1002 /* The partition is in an invalid state (UNINIT or CLOSED), so none of
1003 * its code can be run
1004 */
1005 /* FixMe: For now this case is handled with TF-M panic, however it would
1006 * be possible to skip the execution of the interrupt handler, and
1007 * resume the execution of the interrupted code.
1008 */
1009 tfm_secure_api_error_handler();
1010 }
1011 return EXC_RETURN_SECURE_FUNCTION;
1012}
1013
1014/* This SVC handler is called when sfn returns */
1015uint32_t tfm_spm_partition_return_handler(uint32_t lr)
1016{
1017 enum tfm_status_e res;
1018
1019 if (!(lr & EXC_RETURN_STACK_PROCESS)) {
1020 /* Partition return SVC called with MSP active.
1021 * This should not happen!
1022 */
1023 ERROR_MSG("Partition return SVC called with MSP active!");
1024 tfm_secure_api_error_handler();
1025 }
1026
1027 res = tfm_return_from_partition(&lr);
1028 if (res != TFM_SUCCESS) {
1029 /* Unlock errors indicate ctx database corruption or unknown anomalies
1030 * Halt execution
1031 */
1032 ERROR_MSG("Secure API error during unlock!");
1033 tfm_secure_api_error_handler();
1034 }
1035
1036 return lr;
1037}
1038
1039/* This SVC handler is called if a deprivileged IRQ handler was executed, and
1040 * the execution environment is to be set back for the privileged handler mode
1041 */
1042uint32_t tfm_spm_depriv_return_handler(uint32_t *irq_svc_args, uint32_t lr)
1043{
1044 enum tfm_status_e res;
Ken Liue0af44c2020-07-25 22:51:30 +08001045 struct tfm_state_context_t *irq_svc_ctx;
1046
1047 /* Take into account the sealed stack*/
1048 irq_svc_args += 2;
1049
1050 irq_svc_ctx = (struct tfm_state_context_t *)irq_svc_args;
Mingyang Sunabb1aab2020-02-18 13:49:08 +08001051
1052 if (!(lr & EXC_RETURN_STACK_PROCESS)) {
1053 /* Partition request SVC called with MSP active.
1054 * FixMe: error severity TBD
1055 */
1056 ERROR_MSG("Partition request SVC called with MSP active!");
1057 tfm_secure_api_error_handler();
1058 }
1059
1060 res = tfm_return_from_partition_irq_handling(&lr);
1061 if (res != TFM_SUCCESS) {
1062 /* Unlock errors indicate ctx database corruption or unknown anomalies
1063 * Halt execution
1064 */
1065 ERROR_MSG("Secure API error during unlock!");
1066 tfm_secure_api_error_handler();
1067 }
1068
1069 irq_svc_ctx->ra = lr;
1070
1071 return EXC_RETURN_SECURE_HANDLER;
1072}
1073
1074/* FIXME: get_irq_line_for_signal is also implemented in the ipc folder. */
1075/**
1076 * \brief Return the IRQ line number associated with a signal
1077 *
1078 * \param[in] partition_id The ID of the partition in which we look for the
1079 * signal
1080 * \param[in] signal The signal we do the query for
1081 *
1082 * \retval >=0 The IRQ line number associated with a signal in the partition
1083 * \retval <0 error
1084 */
Kevin Penga20b5af2021-01-11 11:20:52 +08001085static int32_t get_irq_line_for_signal(int32_t partition_id,
Mingyang Sunabb1aab2020-02-18 13:49:08 +08001086 psa_signal_t signal)
1087{
1088 size_t i;
1089
Kevin Peng410bee52021-01-13 16:27:17 +08001090 if (!tfm_is_one_bit_set(signal)) {
1091 return -1;
1092 }
1093
Mingyang Sunabb1aab2020-02-18 13:49:08 +08001094 for (i = 0; i < tfm_core_irq_signals_count; ++i) {
1095 if (tfm_core_irq_signals[i].partition_id == partition_id &&
1096 tfm_core_irq_signals[i].signal_value == signal) {
1097 return tfm_core_irq_signals[i].irq_line;
1098 }
1099 }
Kevin Penga20b5af2021-01-11 11:20:52 +08001100 return -1;
Mingyang Sunabb1aab2020-02-18 13:49:08 +08001101}
1102
1103void tfm_spm_enable_irq_handler(uint32_t *svc_args)
1104{
1105 struct tfm_state_context_t *svc_ctx =
1106 (struct tfm_state_context_t *)svc_args;
1107 psa_signal_t irq_signal = svc_ctx->r0;
1108 uint32_t running_partition_idx =
1109 tfm_spm_partition_get_running_partition_idx();
1110 uint32_t running_partition_id =
1111 tfm_spm_partition_get_partition_id(running_partition_idx);
Kevin Penga20b5af2021-01-11 11:20:52 +08001112 int32_t irq_line;
Mingyang Sunabb1aab2020-02-18 13:49:08 +08001113
Mingyang Sunabb1aab2020-02-18 13:49:08 +08001114 irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
1115
1116 if (irq_line < 0) {
1117 /* FixMe: error severity TBD */
1118 tfm_secure_api_error_handler();
1119 }
1120
1121 tfm_spm_hal_enable_irq(irq_line);
1122}
1123
1124void tfm_spm_disable_irq_handler(uint32_t *svc_args)
1125{
1126 struct tfm_state_context_t *svc_ctx =
1127 (struct tfm_state_context_t *)svc_args;
1128 psa_signal_t irq_signal = svc_ctx->r0;
1129 uint32_t running_partition_idx =
1130 tfm_spm_partition_get_running_partition_idx();
1131 uint32_t running_partition_id =
1132 tfm_spm_partition_get_partition_id(running_partition_idx);
Kevin Penga20b5af2021-01-11 11:20:52 +08001133 int32_t irq_line;
Mingyang Sunabb1aab2020-02-18 13:49:08 +08001134
Mingyang Sunabb1aab2020-02-18 13:49:08 +08001135 irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
1136
1137 if (irq_line < 0) {
1138 /* FixMe: error severity TBD */
1139 tfm_secure_api_error_handler();
1140 }
1141
1142 tfm_spm_hal_disable_irq(irq_line);
1143}
1144
1145void tfm_spm_psa_wait(uint32_t *svc_args)
1146{
1147 /* Look for partition that is ready for run */
1148 struct tfm_state_context_t *svc_ctx =
1149 (struct tfm_state_context_t *)svc_args;
1150 uint32_t running_partition_idx;
1151 const struct spm_partition_runtime_data_t *curr_part_data;
1152
1153 psa_signal_t signal_mask = svc_ctx->r0;
1154 uint32_t timeout = svc_ctx->r1;
1155
1156 /*
1157 * Timeout[30:0] are reserved for future use.
1158 * SPM must ignore the value of RES.
1159 */
1160 timeout &= PSA_TIMEOUT_MASK;
1161
1162 running_partition_idx = tfm_spm_partition_get_running_partition_idx();
1163 curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);
1164
1165 if (timeout == PSA_BLOCK) {
1166 /* FIXME: Scheduling is not available in library model, and busy wait is
1167 * also not possible as this code is running in SVC context, and it
1168 * cannot be pre-empted by interrupts. So do nothing here for now
1169 */
1170 (void) signal_mask;
1171 }
1172
1173 svc_ctx->r0 = curr_part_data->signal_mask;
1174}
1175
1176void tfm_spm_psa_eoi(uint32_t *svc_args)
1177{
1178 struct tfm_state_context_t *svc_ctx =
1179 (struct tfm_state_context_t *)svc_args;
1180 psa_signal_t irq_signal = svc_ctx->r0;
1181 uint32_t signal_mask;
1182 uint32_t running_partition_idx;
1183 uint32_t running_partition_id;
1184 const struct spm_partition_runtime_data_t *curr_part_data;
Kevin Penga20b5af2021-01-11 11:20:52 +08001185 int32_t irq_line;
Mingyang Sunabb1aab2020-02-18 13:49:08 +08001186
1187 running_partition_idx = tfm_spm_partition_get_running_partition_idx();
1188 running_partition_id =
1189 tfm_spm_partition_get_partition_id(running_partition_idx);
1190 curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);
1191
Mingyang Sunabb1aab2020-02-18 13:49:08 +08001192 irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
1193
1194 if (irq_line < 0) {
1195 /* FixMe: error severity TBD */
1196 tfm_secure_api_error_handler();
1197 }
1198
1199 tfm_spm_hal_clear_pending_irq(irq_line);
1200 tfm_spm_hal_enable_irq(irq_line);
1201
1202 signal_mask = curr_part_data->signal_mask & ~irq_signal;
1203 tfm_spm_partition_set_signal_mask(running_partition_idx, signal_mask);
1204}
Mingyang Sunda01a972019-07-12 17:32:59 +08001205
1206/*
1207 * This function is called when a secure partition causes an error.
1208 * In case of an error in the error handling, a non-zero value have to be
1209 * returned.
1210 */
1211static void tfm_spm_partition_err_handler(
1212 const struct spm_partition_desc_t *partition,
Mingyang Sunda01a972019-07-12 17:32:59 +08001213 int32_t err_code)
1214{
Mingyang Sunda01a972019-07-12 17:32:59 +08001215 (void)err_code;
Ken Liuf250b8b2019-12-27 16:31:24 +08001216
Summer Qin423dbef2019-08-22 15:59:35 +08001217 tfm_spm_partition_set_state(partition->static_data->partition_id,
Mingyang Sunda01a972019-07-12 17:32:59 +08001218 SPM_PARTITION_STATE_CLOSED);
1219}
1220
/* Configure platform isolation for every partition and run each partition's
 * init function (if any) through the secure function request path.
 *
 * Returns SPM_ERR_OK only when every isolation configuration step and every
 * partition init function succeeded; otherwise
 * SPM_ERR_PARTITION_NOT_AVAILABLE.
 */
enum spm_err_t tfm_spm_partition_init(void)
{
    struct spm_partition_desc_t *part;
    struct tfm_sfn_req_s desc;
    int32_t args[4] = {0};
    int32_t fail_cnt = 0;
    uint32_t idx;
    const struct tfm_spm_partition_platform_data_t **platform_data_p;

    /* Call the init function for each partition */
    for (idx = 0; idx < g_spm_partition_db.partition_count; ++idx) {
        part = &g_spm_partition_db.partitions[idx];
        /* Apply each entry of the partition's NULL-terminated platform data
         * list (peripheral/MPU configuration) before running its init.
         */
        platform_data_p = part->platform_data_list;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(idx,
                            *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    fail_cnt++;
                }
                ++platform_data_p;
            }
        }
        if (part->static_data->partition_init == NULL) {
            /* No init function: the partition is immediately idle with no
             * caller recorded.
             */
            tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            tfm_spm_partition_set_caller_partition_idx(idx,
                                                    SPM_INVALID_PARTITION_IDX);
        } else {
            int32_t res;

            /* Run the init function as a secure (non-NS) service request
             * with four zeroed arguments.
             */
            desc.args = args;
            desc.ns_caller = false;
            desc.sfn = (sfn_t)part->static_data->partition_init;
            desc.sp_id = part->static_data->partition_id;
            res = tfm_core_sfn_request(&desc);
            if (res == TFM_SUCCESS) {
                tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            } else {
                /* Init failed: close the partition and record the failure. */
                tfm_spm_partition_err_handler(part, res);
                fail_cnt++;
            }
        }
    }

    /* From this point on, secure API calls are accepted. */
    tfm_spm_secure_api_init_done();

    if (fail_cnt == 0) {
        return SPM_ERR_OK;
    } else {
        return SPM_ERR_PARTITION_NOT_AVAILABLE;
    }
}
1272
1273void tfm_spm_partition_push_interrupted_ctx(uint32_t partition_idx)
1274{
1275 struct spm_partition_runtime_data_t *runtime_data =
1276 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1277 struct interrupted_ctx_stack_frame_t *stack_frame =
Edison Ai7aff9e82019-07-11 14:56:46 +08001278 (struct interrupted_ctx_stack_frame_t *)runtime_data->ctx_stack_ptr;
Mingyang Sunda01a972019-07-12 17:32:59 +08001279
1280 stack_frame->partition_state = runtime_data->partition_state;
Matt463ed582019-12-20 12:31:25 +08001281
1282 runtime_data->ctx_stack_ptr +=
1283 sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
Mingyang Sunda01a972019-07-12 17:32:59 +08001284}
1285
1286void tfm_spm_partition_pop_interrupted_ctx(uint32_t partition_idx)
1287{
1288 struct spm_partition_runtime_data_t *runtime_data =
1289 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1290 struct interrupted_ctx_stack_frame_t *stack_frame;
1291
Matt463ed582019-12-20 12:31:25 +08001292 runtime_data->ctx_stack_ptr -=
1293 sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
1294
Mingyang Sunda01a972019-07-12 17:32:59 +08001295 stack_frame = (struct interrupted_ctx_stack_frame_t *)
1296 runtime_data->ctx_stack_ptr;
1297 tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
1298 stack_frame->partition_state = 0;
Mingyang Sunda01a972019-07-12 17:32:59 +08001299}
1300
1301void tfm_spm_partition_push_handler_ctx(uint32_t partition_idx)
1302{
1303 struct spm_partition_runtime_data_t *runtime_data =
1304 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1305 struct handler_ctx_stack_frame_t *stack_frame =
1306 (struct handler_ctx_stack_frame_t *)
1307 runtime_data->ctx_stack_ptr;
1308
1309 stack_frame->partition_state = runtime_data->partition_state;
1310 stack_frame->caller_partition_idx = runtime_data->caller_partition_idx;
1311
1312 runtime_data->ctx_stack_ptr +=
1313 sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
1314}
1315
1316void tfm_spm_partition_pop_handler_ctx(uint32_t partition_idx)
1317{
1318 struct spm_partition_runtime_data_t *runtime_data =
1319 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1320 struct handler_ctx_stack_frame_t *stack_frame;
1321
1322 runtime_data->ctx_stack_ptr -=
1323 sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
1324
1325 stack_frame = (struct handler_ctx_stack_frame_t *)
1326 runtime_data->ctx_stack_ptr;
1327
1328 tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
1329 stack_frame->partition_state = 0;
1330 tfm_spm_partition_set_caller_partition_idx(
1331 partition_idx, stack_frame->caller_partition_idx);
1332 stack_frame->caller_partition_idx = 0;
1333}
1334
Mingyang Sunda01a972019-07-12 17:32:59 +08001335void tfm_spm_partition_store_context(uint32_t partition_idx,
1336 uint32_t stack_ptr, uint32_t lr)
1337{
1338 g_spm_partition_db.partitions[partition_idx].
1339 runtime_data.stack_ptr = stack_ptr;
1340 g_spm_partition_db.partitions[partition_idx].
1341 runtime_data.lr = lr;
1342}
1343
1344const struct spm_partition_runtime_data_t *
1345 tfm_spm_partition_get_runtime_data(uint32_t partition_idx)
1346{
1347 return &(g_spm_partition_db.partitions[partition_idx].runtime_data);
1348}
1349
1350void tfm_spm_partition_set_state(uint32_t partition_idx, uint32_t state)
1351{
1352 g_spm_partition_db.partitions[partition_idx].runtime_data.partition_state =
1353 state;
1354 if (state == SPM_PARTITION_STATE_RUNNING ||
1355 state == SPM_PARTITION_STATE_HANDLING_IRQ) {
1356 g_spm_partition_db.running_partition_idx = partition_idx;
1357 }
1358}
1359
1360void tfm_spm_partition_set_caller_partition_idx(uint32_t partition_idx,
1361 uint32_t caller_partition_idx)
1362{
1363 g_spm_partition_db.partitions[partition_idx].runtime_data.
1364 caller_partition_idx = caller_partition_idx;
1365}
1366
1367void tfm_spm_partition_set_signal_mask(uint32_t partition_idx,
1368 uint32_t signal_mask)
1369{
1370 g_spm_partition_db.partitions[partition_idx].runtime_data.
1371 signal_mask = signal_mask;
1372}
1373
1374void tfm_spm_partition_set_caller_client_id(uint32_t partition_idx,
1375 int32_t caller_client_id)
1376{
1377 g_spm_partition_db.partitions[partition_idx].runtime_data.
1378 caller_client_id = caller_client_id;
1379}
1380
Mingyang Sunda01a972019-07-12 17:32:59 +08001381uint32_t tfm_spm_partition_get_running_partition_idx(void)
1382{
1383 return g_spm_partition_db.running_partition_idx;
1384}
1385
1386void tfm_spm_partition_cleanup_context(uint32_t partition_idx)
1387{
1388 struct spm_partition_desc_t *partition =
1389 &(g_spm_partition_db.partitions[partition_idx]);
1390 int32_t i;
1391
1392 partition->runtime_data.caller_partition_idx = SPM_INVALID_PARTITION_IDX;
Mingyang Sunda01a972019-07-12 17:32:59 +08001393 partition->runtime_data.iovec_args.in_len = 0;
1394 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
1395 partition->runtime_data.iovec_args.in_vec[i].base = 0;
1396 partition->runtime_data.iovec_args.in_vec[i].len = 0;
1397 }
1398 partition->runtime_data.iovec_args.out_len = 0;
1399 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
1400 partition->runtime_data.iovec_args.out_vec[i].base = 0;
1401 partition->runtime_data.iovec_args.out_vec[i].len = 0;
1402 }
1403 partition->runtime_data.orig_outvec = 0;
Summer Qin423dbef2019-08-22 15:59:35 +08001404}
Summer Qin830c5542020-02-14 13:44:20 +08001405
1406void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
1407{
1408 uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
1409 uint32_t running_partition_flags = 0;
1410 uint32_t running_partition_idx;
1411
1412 /* Check permissions on request type basis */
1413
1414 switch (svc_ctx->r0) {
1415 case TFM_SPM_REQUEST_RESET_VOTE:
1416 running_partition_idx =
1417 tfm_spm_partition_get_running_partition_idx();
1418 running_partition_flags = tfm_spm_partition_get_flags(
1419 running_partition_idx);
1420
1421 /* Currently only PSA Root of Trust services are allowed to make Reset
1422 * vote request
1423 */
1424 if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
1425 *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
1426 }
1427
1428 /* FixMe: this is a placeholder for checks to be performed before
1429 * allowing execution of reset
1430 */
1431 *res_ptr = (uint32_t)TFM_SUCCESS;
1432
1433 break;
1434 default:
1435 *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
1436 }
1437}
Mingyang Sunbd7ceb52020-06-11 16:53:03 +08001438
1439enum spm_err_t tfm_spm_db_init(void)
1440{
1441 uint32_t i;
1442
1443 /* This function initialises partition db */
1444
1445 /* For the non secure Execution environment */
1446 tfm_nspm_configure_clients();
1447
1448 for (i = 0; i < g_spm_partition_db.partition_count; i++) {
1449 g_spm_partition_db.partitions[i].runtime_data.partition_state =
1450 SPM_PARTITION_STATE_UNINIT;
1451 g_spm_partition_db.partitions[i].runtime_data.caller_partition_idx =
1452 SPM_INVALID_PARTITION_IDX;
1453 g_spm_partition_db.partitions[i].runtime_data.caller_client_id =
1454 TFM_INVALID_CLIENT_ID;
1455 g_spm_partition_db.partitions[i].runtime_data.ctx_stack_ptr =
1456 ctx_stack_list[i];
1457 g_spm_partition_db.partitions[i].static_data = &static_data_list[i];
1458 g_spm_partition_db.partitions[i].platform_data_list =
1459 platform_data_list_list[i];
1460 }
1461 g_spm_partition_db.is_init = 1;
1462
1463 return SPM_ERR_OK;
1464}