/*
 * Copyright (c) 2017-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include <stdbool.h>
#include <arm_cmse.h>
#include "bitops.h"
#include "tfm_nspm.h"
#include "tfm_api.h"
#include "tfm_arch.h"
#include "tfm_irq_list.h"
#include "psa/service.h"
#include "tfm_core_mem_check.h"
#include "tfm_peripherals_def.h"
#include "tfm_secure_api.h"
#include "tfm_spm_hal.h"
#include "tfm_core_trustzone.h"
#include "spm_func.h"
#include "region_defs.h"
#include "region.h"
#include "spm_partition_defs.h"
#include "psa_manifest/pid.h"
#include "tfm/tfm_spm_services.h"
#include "tfm_spm_db_func.inc"

/* Structure to temporarily save iovec parameters from PSA client */
struct iovec_params_t {
    psa_invec in_vec[PSA_MAX_IOVEC];
    size_t in_len;
    psa_outvec out_vec[PSA_MAX_IOVEC];
    size_t out_len;

    psa_outvec *orig_outvec;
};

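/*
 * EXC_RETURN values used when returning from the SPM's exception handlers:
 * 0xFFFFFFFD returns to Thread mode using the Process Stack Pointer (secure
 * partition code), 0xFFFFFFF1 returns to Handler mode using the Main Stack
 * Pointer.
 */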
#define EXC_RETURN_SECURE_FUNCTION 0xFFFFFFFD
#define EXC_RETURN_SECURE_HANDLER  0xFFFFFFF1

#ifndef TFM_LVL
#error TFM_LVL is not defined!
#endif

#ifdef TFM_MULTI_CORE_TOPOLOGY
#error Multi core is not supported by Function mode
#endif

REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Base, uint32_t);
REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Limit, struct iovec_args_t)[];

static uint32_t *tfm_secure_stack_seal =
    ((uint32_t *)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1]) - 2;

REGION_DECLARE_T(Image$$, ARM_LIB_STACK_SEAL, $$ZI$$Base, uint32_t);

/*
 * Function to seal the psp stacks for Function model of TF-M.
 */
void tfm_spm_seal_psp_stacks(void)
{
    /*
     * The top of TFM_SECURE_STACK is used for iovec parameters, so the seal
     * needs to be placed between the iovec parameters and the partition stack.
     *
     * Image$$TFM_SECURE_STACK$$ZI$$Limit ->  +-------------------------+
     *                                        |                         |
     *                                        |  iovec parameters for   |
     *                                        |  partition              |
     * (Image$$TFM_SECURE_STACK$$ZI$$Limit -  |                         |
     *  sizeof(iovec_args_t))             ->  +-------------------------+
     *                                        |       Stack Seal        |
     *                                        +-------------------------+
     *                                        |                         |
     *                                        |     Partition stack     |
     *                                        |                         |
     * Image$$TFM_SECURE_STACK$$ZI$$Base  ->  +-------------------------+
     */
    *(tfm_secure_stack_seal) = TFM_STACK_SEAL_VALUE;
    *(tfm_secure_stack_seal + 1) = TFM_STACK_SEAL_VALUE;

    /*
     * Seal the ARM_LIB_STACK by writing the seal value to the reserved
     * region.
     */
    uint32_t *arm_lib_stck_seal_base = (uint32_t *)&REGION_NAME(Image$$,
                                              ARM_LIB_STACK_SEAL, $$ZI$$Base);

    *(arm_lib_stck_seal_base) = TFM_STACK_SEAL_VALUE;
    *(arm_lib_stck_seal_base + 1) = TFM_STACK_SEAL_VALUE;
}

/*
 * This is the "Big Lock" on the secure side, to guarantee single entry
 * to SPE
 */
static int32_t tfm_secure_lock;
static int32_t tfm_secure_api_initializing = 1;

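/*
 * Build a basic exception return frame (r0-r3, r12, lr, return address, xPSR)
 * below 'dst'. On exception return the veneer resumes in Thread mode on the
 * shared partition stack, with the iovec parameters in r0-r3 and the secure
 * function address in LR, ready to be branched to.
 */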
static uint32_t *prepare_partition_iovec_ctx(
                                     const struct tfm_state_context_t *svc_ctx,
                                     const struct tfm_sfn_req_s *desc_ptr,
                                     const struct iovec_args_t *iovec_args,
                                     uint32_t *dst)
{
    /* XPSR = as was when called, but make sure it's thread mode */
    *(--dst) = svc_ctx->xpsr & 0xFFFFFE00U;
    /* ReturnAddress = resume veneer in new context */
    *(--dst) = svc_ctx->ra;
    /* LR = sfn address */
    *(--dst) = (uint32_t)desc_ptr->sfn;
    /* R12 = don't care */
    *(--dst) = 0U;

    /* R0-R3 = sfn arguments */
    *(--dst) = iovec_args->out_len;
    *(--dst) = (uint32_t)iovec_args->out_vec;
    *(--dst) = iovec_args->in_len;
    *(--dst) = (uint32_t)iovec_args->in_vec;

    return dst;
}

/**
 * \brief Create a stack frame that sets the execution environment to thread
 *        mode on exception return.
 *
 * \param[in] svc_ctx         The stacked SVC context
 * \param[in] unpriv_handler  The unprivileged IRQ handler to be called
 * \param[in] dst             A pointer where the context is to be created. (the
 *                            pointer is considered to be a stack pointer, and
 *                            the frame is created below it)
 *
 * \return A pointer pointing at the created stack frame.
 */
static int32_t *prepare_partition_irq_ctx(
                                     const struct tfm_state_context_t *svc_ctx,
                                     sfn_t unpriv_handler,
                                     int32_t *dst)
{
    int i;

    /* XPSR = as was when called, but make sure it's thread mode */
    *(--dst) = svc_ctx->xpsr & 0xFFFFFE00;
    /* ReturnAddress = resume to the privileged handler code, but execute it
     * unprivileged.
     */
    *(--dst) = svc_ctx->ra;
    /* LR = start address */
    *(--dst) = (int32_t)unpriv_handler;

    /* R12, R0-R3 unused arguments */
    for (i = 0; i < 5; ++i) {
        *(--dst) = 0;
    }

    return dst;
}

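/*
 * Copy the return address and the r0 value (the secure function's return
 * value) from the stacked SVC context into the caller's saved context, so the
 * caller resumes right after its veneer call with the result in r0.
 */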
static void restore_caller_ctx(const struct tfm_state_context_t *svc_ctx,
                               struct tfm_state_context_t *target_ctx)
{
    /* ReturnAddress = resume veneer after second SVC */
    target_ctx->ra = svc_ctx->ra;

    /* R0 = function return value */
    target_ctx->r0 = svc_ctx->r0;

    return;
}

/**
 * \brief Check whether the iovec parameters are valid, and the memory ranges
 *        are in the possession of the calling partition.
 *
 * \param[in]  desc_ptr   The secure function request descriptor
 * \param[out] iovec_ptr  The local buffer to store iovec arguments
 *
 * \return Return \ref TFM_SUCCESS if the iovec parameters are valid, error
 *         code otherwise as in \ref tfm_status_e
 */
static enum tfm_status_e tfm_core_check_sfn_parameters(
                                          const struct tfm_sfn_req_s *desc_ptr,
                                          struct iovec_params_t *iovec_ptr)
{
    struct psa_invec *in_vec = (psa_invec *)desc_ptr->args[0];
    size_t in_len;
    struct psa_outvec *out_vec = (psa_outvec *)desc_ptr->args[2];
    size_t out_len;
    uint32_t i;
    uint32_t privileged_mode = TFM_PARTITION_UNPRIVILEGED_MODE;

    if ((desc_ptr->args[1] < 0) || (desc_ptr->args[3] < 0)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    in_len = (size_t)(desc_ptr->args[1]);
    out_len = (size_t)(desc_ptr->args[3]);

    /*
     * Get the caller's privileged mode:
     * The privileged mode of an NS Secure Service caller is decided by the
     * tfm_core_has_xxx_access_to_region functions.
     * A secure caller can only be in privileged mode because the whole SPE is
     * running in privileged mode.
     */
    if (!desc_ptr->ns_caller) {
        privileged_mode = TFM_PARTITION_PRIVILEGED_MODE;
    }

    /* Check that the number of vectors is within range. Extra checks to avoid
     * overflow.
     */
    if ((in_len > PSA_MAX_IOVEC) || (out_len > PSA_MAX_IOVEC) ||
        (in_len + out_len > PSA_MAX_IOVEC)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    /* Check whether the caller partition has write access to the iovec
     * structures themselves. Use the TT instruction for this.
     */
    if (in_len > 0) {
        if ((in_vec == NULL) ||
            (tfm_core_has_write_access_to_region(in_vec,
                            sizeof(psa_invec)*in_len, desc_ptr->ns_caller,
                            privileged_mode) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (in_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }
    if (out_len > 0) {
        if ((out_vec == NULL) ||
            (tfm_core_has_write_access_to_region(out_vec,
                            sizeof(psa_outvec)*out_len, desc_ptr->ns_caller,
                            privileged_mode) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (out_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }

    /* Copy the iovec parameters into a local buffer before validating them */
    iovec_ptr->in_len = in_len;
    for (i = 0; i < in_len; ++i) {
        iovec_ptr->in_vec[i].base = in_vec[i].base;
        iovec_ptr->in_vec[i].len = in_vec[i].len;
    }
    iovec_ptr->out_len = out_len;
    for (i = 0; i < out_len; ++i) {
        iovec_ptr->out_vec[i].base = out_vec[i].base;
        iovec_ptr->out_vec[i].len = out_vec[i].len;
    }
    iovec_ptr->orig_outvec = out_vec;

    /* Check whether the caller partition has access to the data inside the
     * iovecs
     */
    for (i = 0; i < in_len; ++i) {
        if (iovec_ptr->in_vec[i].len > 0) {
            if ((iovec_ptr->in_vec[i].base == NULL) ||
                (tfm_core_has_read_access_to_region(iovec_ptr->in_vec[i].base,
                            iovec_ptr->in_vec[i].len, desc_ptr->ns_caller,
                            privileged_mode) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }
    for (i = 0; i < out_len; ++i) {
        if (iovec_ptr->out_vec[i].len > 0) {
            if ((iovec_ptr->out_vec[i].base == NULL) ||
                (tfm_core_has_write_access_to_region(iovec_ptr->out_vec[i].base,
                            iovec_ptr->out_vec[i].len, desc_ptr->ns_caller,
                            privileged_mode) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }

    return TFM_SUCCESS;
}

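/*
 * Copy the already validated iovec descriptors into the shared area at the
 * top of TFM_SECURE_STACK; this is the copy the called partition receives as
 * its in_vec/out_vec arguments.
 */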
static void tfm_copy_iovec_parameters(struct iovec_args_t *target,
                                      const struct iovec_args_t *source)
{
    size_t i;

    /* The vectors have been sanity checked already, and since then the
     * interrupts have been kept disabled. So we can be sure that the vectors
     * haven't been tampered with since the check, and it is safe to pass them
     * to the called partition.
     */

    target->in_len = source->in_len;
    for (i = 0; i < source->in_len; ++i) {
        target->in_vec[i].base = source->in_vec[i].base;
        target->in_vec[i].len = source->in_vec[i].len;
    }
    target->out_len = source->out_len;
    for (i = 0; i < source->out_len; ++i) {
        target->out_vec[i].base = source->out_vec[i].base;
        target->out_vec[i].len = source->out_vec[i].len;
    }
}

static void tfm_clear_iovec_parameters(struct iovec_args_t *args)
{
    int i;

    args->in_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        args->in_vec[i].base = NULL;
        args->in_vec[i].len = 0;
    }
    args->out_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        args->out_vec[i].base = NULL;
        args->out_vec[i].len = 0;
    }
}

/**
 * \brief Check whether the partitions for the secure function call are in a
 *        proper state.
 *
 * \param[in] curr_partition_state    State of the partition to be called
 * \param[in] caller_partition_state  State of the caller partition
 *
 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
 */
static enum tfm_status_e check_partition_state(uint32_t curr_partition_state,
                                               uint32_t caller_partition_state)
{
    if (caller_partition_state != SPM_PARTITION_STATE_RUNNING) {
        /* Calling a partition from a non-running state (e.g. while handling
         * an IRQ) is not allowed.
         */
        return TFM_ERROR_INVALID_EXC_MODE;
    }

    if (curr_partition_state == SPM_PARTITION_STATE_RUNNING ||
        curr_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        curr_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
        curr_partition_state == SPM_PARTITION_STATE_BLOCKED) {
        /* Active partitions cannot be called! */
        return TFM_ERROR_PARTITION_NON_REENTRANT;
    } else if (curr_partition_state != SPM_PARTITION_STATE_IDLE) {
        /* The partition to be called is not in a proper state */
        return TFM_SECURE_LOCK_FAILED;
    }
    return TFM_SUCCESS;
}

/**
 * \brief Check whether the partition that is to handle an IRQ is in a proper
 *        state.
 *
 * \param[in] called_partition_state  State of the partition to be called
 *
 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
 */
static enum tfm_status_e check_irq_partition_state(
                                                uint32_t called_partition_state)
{
    if (called_partition_state == SPM_PARTITION_STATE_IDLE ||
        called_partition_state == SPM_PARTITION_STATE_RUNNING ||
        called_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        called_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
        called_partition_state == SPM_PARTITION_STATE_BLOCKED) {
        return TFM_SUCCESS;
    }
    return TFM_SECURE_LOCK_FAILED;
}

/**
 * \brief Calculate the address where the iovec parameters are to be saved for
 *        the called partition.
 *
 * \param[in] partition_idx  The index of the partition to be called.
 *
 * \return The address where the iovec parameters should be saved.
 */
static struct iovec_args_t *get_iovec_args_stack_address(uint32_t partition_idx)
{
    /* Save the iovecs on the common stack. */
    return &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
}

/**
 * \brief Returns the index of the partition with the given partition ID.
 *
 * \param[in] partition_id  Partition id
 *
 * \return the partition idx if partition_id is valid,
 *         \ref SPM_INVALID_PARTITION_IDX otherwise
 */
static uint32_t get_partition_idx(uint32_t partition_id)
{
    uint32_t i;

    if (partition_id == INVALID_PARTITION_ID) {
        return SPM_INVALID_PARTITION_IDX;
    }

    for (i = 0; i < g_spm_partition_db.partition_count; ++i) {
        if (g_spm_partition_db.partitions[i].static_data->partition_id ==
                partition_id) {
            return i;
        }
    }
    return SPM_INVALID_PARTITION_IDX;
}

/**
 * \brief Set the iovec parameters for the partition
 *
 * \param[in] partition_idx  Partition index
 * \param[in] iovec_ptr      The arguments of the secure function
 *
 * \return Error code \ref spm_err_t
 *
 * \note This function doesn't check if partition_idx is valid.
 * \note This function assumes that the iovecs that are passed in iovec_ptr
 *       are valid, and does no sanity check on them at all.
 */
static enum spm_err_t tfm_spm_partition_set_iovec(uint32_t partition_idx,
                                        const struct iovec_params_t *iovec_ptr)
{
    struct spm_partition_runtime_data_t *runtime_data =
            &g_spm_partition_db.partitions[partition_idx].runtime_data;
    size_t i;

    if ((iovec_ptr->in_len < 0) || (iovec_ptr->out_len < 0)) {
        return SPM_ERR_INVALID_PARAMETER;
    }

    runtime_data->iovec_args.in_len = iovec_ptr->in_len;
    for (i = 0U; i < runtime_data->iovec_args.in_len; ++i) {
        runtime_data->iovec_args.in_vec[i].base = iovec_ptr->in_vec[i].base;
        runtime_data->iovec_args.in_vec[i].len = iovec_ptr->in_vec[i].len;
    }
    runtime_data->iovec_args.out_len = iovec_ptr->out_len;
    for (i = 0U; i < runtime_data->iovec_args.out_len; ++i) {
        runtime_data->iovec_args.out_vec[i].base = iovec_ptr->out_vec[i].base;
        runtime_data->iovec_args.out_vec[i].len = iovec_ptr->out_vec[i].len;
    }
    runtime_data->orig_outvec = iovec_ptr->orig_outvec;

    return SPM_ERR_OK;
}

/**
 * \brief Get the flags associated with a partition
 *
 * \param[in] partition_idx  Partition index
 *
 * \return Flags associated with the partition
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_flags(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
            partition_flags;
}

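/*
 * Set up a call into a secure partition: validate the caller/callee states,
 * record the caller's context and client ID, place the iovec parameters on
 * the shared partition stack and build the entry frame, then mark the caller
 * BLOCKED and the callee RUNNING while taking the secure lock.
 */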
static enum tfm_status_e tfm_start_partition(
                                        const struct tfm_sfn_req_s *desc_ptr,
                                        const struct iovec_params_t *iovec_ptr,
                                        uint32_t excReturn)
{
    enum tfm_status_e res;
    uint32_t caller_partition_idx = desc_ptr->caller_part_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;
    const struct spm_partition_runtime_data_t *caller_part_data;
    uint32_t caller_flags;
    register uint32_t partition_idx;
    uint32_t psp;
    uint32_t partition_psp, partition_psplim;
    uint32_t partition_state;
    uint32_t caller_partition_state;
    struct tfm_state_context_t *svc_ctx;
    uint32_t caller_partition_id;
    int32_t client_id;
    struct iovec_args_t *iovec_args;

    psp = __get_PSP();
    svc_ctx = (struct tfm_state_context_t *)psp;
    caller_flags = tfm_spm_partition_get_flags(caller_partition_idx);

    /* Check partition state consistency */
    if (((caller_flags & SPM_PART_FLAG_APP_ROT) != 0)
        != (!desc_ptr->ns_caller)) {
        /* Partition state inconsistency detected */
        return TFM_SECURE_LOCK_FAILED;
    }

    partition_idx = get_partition_idx(desc_ptr->sp_id);

    curr_part_data = tfm_spm_partition_get_runtime_data(partition_idx);
    caller_part_data = tfm_spm_partition_get_runtime_data(caller_partition_idx);
    partition_state = curr_part_data->partition_state;
    caller_partition_state = caller_part_data->partition_state;
    caller_partition_id = tfm_spm_partition_get_partition_id(
                                                         caller_partition_idx);

    if (!tfm_secure_api_initializing) {
        res = check_partition_state(partition_state, caller_partition_state);
        if (res != TFM_SUCCESS) {
            return res;
        }
    }

    /* Prepare switch to shared secure partition stack */
    /* In case the call is coming from the non-secure world, the iovecs are
     * saved on the top of the stack, and the stack seal sits just below this
     * region. So the memory area that can actually be used as stack by the
     * partitions starts at a lower address.
     */
    partition_psp = (uint32_t)tfm_secure_stack_seal;
    partition_psplim =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Base);

    /* Store the context for the partition call */
    tfm_spm_partition_set_caller_partition_idx(partition_idx,
                                               caller_partition_idx);
    tfm_spm_partition_store_context(caller_partition_idx, psp, excReturn);

    if ((caller_flags & SPM_PART_FLAG_APP_ROT)) {
        tfm_spm_partition_set_caller_client_id(partition_idx,
                                               caller_partition_id);
    } else {
        client_id = tfm_nspm_get_current_client_id();
        if (client_id >= 0) {
            return TFM_SECURE_LOCK_FAILED;
        }
        tfm_spm_partition_set_caller_client_id(partition_idx, client_id);
    }

    /* In level one, only switch context and return from exception if in
     * handler mode
     */
    if ((desc_ptr->ns_caller) || (tfm_secure_api_initializing)) {
        if (tfm_spm_partition_set_iovec(partition_idx, iovec_ptr) !=
            SPM_ERR_OK) {
            return TFM_ERROR_GENERIC;
        }
        iovec_args = get_iovec_args_stack_address(partition_idx);
        tfm_copy_iovec_parameters(iovec_args, &(curr_part_data->iovec_args));

        /* Prepare the partition context, update stack ptr */
        psp = (uint32_t)prepare_partition_iovec_ctx(svc_ctx, desc_ptr,
                                                    iovec_args,
                                                    (uint32_t *)partition_psp);
        __set_PSP(psp);
        tfm_arch_set_psplim(partition_psplim);
    }

    tfm_spm_partition_set_state(caller_partition_idx,
                                SPM_PARTITION_STATE_BLOCKED);
    tfm_spm_partition_set_state(partition_idx, SPM_PARTITION_STATE_RUNNING);
    tfm_secure_lock++;

    return TFM_SUCCESS;
}

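/*
 * Prepare execution of an unprivileged IRQ handler in Thread mode: record the
 * signal for the handler partition, disable the IRQ line, save the
 * interrupted and handler partition contexts, and build a Thread mode frame
 * that runs the handler on the current process stack.
 */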
static enum tfm_status_e tfm_start_partition_for_irq_handling(
                                        uint32_t excReturn,
                                        struct tfm_state_context_t *svc_ctx)
{
    uint32_t handler_partition_id = svc_ctx->r0;
    sfn_t unpriv_handler = (sfn_t)svc_ctx->r1;
    uint32_t irq_signal = svc_ctx->r2;
    uint32_t irq_line = svc_ctx->r3;
    enum tfm_status_e res;
    uint32_t psp = __get_PSP();
    uint32_t handler_partition_psp;
    uint32_t handler_partition_state;
    uint32_t interrupted_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t handler_partition_idx;

    handler_partition_idx = get_partition_idx(handler_partition_id);
    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                        handler_partition_idx);
    handler_partition_state = handler_part_data->partition_state;

    res = check_irq_partition_state(handler_partition_state);
    if (res != TFM_SUCCESS) {
        return res;
    }

    /* set mask for the partition */
    tfm_spm_partition_set_signal_mask(
                                 handler_partition_idx,
                                 handler_part_data->signal_mask | irq_signal);

    tfm_spm_hal_disable_irq(irq_line);

    /* save the current context of the interrupted partition */
    tfm_spm_partition_push_interrupted_ctx(interrupted_partition_idx);

    handler_partition_psp = psp;

    /* save the current context of the handler partition */
    tfm_spm_partition_push_handler_ctx(handler_partition_idx);

    /* Store caller for the partition */
    tfm_spm_partition_set_caller_partition_idx(handler_partition_idx,
                                               interrupted_partition_idx);

    psp = (uint32_t)prepare_partition_irq_ctx(svc_ctx, unpriv_handler,
                                              (int32_t *)handler_partition_psp);
    __set_PSP(psp);

    tfm_spm_partition_set_state(interrupted_partition_idx,
                                SPM_PARTITION_STATE_SUSPENDED);
    tfm_spm_partition_set_state(handler_partition_idx,
                                SPM_PARTITION_STATE_HANDLING_IRQ);

    return TFM_SUCCESS;
}

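/*
 * Undo the effects of tfm_start_partition() when the secure function returns:
 * release the secure lock, restore the caller's context when returning to the
 * non-secure caller (or during initialization), copy the output vector
 * lengths back to the caller's original outvec, and update partition states.
 */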
static enum tfm_status_e tfm_return_from_partition(uint32_t *excReturn)
{
    uint32_t current_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *curr_part_data, *ret_part_data;
    uint32_t return_partition_idx;
    uint32_t return_partition_flags;
    uint32_t psp = __get_PSP();
    size_t i;
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;
    struct iovec_args_t *iovec_args;

    if (current_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    curr_part_data = tfm_spm_partition_get_runtime_data(current_partition_idx);
    return_partition_idx = curr_part_data->caller_partition_idx;

    if (return_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    ret_part_data = tfm_spm_partition_get_runtime_data(return_partition_idx);

    return_partition_flags = tfm_spm_partition_get_flags(return_partition_idx);

    tfm_secure_lock--;

    if (!(return_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        (tfm_secure_api_initializing)) {
        /* In TFM level 1 context restore is only done when
         * returning to NS or after initialization
         */
        /* Restore caller context */
        restore_caller_ctx(svc_ctx,
            (struct tfm_state_context_t *)ret_part_data->stack_ptr);
        *excReturn = ret_part_data->lr;
        __set_PSP(ret_part_data->stack_ptr);
        REGION_DECLARE_T(Image$$, ARM_LIB_STACK, $$ZI$$Base, uint32_t)[];
        uint32_t psp_stack_bottom =
            (uint32_t)REGION_NAME(Image$$, ARM_LIB_STACK, $$ZI$$Base);
        tfm_arch_set_psplim(psp_stack_bottom);

        iovec_args = &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];

        for (i = 0; i < curr_part_data->iovec_args.out_len; ++i) {
            curr_part_data->orig_outvec[i].len = iovec_args->out_vec[i].len;
        }
        tfm_clear_iovec_parameters(iovec_args);
    }

    tfm_spm_partition_cleanup_context(current_partition_idx);

    tfm_spm_partition_set_state(current_partition_idx,
                                SPM_PARTITION_STATE_IDLE);
    tfm_spm_partition_set_state(return_partition_idx,
                                SPM_PARTITION_STATE_RUNNING);

    return TFM_SUCCESS;
}

static enum tfm_status_e tfm_return_from_partition_irq_handling(
                                                           uint32_t *excReturn)
{
    uint32_t handler_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t interrupted_partition_idx;
    uint32_t psp = __get_PSP();
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;

    if (handler_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                        handler_partition_idx);
    interrupted_partition_idx = handler_part_data->caller_partition_idx;

    if (interrupted_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    /* For level 1, modify PSP, so that the SVC stack frame disappears,
     * and return to the privileged handler using the stack frame still on the
     * MSP stack.
     */
    *excReturn = svc_ctx->ra;
    psp += sizeof(struct tfm_state_context_t);

    tfm_spm_partition_pop_handler_ctx(handler_partition_idx);
    tfm_spm_partition_pop_interrupted_ctx(interrupted_partition_idx);

    __set_PSP(psp);

    return TFM_SUCCESS;
}

static enum tfm_status_e tfm_check_sfn_req_integrity(
                                          const struct tfm_sfn_req_s *desc_ptr)
{
    if ((desc_ptr == NULL) ||
        (desc_ptr->sp_id == 0) ||
        (desc_ptr->sfn == NULL)) {
        /* invalid parameter */
        return TFM_ERROR_INVALID_PARAMETER;
    }
    return TFM_SUCCESS;
}

static enum tfm_status_e tfm_core_check_sfn_req_rules(
                                          const struct tfm_sfn_req_s *desc_ptr)
{
    /* Check partition idx validity */
    if (desc_ptr->caller_part_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_ERROR_NO_ACTIVE_PARTITION;
    }

    if ((desc_ptr->ns_caller) && (tfm_secure_lock != 0)) {
        /* Secure domain is already locked!
         * This should only happen if caller is secure partition!
         */
        /* This scenario is a potential security breach.
         * Error is handled in caller.
         */
        return TFM_ERROR_SECURE_DOMAIN_LOCKED;
    }

    if (tfm_secure_api_initializing) {
        int32_t id =
            tfm_spm_partition_get_partition_id(desc_ptr->caller_part_idx);

        if ((id != TFM_SP_CORE_ID) || (tfm_secure_lock != 0)) {
            /* Invalid request during system initialization */
            ERROR_MSG("Invalid service request during initialization!");
            return TFM_ERROR_NOT_INITIALIZED;
        }
    }

    return TFM_SUCCESS;
}

uint32_t tfm_spm_partition_get_partition_id(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
            partition_id;
}

uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
{
    if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
        return TFM_PARTITION_PRIVILEGED_MODE;
    } else {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
}

bool tfm_is_partition_privileged(uint32_t partition_idx)
{
    uint32_t flags = tfm_spm_partition_get_flags(partition_idx);

    return tfm_spm_partition_get_privileged_mode(flags) ==
           TFM_PARTITION_PRIVILEGED_MODE;
}

void tfm_spm_secure_api_init_done(void)
{
    tfm_secure_api_initializing = 0;
}

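/*
 * Entry point for secure function requests coming through the SVC veneers.
 * Runs with interrupts disabled while the request is validated and the target
 * partition context is set up; any failure is routed to
 * tfm_secure_api_error_handler().
 */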
enum tfm_status_e tfm_spm_sfn_request_handler(
                            struct tfm_sfn_req_s *desc_ptr, uint32_t excReturn)
{
    enum tfm_status_e res;
    struct iovec_params_t iovecs;

    res = tfm_check_sfn_req_integrity(desc_ptr);
    if (res != TFM_SUCCESS) {
        ERROR_MSG("Invalid service request!");
        tfm_secure_api_error_handler();
    }

    __disable_irq();

    desc_ptr->caller_part_idx = tfm_spm_partition_get_running_partition_idx();

    res = tfm_core_check_sfn_parameters(desc_ptr, &iovecs);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        __enable_irq();
        tfm_secure_api_error_handler();
    }

    res = tfm_core_check_sfn_req_rules(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* FixMe: error compartmentalization TBD */
        tfm_spm_partition_set_state(
            desc_ptr->caller_part_idx, SPM_PARTITION_STATE_CLOSED);
        __enable_irq();
        ERROR_MSG("Unauthorized service request!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition(desc_ptr, &iovecs, excReturn);
    if (res != TFM_SUCCESS) {
        /* FixMe: consider possible fault scenarios */
        __enable_irq();
        ERROR_MSG("Failed to process service request!");
        tfm_secure_api_error_handler();
    }

    __enable_irq();

    return res;
}

int32_t tfm_spm_sfn_request_thread_mode(struct tfm_sfn_req_s *desc_ptr)
{
    enum tfm_status_e res;
    int32_t *args;
    int32_t retVal;
    struct iovec_params_t iovecs;

    res = tfm_core_check_sfn_parameters(desc_ptr, &iovecs);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        return (int32_t)res;
    }

    /* No excReturn value is needed as no exception handling is used */
    res = tfm_spm_sfn_request_handler(desc_ptr, 0);

    if (res != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    /* Secure partition to secure partition call in TFM level 1 */
    args = desc_ptr->args;
    retVal = desc_ptr->sfn(args[0], args[1], args[2], args[3]);

    /* return handler should restore original exc_return value... */
    res = tfm_return_from_partition(NULL);
    if (res == TFM_SUCCESS) {
        /* If unlock successful, pass SS return value to caller */
        return retVal;
    } else {
        /* Unlock errors indicate ctx database corruption or unknown
         * anomalies. Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }
    return (int32_t)res;
}

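/*
 * Note: 'alignment' is the log2 of the required alignment in bytes, e.g.
 * passing 2 enforces 4-byte alignment (mask = (1 << alignment) - 1).
 */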
int32_t tfm_spm_check_buffer_access(uint32_t partition_idx,
                                    void *start_addr,
                                    size_t len,
                                    uint32_t alignment)
{
    uintptr_t start_addr_value = (uintptr_t)start_addr;
    uintptr_t end_addr_value = (uintptr_t)start_addr + len;
    uintptr_t alignment_mask;

    alignment_mask = (((uintptr_t)1) << alignment) - 1;

    /* Check that the pointer is aligned properly */
    if (start_addr_value & alignment_mask) {
        /* not aligned, return error */
        return 0;
    }

    /* Protect against overflow (and zero len) */
    if (end_addr_value <= start_addr_value) {
        return 0;
    }

    /* For privileged partition execution, all secure data memory and stack
     * is accessible
     */
    if (start_addr_value >= S_DATA_START &&
        end_addr_value <= (S_DATA_START + S_DATA_SIZE)) {
        return 1;
    }

    return 0;
}

void tfm_spm_get_caller_client_id_handler(uint32_t *svc_args)
{
    uintptr_t result_ptr_value = svc_args[0];
    uint32_t running_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const uint32_t running_partition_flags =
            tfm_spm_partition_get_flags(running_partition_idx);
    const struct spm_partition_runtime_data_t *curr_part_data =
            tfm_spm_partition_get_runtime_data(running_partition_idx);
    int res = 0;

    if (!(running_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_SUSPENDED) {
        /* This handler shouldn't be called from outside partition context.
         * Also if the current partition is handling IRQ, the caller partition
         * index might not be valid;
         * Partitions are only allowed to run while S domain is locked.
         */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    /* Make sure that the output pointer points to a memory area that is owned
     * by the partition
     */
    res = tfm_spm_check_buffer_access(running_partition_idx,
                                      (void *)result_ptr_value,
                                      sizeof(curr_part_data->caller_client_id),
                                      2);
    if (!res) {
        /* Not in accessible range, return error */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    *((int32_t *)result_ptr_value) = curr_part_data->caller_client_id;

    /* Store return value in r0 */
    svc_args[0] = (uint32_t)TFM_SUCCESS;
}

/* This SVC handler is called if the veneer is running in thread mode */
uint32_t tfm_spm_partition_request_svc_handler(
        const uint32_t *svc_ctx, uint32_t excReturn)
{
    struct tfm_sfn_req_s *desc_ptr;

    if (!(excReturn & EXC_RETURN_STACK_PROCESS)) {
        /* Service request SVC called with MSP active.
         * Either invalid configuration for Thread mode or SVC called
         * from Handler mode, which is not supported.
         * FixMe: error severity TBD
         */
        ERROR_MSG("Service request SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    desc_ptr = (struct tfm_sfn_req_s *)svc_ctx[0];

    if (tfm_spm_sfn_request_handler(desc_ptr, excReturn) != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    return EXC_RETURN_SECURE_FUNCTION;
}

/* This SVC handler is called if a thread mode execution environment is to be
 * set up, to run an unprivileged IRQ handler
 */
uint32_t tfm_spm_depriv_req_handler(uint32_t *svc_args, uint32_t excReturn)
{
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;

    enum tfm_status_e res;

    if (excReturn & EXC_RETURN_STACK_PROCESS) {
        /* FixMe: error severity TBD */
        ERROR_MSG("Partition request SVC called with PSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition_for_irq_handling(excReturn, svc_ctx);
    if (res != TFM_SUCCESS) {
        /* The partition is in an invalid state (UNINIT or CLOSED), so none of
         * its code can be run
         */
        /* FixMe: For now this case is handled with TF-M panic, however it
         * would be possible to skip the execution of the interrupt handler,
         * and resume the execution of the interrupted code.
         */
        tfm_secure_api_error_handler();
    }
    return EXC_RETURN_SECURE_FUNCTION;
}

/* This SVC handler is called when the sfn returns */
uint32_t tfm_spm_partition_return_handler(uint32_t lr)
{
    enum tfm_status_e res;

    if (!(lr & EXC_RETURN_STACK_PROCESS)) {
        /* Partition return SVC called with MSP active.
         * This should not happen!
         */
        ERROR_MSG("Partition return SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_return_from_partition(&lr);
    if (res != TFM_SUCCESS) {
        /* Unlock errors indicate ctx database corruption or unknown anomalies.
         * Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }

    return lr;
}

/* This SVC handler is called if a deprivileged IRQ handler was executed, and
 * the execution environment is to be set back for the privileged handler mode
 */
uint32_t tfm_spm_depriv_return_handler(uint32_t *irq_svc_args, uint32_t lr)
{
    enum tfm_status_e res;
    struct tfm_state_context_t *irq_svc_ctx;

    /* Take into account the sealed stack */
    irq_svc_args += 2;

    irq_svc_ctx = (struct tfm_state_context_t *)irq_svc_args;

    if (!(lr & EXC_RETURN_STACK_PROCESS)) {
        /* Partition request SVC called with MSP active.
         * FixMe: error severity TBD
         */
        ERROR_MSG("Partition request SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_return_from_partition_irq_handling(&lr);
    if (res != TFM_SUCCESS) {
        /* Unlock errors indicate ctx database corruption or unknown anomalies.
         * Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }

    irq_svc_ctx->ra = lr;

    return EXC_RETURN_SECURE_HANDLER;
}

/* FIXME: get_irq_line_for_signal is also implemented in the ipc folder. */
/**
 * \brief Return the IRQ line number associated with a signal
 *
 * \param[in] partition_id  The ID of the partition in which we look for the
 *                          signal
 * \param[in] signal        The signal we do the query for
 *
 * \retval >=0  The IRQ line number associated with a signal in the partition
 * \retval <0   error
 */
static int32_t get_irq_line_for_signal(int32_t partition_id,
                                       psa_signal_t signal)
{
    size_t i;

    if (!IS_ONLY_ONE_BIT_IN_UINT32(signal)) {
        return -1;
    }

    for (i = 0; i < tfm_core_irq_signals_count; ++i) {
        if (tfm_core_irq_signals[i].partition_id == partition_id &&
            tfm_core_irq_signals[i].signal_value == signal) {
            return tfm_core_irq_signals[i].irq_line;
        }
    }
    return -1;
}

void tfm_spm_enable_irq_handler(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t running_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    uint32_t running_partition_id =
            tfm_spm_partition_get_partition_id(running_partition_idx);
    int32_t irq_line;

    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    tfm_spm_hal_enable_irq(irq_line);
}

void tfm_spm_disable_irq_handler(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t running_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    uint32_t running_partition_id =
            tfm_spm_partition_get_partition_id(running_partition_idx);
    int32_t irq_line;

    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    tfm_spm_hal_disable_irq(irq_line);
}

void tfm_spm_psa_wait(uint32_t *svc_args)
{
    /* Look for partition that is ready for run */
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;
    uint32_t running_partition_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;

    psa_signal_t signal_mask = svc_ctx->r0;
    uint32_t timeout = svc_ctx->r1;

    /*
     * Timeout[30:0] are reserved for future use.
     * SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    running_partition_idx = tfm_spm_partition_get_running_partition_idx();
    curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);

    if (timeout == PSA_BLOCK) {
        /* FIXME: Scheduling is not available in library model, and busy wait
         * is also not possible as this code is running in SVC context, and it
         * cannot be pre-empted by interrupts. So do nothing here for now
         */
        (void) signal_mask;
    }

    svc_ctx->r0 = curr_part_data->signal_mask;
}

void tfm_spm_psa_eoi(uint32_t *svc_args)
{
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;
    psa_signal_t irq_signal = svc_ctx->r0;
    uint32_t signal_mask;
    uint32_t running_partition_idx;
    uint32_t running_partition_id;
    const struct spm_partition_runtime_data_t *curr_part_data;
    int32_t irq_line;

    running_partition_idx = tfm_spm_partition_get_running_partition_idx();
    running_partition_id =
            tfm_spm_partition_get_partition_id(running_partition_idx);
    curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);

    irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);

    if (irq_line < 0) {
        /* FixMe: error severity TBD */
        tfm_secure_api_error_handler();
    }

    tfm_spm_hal_clear_pending_irq(irq_line);
    tfm_spm_hal_enable_irq(irq_line);

    signal_mask = curr_part_data->signal_mask & ~irq_signal;
    tfm_spm_partition_set_signal_mask(running_partition_idx, signal_mask);
}

/*
 * This function is called when a secure partition causes an error.
 * In case of an error in the error handling, a non-zero value has to be
 * returned.
 */
static void tfm_spm_partition_err_handler(const uint32_t idx, int32_t errcode)
{
    (void)errcode;

    tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_CLOSED);
}

enum spm_err_t tfm_spm_partition_init(void)
{
    struct spm_partition_desc_t *part;
    struct tfm_sfn_req_s desc;
    int32_t args[4] = {0};
    int32_t fail_cnt = 0;
    uint32_t idx;
    const struct platform_data_t **platform_data_p;

    /* Call the init function for each partition */
    for (idx = 0; idx < g_spm_partition_db.partition_count; ++idx) {
        part = &g_spm_partition_db.partitions[idx];
        platform_data_p = part->platform_data_list;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(idx,
                        *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    fail_cnt++;
                }
                ++platform_data_p;
            }
        }
        if (part->static_data->partition_init == NULL) {
            tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            tfm_spm_partition_set_caller_partition_idx(idx,
                                                   SPM_INVALID_PARTITION_IDX);
        } else {
            int32_t res;

            desc.args = args;
            desc.ns_caller = false;
            desc.sfn = (sfn_t)part->static_data->partition_init;
            desc.sp_id = part->static_data->partition_id;
            res = tfm_core_sfn_request(&desc);
            if (res == TFM_SUCCESS) {
                tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            } else {
                tfm_spm_partition_err_handler(idx, res);
                fail_cnt++;
            }
        }
    }

    tfm_spm_secure_api_init_done();

    if (fail_cnt == 0) {
        return SPM_ERR_OK;
    } else {
        return SPM_ERR_PARTITION_NOT_AVAILABLE;
    }
}

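/*
 * The interrupted/handler context stack frames below are stored on the
 * partition's ctx stack in units of uint32_t words, hence the pointer
 * adjustments by sizeof(frame) / sizeof(uint32_t).
 */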
void tfm_spm_partition_push_interrupted_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
            &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct interrupted_ctx_stack_frame_t *stack_frame =
            (struct interrupted_ctx_stack_frame_t *)runtime_data->ctx_stack_ptr;

    stack_frame->partition_state = runtime_data->partition_state;

    runtime_data->ctx_stack_ptr +=
            sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
}

void tfm_spm_partition_pop_interrupted_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
            &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct interrupted_ctx_stack_frame_t *stack_frame;

    runtime_data->ctx_stack_ptr -=
            sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);

    stack_frame = (struct interrupted_ctx_stack_frame_t *)
            runtime_data->ctx_stack_ptr;
    tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
    stack_frame->partition_state = 0;
}

void tfm_spm_partition_push_handler_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
            &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct handler_ctx_stack_frame_t *stack_frame =
            (struct handler_ctx_stack_frame_t *)
            runtime_data->ctx_stack_ptr;

    stack_frame->partition_state = runtime_data->partition_state;
    stack_frame->caller_partition_idx = runtime_data->caller_partition_idx;

    runtime_data->ctx_stack_ptr +=
            sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
}

void tfm_spm_partition_pop_handler_ctx(uint32_t partition_idx)
{
    struct spm_partition_runtime_data_t *runtime_data =
            &g_spm_partition_db.partitions[partition_idx].runtime_data;
    struct handler_ctx_stack_frame_t *stack_frame;

    runtime_data->ctx_stack_ptr -=
            sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);

    stack_frame = (struct handler_ctx_stack_frame_t *)
            runtime_data->ctx_stack_ptr;

    tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
    stack_frame->partition_state = 0;
    tfm_spm_partition_set_caller_partition_idx(
            partition_idx, stack_frame->caller_partition_idx);
    stack_frame->caller_partition_idx = 0;
}

void tfm_spm_partition_store_context(uint32_t partition_idx,
        uint32_t stack_ptr, uint32_t lr)
{
    g_spm_partition_db.partitions[partition_idx].
            runtime_data.stack_ptr = stack_ptr;
    g_spm_partition_db.partitions[partition_idx].
            runtime_data.lr = lr;
}

const struct spm_partition_runtime_data_t *
        tfm_spm_partition_get_runtime_data(uint32_t partition_idx)
{
    return &(g_spm_partition_db.partitions[partition_idx].runtime_data);
}

void tfm_spm_partition_set_state(uint32_t partition_idx, uint32_t state)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.partition_state =
            state;
    if (state == SPM_PARTITION_STATE_RUNNING ||
        state == SPM_PARTITION_STATE_HANDLING_IRQ) {
        g_spm_partition_db.running_partition_idx = partition_idx;
    }
}

void tfm_spm_partition_set_caller_partition_idx(uint32_t partition_idx,
                                                uint32_t caller_partition_idx)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.
            caller_partition_idx = caller_partition_idx;
}

void tfm_spm_partition_set_signal_mask(uint32_t partition_idx,
                                       uint32_t signal_mask)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.
            signal_mask = signal_mask;
}

void tfm_spm_partition_set_caller_client_id(uint32_t partition_idx,
                                            int32_t caller_client_id)
{
    g_spm_partition_db.partitions[partition_idx].runtime_data.
            caller_client_id = caller_client_id;
}

uint32_t tfm_spm_partition_get_running_partition_idx(void)
{
    return g_spm_partition_db.running_partition_idx;
}

void tfm_spm_partition_cleanup_context(uint32_t partition_idx)
{
    struct spm_partition_desc_t *partition =
            &(g_spm_partition_db.partitions[partition_idx]);
    int32_t i;

    partition->runtime_data.caller_partition_idx = SPM_INVALID_PARTITION_IDX;
    partition->runtime_data.iovec_args.in_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        partition->runtime_data.iovec_args.in_vec[i].base = 0;
        partition->runtime_data.iovec_args.in_vec[i].len = 0;
    }
    partition->runtime_data.iovec_args.out_len = 0;
    for (i = 0; i < PSA_MAX_IOVEC; ++i) {
        partition->runtime_data.iovec_args.out_vec[i].base = 0;
        partition->runtime_data.iovec_args.out_vec[i].len = 0;
    }
    partition->runtime_data.orig_outvec = 0;
}

void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
{
    uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
    uint32_t running_partition_flags = 0;
    uint32_t running_partition_idx;

    /* Check permissions on request type basis */

    switch (svc_ctx->r0) {
    case TFM_SPM_REQUEST_RESET_VOTE:
        running_partition_idx =
                tfm_spm_partition_get_running_partition_idx();
        running_partition_flags = tfm_spm_partition_get_flags(
                                                        running_partition_idx);

        /* Currently only PSA Root of Trust services are allowed to make Reset
         * vote request
         */
        if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
            *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
        }

        /* FixMe: this is a placeholder for checks to be performed before
         * allowing execution of reset
         */
        *res_ptr = (uint32_t)TFM_SUCCESS;

        break;
    default:
        *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
    }
}

enum spm_err_t tfm_spm_db_init(void)
{
    uint32_t i;

    /* This function initialises the partition db */

    /* For the non-secure Execution environment */
    tfm_nspm_configure_clients();

    for (i = 0; i < g_spm_partition_db.partition_count; i++) {
        g_spm_partition_db.partitions[i].runtime_data.partition_state =
                SPM_PARTITION_STATE_UNINIT;
        g_spm_partition_db.partitions[i].runtime_data.caller_partition_idx =
                SPM_INVALID_PARTITION_IDX;
        g_spm_partition_db.partitions[i].runtime_data.caller_client_id =
                TFM_INVALID_CLIENT_ID;
        g_spm_partition_db.partitions[i].runtime_data.ctx_stack_ptr =
                ctx_stack_list[i];
        g_spm_partition_db.partitions[i].static_data = &static_data_list[i];
        g_spm_partition_db.partitions[i].platform_data_list =
                platform_data_list_list[i];
    }
    g_spm_partition_db.is_init = 1;

    return SPM_ERR_OK;
}