/*
 * Copyright (c) 2017-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */
#include <stdint.h>
#include <stdbool.h>
#include <arm_cmse.h>
#include "tfm_nspm.h"
#include "secure_utilities.h"
#include "tfm_api.h"
#include "tfm_arch.h"
#include "tfm_irq_list.h"
#include "psa/service.h"
#include "tfm_core_mem_check.h"
#include "tfm_peripherals_def.h"
#include "tfm_secure_api.h"
#include "tfm_spm_hal.h"
#include "tfm/spm_api.h"
#include "tfm/spm_db.h"
#include "region_defs.h"
#include "region.h"
#include "tfm/tfm_spm_services_api.h"
#include "tfm_spm_db_func.inc"

#define EXC_RETURN_SECURE_FUNCTION 0xFFFFFFFD
#define EXC_RETURN_SECURE_HANDLER 0xFFFFFFF1

#ifndef TFM_LVL
#error TFM_LVL is not defined!
#endif

REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Base, uint32_t);
REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Limit, struct iovec_args_t)[];

/*
 * This is the "Big Lock" on the secure side, to guarantee single entry
 * to SPE
 */
extern int32_t tfm_secure_lock;
static int32_t tfm_secure_api_initializing = 1;
/**
 * \brief Build a basic exception stack frame (growing downwards from \p dst)
 *        so that an exception return enters the partition's secure function
 *        with the iovec parameters in r0-r3.
 *
 * \param[in] svc_ctx    The stacked context of the SVC that requested the call
 * \param[in] desc_ptr   The secure function request descriptor
 * \param[in] iovec_args The iovec parameters to be passed in r0-r3
 * \param[in] dst        Stack pointer; the frame is created below it
 *
 * \return The new (lower) stack pointer, pointing at the created frame.
 */
static uint32_t *prepare_partition_iovec_ctx(
                             const struct tfm_state_context_t *svc_ctx,
                             const struct tfm_sfn_req_s *desc_ptr,
                             const struct iovec_args_t *iovec_args,
                             uint32_t *dst)
{
    /* XPSR = as was when called, but make sure it's thread mode
     * (the low bits holding the exception number are cleared)
     */
    *(--dst) = svc_ctx->xpsr & 0xFFFFFE00U;
    /* ReturnAddress = resume veneer in new context */
    *(--dst) = svc_ctx->ra;
    /* LR = sfn address */
    *(--dst) = (uint32_t)desc_ptr->sfn;
    /* R12 = don't care */
    *(--dst) = 0U;

    /* R0-R3 = sfn arguments (pushed in reverse order: r3 first, r0 last) */
    *(--dst) = iovec_args->out_len;
    *(--dst) = (uint32_t)iovec_args->out_vec;
    *(--dst) = iovec_args->in_len;
    *(--dst) = (uint32_t)iovec_args->in_vec;

    return dst;
}
68
/**
 * \brief Create a stack frame that sets the execution environment to thread
 *        mode on exception return.
 *
 * \param[in] svc_ctx        The stacked SVC context
 * \param[in] unpriv_handler The unprivileged IRQ handler to be called
 * \param[in] dst            A pointer where the context is to be created. (the
 *                           pointer is considered to be a stack pointer, and
 *                           the frame is created below it)
 *
 * \return A pointer pointing at the created stack frame.
 */
static int32_t *prepare_partition_irq_ctx(
                             const struct tfm_state_context_t *svc_ctx,
                             sfn_t unpriv_handler,
                             int32_t *dst)
{
    int i;

    /* XPSR = as was when called, but make sure it's thread mode
     * (the low bits holding the exception number are cleared)
     */
    *(--dst) = svc_ctx->xpsr & 0xFFFFFE00;
    /* ReturnAddress = resume to the privileged handler code, but execute it
     * unprivileged.
     */
    *(--dst) = svc_ctx->ra;
    /* LR = start address */
    *(--dst) = (int32_t)unpriv_handler;

    /* R12, R3-R0: unused arguments, zero-filled */
    for (i = 0; i < 5; ++i) {
        *(--dst) = 0;
    }

    return dst;
}
104
105static void restore_caller_ctx(const struct tfm_state_context_t *svc_ctx,
106 struct tfm_state_context_t *target_ctx)
107{
108 /* ReturnAddress = resume veneer after second SVC */
109 target_ctx->ra = svc_ctx->ra;
110
111 /* R0 = function return value */
112 target_ctx->r0 = svc_ctx->r0;
113
114 return;
115}
116
/**
 * \brief Check whether the iovec parameters are valid, and the memory ranges
 *        are in the possession of the calling partition.
 *
 * \param[in] desc_ptr The secure function request descriptor
 *
 * \return Return /ref TFM_SUCCESS if the iovec parameters are valid, error code
 *         otherwise as in /ref tfm_status_e
 */
static enum tfm_status_e tfm_core_check_sfn_parameters(
                                           const struct tfm_sfn_req_s *desc_ptr)
{
    struct psa_invec *in_vec = (psa_invec *)desc_ptr->args[0];
    size_t in_len;
    struct psa_outvec *out_vec = (psa_outvec *)desc_ptr->args[2];
    size_t out_len;
    uint32_t i;

    /* The raw vector counts are signed; reject negative counts before the
     * cast to size_t below
     */
    if ((desc_ptr->args[1] < 0) || (desc_ptr->args[3] < 0)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    in_len = (size_t)(desc_ptr->args[1]);
    out_len = (size_t)(desc_ptr->args[3]);

    /* The number of vectors are within range. Extra checks to avoid overflow */
    if ((in_len > PSA_MAX_IOVEC) || (out_len > PSA_MAX_IOVEC) ||
        (in_len + out_len > PSA_MAX_IOVEC)) {
        return TFM_ERROR_INVALID_PARAMETER;
    }

    /* Check whether the caller partition has write access to the iovec
     * structures themselves. Use the TT instruction for this.
     * A zero count with a non-NULL vector pointer (and vice versa) is
     * rejected as inconsistent.
     */
    if (in_len > 0) {
        if ((in_vec == NULL) ||
            (tfm_core_has_write_access_to_region(in_vec,
                            sizeof(psa_invec)*in_len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (in_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }
    if (out_len > 0) {
        if ((out_vec == NULL) ||
            (tfm_core_has_write_access_to_region(out_vec,
                            sizeof(psa_outvec)*out_len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    } else {
        if (out_vec != NULL) {
            return TFM_ERROR_INVALID_PARAMETER;
        }
    }

    /* Check whether the caller partition has access to the data inside the
     * iovecs: read access for input buffers, write access for output buffers
     */
    for (i = 0; i < in_len; ++i) {
        if (in_vec[i].len > 0) {
            if ((in_vec[i].base == NULL) ||
                (tfm_core_has_read_access_to_region(in_vec[i].base,
                            in_vec[i].len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }
    for (i = 0; i < out_len; ++i) {
        if (out_vec[i].len > 0) {
            if ((out_vec[i].base == NULL) ||
                (tfm_core_has_write_access_to_region(out_vec[i].base,
                            out_vec[i].len, desc_ptr->ns_caller,
                            TFM_PARTITION_UNPRIVILEGED_MODE) != TFM_SUCCESS)) {
                return TFM_ERROR_INVALID_PARAMETER;
            }
        }
    }

    return TFM_SUCCESS;
}
202
203static void tfm_copy_iovec_parameters(struct iovec_args_t *target,
204 const struct iovec_args_t *source)
205{
206 size_t i;
207
208 /* The vectors have been sanity checked already, and since then the
209 * interrupts have been kept disabled. So we can be sure that the
210 * vectors haven't been tampered with since the check. So it is safe to pass
211 * it to the called partition.
212 */
213
214 target->in_len = source->in_len;
215 for (i = 0; i < source->in_len; ++i) {
216 target->in_vec[i].base = source->in_vec[i].base;
217 target->in_vec[i].len = source->in_vec[i].len;
218 }
219 target->out_len = source->out_len;
220 for (i = 0; i < source->out_len; ++i) {
221 target->out_vec[i].base = source->out_vec[i].base;
222 target->out_vec[i].len = source->out_vec[i].len;
223 }
224}
225
226static void tfm_clear_iovec_parameters(struct iovec_args_t *args)
227{
228 int i;
229
230 args->in_len = 0;
231 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
232 args->in_vec[i].base = NULL;
233 args->in_vec[i].len = 0;
234 }
235 args->out_len = 0;
236 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
237 args->out_vec[i].base = NULL;
238 args->out_vec[i].len = 0;
239 }
240}
241
242/**
243 * \brief Check whether the partitions for the secure function call are in a
244 * proper state.
245 *
246 * \param[in] curr_partition_state State of the partition to be called
247 * \param[in] caller_partition_state State of the caller partition
248 *
249 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
250 */
251static enum tfm_status_e check_partition_state(uint32_t curr_partition_state,
252 uint32_t caller_partition_state)
253{
254 if (caller_partition_state != SPM_PARTITION_STATE_RUNNING) {
255 /* Calling partition from non-running state (e.g. during handling IRQ)
256 * is not allowed.
257 */
258 return TFM_ERROR_INVALID_EXC_MODE;
259 }
260
261 if (curr_partition_state == SPM_PARTITION_STATE_RUNNING ||
262 curr_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
263 curr_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
264 curr_partition_state == SPM_PARTITION_STATE_BLOCKED) {
265 /* Active partitions cannot be called! */
266 return TFM_ERROR_PARTITION_NON_REENTRANT;
267 } else if (curr_partition_state != SPM_PARTITION_STATE_IDLE) {
268 /* The partition to be called is not in a proper state */
269 return TFM_SECURE_LOCK_FAILED;
270 }
271 return TFM_SUCCESS;
272}
273
274/**
275 * \brief Check whether the partitions for the secure function call of irq are
276 * in a proper state.
277 *
278 * \param[in] called_partition_state State of the partition to be called
279 *
280 * \return \ref TFM_SUCCESS if the check passes, error otherwise.
281 */
282static enum tfm_status_e check_irq_partition_state(
283 uint32_t called_partition_state)
284{
285 if (called_partition_state == SPM_PARTITION_STATE_IDLE ||
286 called_partition_state == SPM_PARTITION_STATE_RUNNING ||
287 called_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
288 called_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
289 called_partition_state == SPM_PARTITION_STATE_BLOCKED) {
290 return TFM_SUCCESS;
291 }
292 return TFM_SECURE_LOCK_FAILED;
293}
294
295/**
296 * \brief Calculate the address where the iovec parameters are to be saved for
297 * the called partition.
298 *
299 * \param[in] partition_idx The index of the partition to be called.
300 *
301 * \return The address where the iovec parameters should be saved.
302 */
303static struct iovec_args_t *get_iovec_args_stack_address(uint32_t partition_idx)
304{
305 /* Save the iovecs on the common stack. */
TTornblom99f0be22019-12-17 16:22:38 +0100306 return &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800307}
308
/**
 * \brief Set up the execution context to enter a partition's secure function.
 *
 * Validates partition state consistency, records the caller's context,
 * assigns the client ID, and (for NS callers or during initialization)
 * switches to the shared secure partition stack with a freshly built
 * exception stack frame for the callee.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 * \param[in] excReturn The caller's EXC_RETURN value, stored so it can be
 *                      restored when the partition returns
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 */
static enum tfm_status_e tfm_start_partition(
                                         const struct tfm_sfn_req_s *desc_ptr,
                                         uint32_t excReturn)
{
    enum tfm_status_e res;
    uint32_t caller_partition_idx = desc_ptr->caller_part_idx;
    const struct spm_partition_runtime_data_t *curr_part_data;
    const struct spm_partition_runtime_data_t *caller_part_data;
    uint32_t caller_flags;
    register uint32_t partition_idx;
    uint32_t psp;
    uint32_t partition_psp, partition_psplim;
    uint32_t partition_state;
    uint32_t caller_partition_state;
    struct tfm_state_context_t *svc_ctx;
    uint32_t caller_partition_id;
    int32_t client_id;
    struct iovec_args_t *iovec_args;

    psp = __get_PSP();
    svc_ctx = (struct tfm_state_context_t *)psp;
    caller_flags = tfm_spm_partition_get_flags(caller_partition_idx);

    /* Check partition state consistency: an App RoT caller flag must match
     * a secure (non-NS) caller and vice versa
     */
    if (((caller_flags & SPM_PART_FLAG_APP_ROT) != 0)
        != (!desc_ptr->ns_caller)) {
        /* Partition state inconsistency detected */
        return TFM_SECURE_LOCK_FAILED;
    }

    partition_idx = get_partition_idx(desc_ptr->sp_id);

    curr_part_data = tfm_spm_partition_get_runtime_data(partition_idx);
    caller_part_data = tfm_spm_partition_get_runtime_data(caller_partition_idx);
    partition_state = curr_part_data->partition_state;
    caller_partition_state = caller_part_data->partition_state;
    caller_partition_id = tfm_spm_partition_get_partition_id(
                                                          caller_partition_idx);

    /* State checks are skipped while the secure API is still initializing */
    if (!tfm_secure_api_initializing) {
        res = check_partition_state(partition_state, caller_partition_state);
        if (res != TFM_SUCCESS) {
            return res;
        }
    }

    /* Prepare switch to shared secure partition stack */
    /* In case the call is coming from the non-secure world, we save the iovecs
     * on the top of the stack. So the memory area, that can actually be used
     * as stack by the partitions starts at a lower address
     */
    partition_psp =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
    partition_psplim =
        (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Base);

    /* Store the context for the partition call */
    tfm_spm_partition_set_caller_partition_idx(partition_idx,
                                               caller_partition_idx);
    tfm_spm_partition_store_context(caller_partition_idx, psp, excReturn);

    if ((caller_flags & SPM_PART_FLAG_APP_ROT)) {
        /* Secure caller: the client ID is the caller partition's own ID */
        tfm_spm_partition_set_caller_client_id(partition_idx,
                                               caller_partition_id);
    } else {
        /* NS caller: client IDs from the non-secure side must be negative */
        client_id = tfm_nspm_get_current_client_id();
        if (client_id >= 0) {
            return TFM_SECURE_LOCK_FAILED;
        }
        tfm_spm_partition_set_caller_client_id(partition_idx, client_id);
    }

    /* In level one, only switch context and return from exception if in
     * handler mode
     */
    if ((desc_ptr->ns_caller) || (tfm_secure_api_initializing)) {
        if (tfm_spm_partition_set_iovec(partition_idx, desc_ptr->args) !=
            SPM_ERR_OK) {
            return TFM_ERROR_GENERIC;
        }
        iovec_args = get_iovec_args_stack_address(partition_idx);
        tfm_copy_iovec_parameters(iovec_args, &(curr_part_data->iovec_args));

        /* Prepare the partition context, update stack ptr */
        psp = (uint32_t)prepare_partition_iovec_ctx(svc_ctx, desc_ptr,
                                                    iovec_args,
                                                 (uint32_t *)partition_psp);
        __set_PSP(psp);
        tfm_arch_set_psplim(partition_psplim);
    }

    tfm_spm_partition_set_state(caller_partition_idx,
                                SPM_PARTITION_STATE_BLOCKED);
    tfm_spm_partition_set_state(partition_idx, SPM_PARTITION_STATE_RUNNING);
    tfm_secure_lock++;

    return TFM_SUCCESS;
}
407
/**
 * \brief Set up a thread mode execution environment to run a partition's
 *        unprivileged IRQ handler.
 *
 * \param[in] excReturn The EXC_RETURN value of the SVC (currently unused)
 * \param[in] svc_ctx   The stacked SVC context; r0-r3 carry the handler
 *                      partition ID, the unprivileged handler address, the
 *                      IRQ signal value and the IRQ line number respectively
 *
 * \return \ref TFM_SUCCESS on success, error code otherwise.
 */
static enum tfm_status_e tfm_start_partition_for_irq_handling(
                                          uint32_t excReturn,
                                          struct tfm_state_context_t *svc_ctx)
{
    uint32_t handler_partition_id = svc_ctx->r0;
    sfn_t unpriv_handler = (sfn_t)svc_ctx->r1;
    uint32_t irq_signal = svc_ctx->r2;
    IRQn_Type irq_line = (IRQn_Type) svc_ctx->r3;
    enum tfm_status_e res;
    uint32_t psp = __get_PSP();
    uint32_t handler_partition_psp;
    uint32_t handler_partition_state;
    uint32_t interrupted_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t handler_partition_idx;

    handler_partition_idx = get_partition_idx(handler_partition_id);
    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                         handler_partition_idx);
    handler_partition_state = handler_part_data->partition_state;

    res = check_irq_partition_state(handler_partition_state);
    if (res != TFM_SUCCESS) {
        return res;
    }

    /* set mask for the partition */
    tfm_spm_partition_set_signal_mask(
                                 handler_partition_idx,
                                 handler_part_data->signal_mask | irq_signal);

    /* Disable the IRQ line while the signal is being handled */
    tfm_spm_hal_disable_irq(irq_line);

    /* save the current context of the interrupted partition */
    tfm_spm_partition_push_interrupted_ctx(interrupted_partition_idx);

    handler_partition_psp = psp;

    /* save the current context of the handler partition */
    tfm_spm_partition_push_handler_ctx(handler_partition_idx);

    /* Store caller for the partition */
    tfm_spm_partition_set_caller_partition_idx(handler_partition_idx,
                                               interrupted_partition_idx);

    /* Build the handler's thread mode stack frame below the current PSP */
    psp = (uint32_t)prepare_partition_irq_ctx(svc_ctx, unpriv_handler,
                                          (int32_t *)handler_partition_psp);
    __set_PSP(psp);

    tfm_spm_partition_set_state(interrupted_partition_idx,
                                SPM_PARTITION_STATE_SUSPENDED);
    tfm_spm_partition_set_state(handler_partition_idx,
                                SPM_PARTITION_STATE_HANDLING_IRQ);

    return TFM_SUCCESS;
}
465
/**
 * \brief Unwind a completed secure function call and restore the caller's
 *        context.
 *
 * \param[out] excReturn Where to store the caller's saved EXC_RETURN value.
 *                       NOTE(review): may be NULL only when the caller
 *                       guarantees the context-restore branch is not taken
 *                       (secure-to-secure return in thread mode, see
 *                       tfm_spm_sfn_request_thread_mode) — otherwise it is
 *                       dereferenced unconditionally.
 *
 * \return \ref TFM_SUCCESS on success, \ref TFM_SECURE_UNLOCK_FAILED if the
 *         partition context database is inconsistent.
 */
static enum tfm_status_e tfm_return_from_partition(uint32_t *excReturn)
{
    uint32_t current_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *curr_part_data, *ret_part_data;
    uint32_t return_partition_idx;
    uint32_t return_partition_flags;
    uint32_t psp = __get_PSP();
    size_t i;
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;
    struct iovec_args_t *iovec_args;

    if (current_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    curr_part_data = tfm_spm_partition_get_runtime_data(current_partition_idx);
    return_partition_idx = curr_part_data->caller_partition_idx;

    if (return_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    ret_part_data = tfm_spm_partition_get_runtime_data(return_partition_idx);

    return_partition_flags = tfm_spm_partition_get_flags(return_partition_idx);

    tfm_secure_lock--;

    if (!(return_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        (tfm_secure_api_initializing)) {
        /* In TFM level 1 context restore is only done when
         * returning to NS or after initialization
         */
        /* Restore caller context */
        restore_caller_ctx(svc_ctx,
            (struct tfm_state_context_t *)ret_part_data->stack_ptr);
        *excReturn = ret_part_data->lr;
        __set_PSP(ret_part_data->stack_ptr);
        REGION_DECLARE_T(Image$$, ARM_LIB_STACK, $$ZI$$Base, uint32_t)[];
        uint32_t psp_stack_bottom =
            (uint32_t)REGION_NAME(Image$$, ARM_LIB_STACK, $$ZI$$Base);
        tfm_arch_set_psplim(psp_stack_bottom);

        /* The iovecs live in the last slot of the shared secure stack area */
        iovec_args = &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];

        /* Copy back the output lengths updated by the partition into the
         * caller's original outvec structures
         */
        for (i = 0; i < curr_part_data->iovec_args.out_len; ++i) {
            curr_part_data->orig_outvec[i].len = iovec_args->out_vec[i].len;
        }
        /* Wipe the shared iovec slot so no stale pointers remain */
        tfm_clear_iovec_parameters(iovec_args);
    }

    tfm_spm_partition_cleanup_context(current_partition_idx);

    tfm_spm_partition_set_state(current_partition_idx,
                                SPM_PARTITION_STATE_IDLE);
    tfm_spm_partition_set_state(return_partition_idx,
                                SPM_PARTITION_STATE_RUNNING);

    return TFM_SUCCESS;
}
527
/**
 * \brief Restore the execution environment after an unprivileged IRQ handler
 *        has finished running.
 *
 * \param[out] excReturn Where to store the EXC_RETURN value for resuming the
 *                       privileged handler
 *
 * \return \ref TFM_SUCCESS on success, \ref TFM_SECURE_UNLOCK_FAILED if the
 *         partition context database is inconsistent.
 */
static enum tfm_status_e tfm_return_from_partition_irq_handling(
                                                            uint32_t *excReturn)
{
    uint32_t handler_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const struct spm_partition_runtime_data_t *handler_part_data;
    uint32_t interrupted_partition_idx;
    uint32_t psp = __get_PSP();
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;

    if (handler_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    handler_part_data = tfm_spm_partition_get_runtime_data(
                                                         handler_partition_idx);
    interrupted_partition_idx = handler_part_data->caller_partition_idx;

    if (interrupted_partition_idx == SPM_INVALID_PARTITION_IDX) {
        return TFM_SECURE_UNLOCK_FAILED;
    }

    /* For level 1, modify PSP, so that the SVC stack frame disappears,
     * and return to the privileged handler using the stack frame still on the
     * MSP stack.
     */
    *excReturn = svc_ctx->ra;
    psp += sizeof(struct tfm_state_context_t);

    tfm_spm_partition_pop_handler_ctx(handler_partition_idx);
    tfm_spm_partition_pop_interrupted_ctx(interrupted_partition_idx);

    __set_PSP(psp);

    return TFM_SUCCESS;
}
564
565static enum tfm_status_e tfm_check_sfn_req_integrity(
566 const struct tfm_sfn_req_s *desc_ptr)
567{
568 if ((desc_ptr == NULL) ||
569 (desc_ptr->sp_id == 0) ||
570 (desc_ptr->sfn == NULL)) {
571 /* invalid parameter */
572 return TFM_ERROR_INVALID_PARAMETER;
573 }
574 return TFM_SUCCESS;
575}
576
577static enum tfm_status_e tfm_core_check_sfn_req_rules(
578 const struct tfm_sfn_req_s *desc_ptr)
579{
580 /* Check partition idx validity */
581 if (desc_ptr->caller_part_idx == SPM_INVALID_PARTITION_IDX) {
582 return TFM_ERROR_NO_ACTIVE_PARTITION;
583 }
584
585 if ((desc_ptr->ns_caller) && (tfm_secure_lock != 0)) {
586 /* Secure domain is already locked!
587 * This should only happen if caller is secure partition!
588 */
589 /* This scenario is a potential security breach.
590 * Error is handled in caller.
591 */
592 return TFM_ERROR_SECURE_DOMAIN_LOCKED;
593 }
594
595 if (tfm_secure_api_initializing) {
596 int32_t id =
597 tfm_spm_partition_get_partition_id(desc_ptr->caller_part_idx);
598
599 if ((id != TFM_SP_CORE_ID) || (tfm_secure_lock != 0)) {
600 /* Invalid request during system initialization */
601 ERROR_MSG("Invalid service request during initialization!");
602 return TFM_ERROR_NOT_INITIALIZED;
603 }
604 }
605
606 return TFM_SUCCESS;
607}
608
609void tfm_spm_secure_api_init_done(void)
610{
611 tfm_secure_api_initializing = 0;
612}
613
/**
 * \brief Handle a secure function request: validate it and start the target
 *        partition.
 *
 * Interrupts are disabled around the parameter checks and the partition
 * state transition, so the sanity-checked request cannot be modified
 * concurrently before the partition is entered.
 *
 * \param[in] desc_ptr  The secure function request descriptor
 * \param[in] excReturn The caller's EXC_RETURN value
 *
 * \return \ref TFM_SUCCESS on success; on validation failure
 *         tfm_secure_api_error_handler() is invoked (assumed not to return —
 *         NOTE(review): confirm, the code falls through otherwise).
 */
enum tfm_status_e tfm_spm_sfn_request_handler(
                             struct tfm_sfn_req_s *desc_ptr, uint32_t excReturn)
{
    enum tfm_status_e res;

    res = tfm_check_sfn_req_integrity(desc_ptr);
    if (res != TFM_SUCCESS) {
        ERROR_MSG("Invalid service request!");
        tfm_secure_api_error_handler();
    }

    __disable_irq();

    desc_ptr->caller_part_idx = tfm_spm_partition_get_running_partition_idx();

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        __enable_irq();
        tfm_secure_api_error_handler();
    }

    res = tfm_core_check_sfn_req_rules(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* FixMe: error compartmentalization TBD */
        tfm_spm_partition_set_state(
            desc_ptr->caller_part_idx, SPM_PARTITION_STATE_CLOSED);
        __enable_irq();
        ERROR_MSG("Unauthorized service request!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition(desc_ptr, excReturn);
    if (res != TFM_SUCCESS) {
        /* FixMe: consider possible fault scenarios */
        __enable_irq();
        ERROR_MSG("Failed to process service request!");
        tfm_secure_api_error_handler();
    }

    __enable_irq();

    return res;
}
658
/**
 * \brief Thread mode entry for secure-partition-to-secure-partition calls in
 *        TFM level 1: validates the request, calls the secure function
 *        directly, then unwinds the partition context.
 *
 * \param[in] desc_ptr The secure function request descriptor
 *
 * \return The secure function's return value on success, or an error code
 *         from \ref tfm_status_e cast to int32_t.
 */
int32_t tfm_spm_sfn_request_thread_mode(struct tfm_sfn_req_s *desc_ptr)
{
    enum tfm_status_e res;
    int32_t *args;
    int32_t retVal;

    res = tfm_core_check_sfn_parameters(desc_ptr);
    if (res != TFM_SUCCESS) {
        /* The sanity check of iovecs failed. */
        return (int32_t)res;
    }

    /* No excReturn value is needed as no exception handling is used */
    res = tfm_spm_sfn_request_handler(desc_ptr, 0);

    if (res != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    /* Secure partition to secure partition call in TFM level 1: invoke the
     * secure function directly with the four iovec arguments
     */
    args = desc_ptr->args;
    retVal = desc_ptr->sfn(args[0], args[1], args[2], args[3]);

    /* return handler should restore original exc_return value...
     * NULL is safe here: the context-restore branch that dereferences the
     * pointer is not taken for a secure-to-secure return
     */
    res = tfm_return_from_partition(NULL);
    if (res == TFM_SUCCESS) {
        /* If unlock successful, pass SS return value to caller */
        return retVal;
    } else {
        /* Unlock errors indicate ctx database corruption or unknown
         * anomalies. Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }
    return (int32_t)res;
}
696
697void tfm_spm_validate_secure_caller_handler(uint32_t *svc_args)
698{
699
700 enum tfm_status_e res = TFM_ERROR_GENERIC;
701 uint32_t running_partition_idx =
702 tfm_spm_partition_get_running_partition_idx();
703 const struct spm_partition_runtime_data_t *curr_part_data =
704 tfm_spm_partition_get_runtime_data(running_partition_idx);
705 uint32_t running_partition_flags =
706 tfm_spm_partition_get_flags(running_partition_idx);
707 uint32_t caller_partition_flags =
708 tfm_spm_partition_get_flags(curr_part_data->caller_partition_idx);
709
710 if (!(running_partition_flags & SPM_PART_FLAG_APP_ROT) ||
711 curr_part_data->partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
712 curr_part_data->partition_state == SPM_PARTITION_STATE_SUSPENDED) {
713 /* This handler shouldn't be called from outside partition context.
714 * Also if the current partition is handling IRQ, the caller partition
715 * index might not be valid;
716 * Partitions are only allowed to run while S domain is locked.
717 */
718 svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
719 return;
720 }
721
722 /* Store return value in r0 */
723 if (caller_partition_flags & SPM_PART_FLAG_APP_ROT) {
724 res = TFM_SUCCESS;
725 }
726 svc_args[0] = (uint32_t)res;
727}
728
729int32_t tfm_spm_check_buffer_access(uint32_t partition_idx,
730 void *start_addr,
731 size_t len,
732 uint32_t alignment)
733{
734 uintptr_t start_addr_value = (uintptr_t)start_addr;
735 uintptr_t end_addr_value = (uintptr_t)start_addr + len;
736 uintptr_t alignment_mask;
737
738 alignment_mask = (((uintptr_t)1) << alignment) - 1;
739
740 /* Check that the pointer is aligned properly */
741 if (start_addr_value & alignment_mask) {
742 /* not aligned, return error */
743 return 0;
744 }
745
746 /* Protect against overflow (and zero len) */
747 if (end_addr_value <= start_addr_value) {
748 return 0;
749 }
750
751 /* For privileged partition execution, all secure data memory and stack
752 * is accessible
753 */
754 if (start_addr_value >= S_DATA_START &&
755 end_addr_value <= (S_DATA_START + S_DATA_SIZE)) {
756 return 1;
757 }
758
759 return 0;
760}
761
/**
 * \brief SVC handler that writes the client ID of the running partition's
 *        caller to a partition-supplied address.
 *
 * \param[in,out] svc_args Stacked SVC arguments; r0 (svc_args[0]) holds the
 *                         destination address on entry and receives the
 *                         status code on return.
 */
void tfm_spm_get_caller_client_id_handler(uint32_t *svc_args)
{
    uintptr_t result_ptr_value = svc_args[0];
    uint32_t running_partition_idx =
            tfm_spm_partition_get_running_partition_idx();
    const uint32_t running_partition_flags =
            tfm_spm_partition_get_flags(running_partition_idx);
    const struct spm_partition_runtime_data_t *curr_part_data =
            tfm_spm_partition_get_runtime_data(running_partition_idx);
    int res = 0;

    if (!(running_partition_flags & SPM_PART_FLAG_APP_ROT) ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
        curr_part_data->partition_state == SPM_PARTITION_STATE_SUSPENDED) {
        /* This handler shouldn't be called from outside partition context.
         * Also if the current partition is handling IRQ, the caller partition
         * index might not be valid;
         * Partitions are only allowed to run while S domain is locked.
         */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    /* Make sure that the output pointer points to a memory area that is owned
     * by the partition (4-byte aligned, within secure data)
     */
    res = tfm_spm_check_buffer_access(running_partition_idx,
                                      (void *)result_ptr_value,
                                      sizeof(curr_part_data->caller_client_id),
                                      2);
    if (!res) {
        /* Not in accessible range, return error */
        svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
        return;
    }

    *((int32_t *)result_ptr_value) = curr_part_data->caller_client_id;

    /* Store return value in r0 */
    svc_args[0] = (uint32_t)TFM_SUCCESS;
}
803
/* This SVC handler is called if veneer is running in thread mode */
uint32_t tfm_spm_partition_request_svc_handler(
        const uint32_t *svc_ctx, uint32_t excReturn)
{
    struct tfm_sfn_req_s *desc_ptr;

    /* The SVC must have been taken from thread mode with PSP active */
    if (!(excReturn & EXC_RETURN_STACK_PROCESS)) {
        /* Service request SVC called with MSP active.
         * Either invalid configuration for Thread mode or SVC called
         * from Handler mode, which is not supported.
         * FixMe: error severity TBD
         */
        ERROR_MSG("Service request SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    /* The request descriptor address was passed in the stacked r0 */
    desc_ptr = (struct tfm_sfn_req_s *)svc_ctx[0];

    if (tfm_spm_sfn_request_handler(desc_ptr, excReturn) != TFM_SUCCESS) {
        tfm_secure_api_error_handler();
    }

    return EXC_RETURN_SECURE_FUNCTION;
}
828
/* This SVC handler is called, if a thread mode execution environment is to
 * be set up, to run an unprivileged IRQ handler
 */
uint32_t tfm_spm_depriv_req_handler(uint32_t *svc_args, uint32_t excReturn)
{
    struct tfm_state_context_t *svc_ctx =
            (struct tfm_state_context_t *)svc_args;

    enum tfm_status_e res;

    /* This SVC must arrive from Handler mode, i.e. with MSP active */
    if (excReturn & EXC_RETURN_STACK_PROCESS) {
        /* FixMe: error severity TBD */
        ERROR_MSG("Partition request SVC called with PSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_start_partition_for_irq_handling(excReturn, svc_ctx);
    if (res != TFM_SUCCESS) {
        /* The partition is in an invalid state (UNINIT or CLOSED), so none of
         * its code can be run
         */
        /* FixMe: For now this case is handled with TF-M panic, however it would
         * be possible to skip the execution of the interrupt handler, and
         * resume the execution of the interrupted code.
         */
        tfm_secure_api_error_handler();
    }
    return EXC_RETURN_SECURE_FUNCTION;
}
858
/* This SVC handler is called when sfn returns */
uint32_t tfm_spm_partition_return_handler(uint32_t lr)
{
    enum tfm_status_e res;

    if (!(lr & EXC_RETURN_STACK_PROCESS)) {
        /* Partition return SVC called with MSP active.
         * This should not happen!
         */
        ERROR_MSG("Partition return SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    /* Unwind the partition context; lr receives the caller's saved
     * EXC_RETURN value
     */
    res = tfm_return_from_partition(&lr);
    if (res != TFM_SUCCESS) {
        /* Unlock errors indicate ctx database corruption or unknown anomalies
         * Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }

    return lr;
}
883
/* This SVC handler is called if a deprivileged IRQ handler was executed, and
 * the execution environment is to be set back for the privileged handler mode
 */
uint32_t tfm_spm_depriv_return_handler(uint32_t *irq_svc_args, uint32_t lr)
{
    enum tfm_status_e res;
    struct tfm_state_context_t *irq_svc_ctx =
            (struct tfm_state_context_t *)irq_svc_args;

    if (!(lr & EXC_RETURN_STACK_PROCESS)) {
        /* Partition request SVC called with MSP active.
         * FixMe: error severity TBD
         */
        ERROR_MSG("Partition request SVC called with MSP active!");
        tfm_secure_api_error_handler();
    }

    res = tfm_return_from_partition_irq_handling(&lr);
    if (res != TFM_SUCCESS) {
        /* Unlock errors indicate ctx database corruption or unknown anomalies
         * Halt execution
         */
        ERROR_MSG("Secure API error during unlock!");
        tfm_secure_api_error_handler();
    }

    /* Resume the privileged handler at the return address recovered from the
     * interrupted context
     */
    irq_svc_ctx->ra = lr;

    return EXC_RETURN_SECURE_HANDLER;
}
914
915/* FIXME: get_irq_line_for_signal is also implemented in the ipc folder. */
916/**
917 * \brief Return the IRQ line number associated with a signal
918 *
919 * \param[in] partition_id The ID of the partition in which we look for the
920 * signal
921 * \param[in] signal The signal we do the query for
922 *
923 * \retval >=0 The IRQ line number associated with a signal in the partition
924 * \retval <0 error
925 */
TTornblomfaf74f52020-03-04 17:56:27 +0100926static IRQn_Type get_irq_line_for_signal(int32_t partition_id,
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800927 psa_signal_t signal)
928{
929 size_t i;
930
931 for (i = 0; i < tfm_core_irq_signals_count; ++i) {
932 if (tfm_core_irq_signals[i].partition_id == partition_id &&
933 tfm_core_irq_signals[i].signal_value == signal) {
934 return tfm_core_irq_signals[i].irq_line;
935 }
936 }
TTornblomfaf74f52020-03-04 17:56:27 +0100937 return (IRQn_Type) -1;
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800938}
939
940void tfm_spm_enable_irq_handler(uint32_t *svc_args)
941{
942 struct tfm_state_context_t *svc_ctx =
943 (struct tfm_state_context_t *)svc_args;
944 psa_signal_t irq_signal = svc_ctx->r0;
945 uint32_t running_partition_idx =
946 tfm_spm_partition_get_running_partition_idx();
947 uint32_t running_partition_id =
948 tfm_spm_partition_get_partition_id(running_partition_idx);
TTornblomfaf74f52020-03-04 17:56:27 +0100949 IRQn_Type irq_line;
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800950
951 /* Only a single signal is allowed */
952 if (!tfm_is_one_bit_set(irq_signal)) {
953 /* FixMe: error severity TBD */
954 tfm_secure_api_error_handler();
955 }
956
957 irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
958
959 if (irq_line < 0) {
960 /* FixMe: error severity TBD */
961 tfm_secure_api_error_handler();
962 }
963
964 tfm_spm_hal_enable_irq(irq_line);
965}
966
967void tfm_spm_disable_irq_handler(uint32_t *svc_args)
968{
969 struct tfm_state_context_t *svc_ctx =
970 (struct tfm_state_context_t *)svc_args;
971 psa_signal_t irq_signal = svc_ctx->r0;
972 uint32_t running_partition_idx =
973 tfm_spm_partition_get_running_partition_idx();
974 uint32_t running_partition_id =
975 tfm_spm_partition_get_partition_id(running_partition_idx);
TTornblomfaf74f52020-03-04 17:56:27 +0100976 IRQn_Type irq_line;
Mingyang Sunabb1aab2020-02-18 13:49:08 +0800977
978 /* Only a single signal is allowed */
979 if (!tfm_is_one_bit_set(irq_signal)) {
980 /* FixMe: error severity TBD */
981 tfm_secure_api_error_handler();
982 }
983
984 irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
985
986 if (irq_line < 0) {
987 /* FixMe: error severity TBD */
988 tfm_secure_api_error_handler();
989 }
990
991 tfm_spm_hal_disable_irq(irq_line);
992}
993
994void tfm_spm_psa_wait(uint32_t *svc_args)
995{
996 /* Look for partition that is ready for run */
997 struct tfm_state_context_t *svc_ctx =
998 (struct tfm_state_context_t *)svc_args;
999 uint32_t running_partition_idx;
1000 const struct spm_partition_runtime_data_t *curr_part_data;
1001
1002 psa_signal_t signal_mask = svc_ctx->r0;
1003 uint32_t timeout = svc_ctx->r1;
1004
1005 /*
1006 * Timeout[30:0] are reserved for future use.
1007 * SPM must ignore the value of RES.
1008 */
1009 timeout &= PSA_TIMEOUT_MASK;
1010
1011 running_partition_idx = tfm_spm_partition_get_running_partition_idx();
1012 curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);
1013
1014 if (timeout == PSA_BLOCK) {
1015 /* FIXME: Scheduling is not available in library model, and busy wait is
1016 * also not possible as this code is running in SVC context, and it
1017 * cannot be pre-empted by interrupts. So do nothing here for now
1018 */
1019 (void) signal_mask;
1020 }
1021
1022 svc_ctx->r0 = curr_part_data->signal_mask;
1023}
1024
1025void tfm_spm_psa_eoi(uint32_t *svc_args)
1026{
1027 struct tfm_state_context_t *svc_ctx =
1028 (struct tfm_state_context_t *)svc_args;
1029 psa_signal_t irq_signal = svc_ctx->r0;
1030 uint32_t signal_mask;
1031 uint32_t running_partition_idx;
1032 uint32_t running_partition_id;
1033 const struct spm_partition_runtime_data_t *curr_part_data;
TTornblomfaf74f52020-03-04 17:56:27 +01001034 IRQn_Type irq_line;
Mingyang Sunabb1aab2020-02-18 13:49:08 +08001035
1036 running_partition_idx = tfm_spm_partition_get_running_partition_idx();
1037 running_partition_id =
1038 tfm_spm_partition_get_partition_id(running_partition_idx);
1039 curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);
1040
1041 /* Only a single signal is allowed */
1042 if (!tfm_is_one_bit_set(irq_signal)) {
1043 tfm_secure_api_error_handler();
1044 }
1045
1046 irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
1047
1048 if (irq_line < 0) {
1049 /* FixMe: error severity TBD */
1050 tfm_secure_api_error_handler();
1051 }
1052
1053 tfm_spm_hal_clear_pending_irq(irq_line);
1054 tfm_spm_hal_enable_irq(irq_line);
1055
1056 signal_mask = curr_part_data->signal_mask & ~irq_signal;
1057 tfm_spm_partition_set_signal_mask(running_partition_idx, signal_mask);
1058}
Mingyang Sunda01a972019-07-12 17:32:59 +08001059
1060/*
1061 * This function is called when a secure partition causes an error.
1062 * In case of an error in the error handling, a non-zero value have to be
1063 * returned.
1064 */
1065static void tfm_spm_partition_err_handler(
1066 const struct spm_partition_desc_t *partition,
Mingyang Sunda01a972019-07-12 17:32:59 +08001067 int32_t err_code)
1068{
Mingyang Sunda01a972019-07-12 17:32:59 +08001069 (void)err_code;
Ken Liuf250b8b2019-12-27 16:31:24 +08001070
Summer Qin423dbef2019-08-22 15:59:35 +08001071 tfm_spm_partition_set_state(partition->static_data->partition_id,
Mingyang Sunda01a972019-07-12 17:32:59 +08001072 SPM_PARTITION_STATE_CLOSED);
1073}
1074
/**
 * \brief Initialise all secure partitions in the partition database.
 *
 * For each partition this applies the platform's default isolation
 * configuration (if any platform data is present) and then runs the
 * partition's init function, if one is registered, via a regular secure
 * function request.
 *
 * \return SPM_ERR_OK if every step succeeded,
 *         SPM_ERR_PARTITION_NOT_AVAILABLE if any isolation configuration or
 *         partition init function failed.
 */
enum spm_err_t tfm_spm_partition_init(void)
{
    struct spm_partition_desc_t *part;
    struct tfm_sfn_req_s desc;
    int32_t args[4] = {0};      /* init SFNs are called with zeroed iovecs */
    int32_t fail_cnt = 0;       /* counts isolation and init failures */
    uint32_t idx;
    const struct tfm_spm_partition_platform_data_t **platform_data_p;

    /* Call the init function for each partition */
    for (idx = 0; idx < g_spm_partition_db.partition_count; ++idx) {
        part = &g_spm_partition_db.partitions[idx];
        /* Apply each entry of the partition's NULL-terminated platform
         * data list to the platform isolation hardware.
         */
        platform_data_p = part->platform_data_list;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(idx,
                    *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    fail_cnt++;
                }
                ++platform_data_p;
            }
        }
        if (part->static_data->partition_init == NULL) {
            /* No init function: the partition is immediately idle with no
             * caller recorded.
             */
            tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            tfm_spm_partition_set_caller_partition_idx(idx,
                                                   SPM_INVALID_PARTITION_IDX);
        } else {
            int32_t res;

            /* Run the init function through the normal secure function
             * request path so it executes in the partition's context.
             */
            desc.args = args;
            desc.ns_caller = false;
            desc.sfn = (sfn_t)part->static_data->partition_init;
            desc.sp_id = part->static_data->partition_id;
            res = tfm_core_sfn_request(&desc);
            if (res == TFM_SUCCESS) {
                tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
            } else {
                /* Init failed: close the partition */
                tfm_spm_partition_err_handler(part, res);
                fail_cnt++;
            }
        }
    }

    /* From this point on the secure API is no longer in initialisation mode */
    tfm_spm_secure_api_init_done();

    if (fail_cnt == 0) {
        return SPM_ERR_OK;
    } else {
        return SPM_ERR_PARTITION_NOT_AVAILABLE;
    }
}
1126
1127void tfm_spm_partition_push_interrupted_ctx(uint32_t partition_idx)
1128{
1129 struct spm_partition_runtime_data_t *runtime_data =
1130 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1131 struct interrupted_ctx_stack_frame_t *stack_frame =
Edison Ai7aff9e82019-07-11 14:56:46 +08001132 (struct interrupted_ctx_stack_frame_t *)runtime_data->ctx_stack_ptr;
Mingyang Sunda01a972019-07-12 17:32:59 +08001133
1134 stack_frame->partition_state = runtime_data->partition_state;
Matt463ed582019-12-20 12:31:25 +08001135
1136 runtime_data->ctx_stack_ptr +=
1137 sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
Mingyang Sunda01a972019-07-12 17:32:59 +08001138}
1139
1140void tfm_spm_partition_pop_interrupted_ctx(uint32_t partition_idx)
1141{
1142 struct spm_partition_runtime_data_t *runtime_data =
1143 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1144 struct interrupted_ctx_stack_frame_t *stack_frame;
1145
Matt463ed582019-12-20 12:31:25 +08001146 runtime_data->ctx_stack_ptr -=
1147 sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
1148
Mingyang Sunda01a972019-07-12 17:32:59 +08001149 stack_frame = (struct interrupted_ctx_stack_frame_t *)
1150 runtime_data->ctx_stack_ptr;
1151 tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
1152 stack_frame->partition_state = 0;
Mingyang Sunda01a972019-07-12 17:32:59 +08001153}
1154
1155void tfm_spm_partition_push_handler_ctx(uint32_t partition_idx)
1156{
1157 struct spm_partition_runtime_data_t *runtime_data =
1158 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1159 struct handler_ctx_stack_frame_t *stack_frame =
1160 (struct handler_ctx_stack_frame_t *)
1161 runtime_data->ctx_stack_ptr;
1162
1163 stack_frame->partition_state = runtime_data->partition_state;
1164 stack_frame->caller_partition_idx = runtime_data->caller_partition_idx;
1165
1166 runtime_data->ctx_stack_ptr +=
1167 sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
1168}
1169
1170void tfm_spm_partition_pop_handler_ctx(uint32_t partition_idx)
1171{
1172 struct spm_partition_runtime_data_t *runtime_data =
1173 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1174 struct handler_ctx_stack_frame_t *stack_frame;
1175
1176 runtime_data->ctx_stack_ptr -=
1177 sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
1178
1179 stack_frame = (struct handler_ctx_stack_frame_t *)
1180 runtime_data->ctx_stack_ptr;
1181
1182 tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
1183 stack_frame->partition_state = 0;
1184 tfm_spm_partition_set_caller_partition_idx(
1185 partition_idx, stack_frame->caller_partition_idx);
1186 stack_frame->caller_partition_idx = 0;
1187}
1188
Mingyang Sunda01a972019-07-12 17:32:59 +08001189void tfm_spm_partition_store_context(uint32_t partition_idx,
1190 uint32_t stack_ptr, uint32_t lr)
1191{
1192 g_spm_partition_db.partitions[partition_idx].
1193 runtime_data.stack_ptr = stack_ptr;
1194 g_spm_partition_db.partitions[partition_idx].
1195 runtime_data.lr = lr;
1196}
1197
1198const struct spm_partition_runtime_data_t *
1199 tfm_spm_partition_get_runtime_data(uint32_t partition_idx)
1200{
1201 return &(g_spm_partition_db.partitions[partition_idx].runtime_data);
1202}
1203
1204void tfm_spm_partition_set_state(uint32_t partition_idx, uint32_t state)
1205{
1206 g_spm_partition_db.partitions[partition_idx].runtime_data.partition_state =
1207 state;
1208 if (state == SPM_PARTITION_STATE_RUNNING ||
1209 state == SPM_PARTITION_STATE_HANDLING_IRQ) {
1210 g_spm_partition_db.running_partition_idx = partition_idx;
1211 }
1212}
1213
1214void tfm_spm_partition_set_caller_partition_idx(uint32_t partition_idx,
1215 uint32_t caller_partition_idx)
1216{
1217 g_spm_partition_db.partitions[partition_idx].runtime_data.
1218 caller_partition_idx = caller_partition_idx;
1219}
1220
1221void tfm_spm_partition_set_signal_mask(uint32_t partition_idx,
1222 uint32_t signal_mask)
1223{
1224 g_spm_partition_db.partitions[partition_idx].runtime_data.
1225 signal_mask = signal_mask;
1226}
1227
1228void tfm_spm_partition_set_caller_client_id(uint32_t partition_idx,
1229 int32_t caller_client_id)
1230{
1231 g_spm_partition_db.partitions[partition_idx].runtime_data.
1232 caller_client_id = caller_client_id;
1233}
1234
Mingyang Sunda01a972019-07-12 17:32:59 +08001235enum spm_err_t tfm_spm_partition_set_iovec(uint32_t partition_idx,
1236 const int32_t *args)
1237{
1238 struct spm_partition_runtime_data_t *runtime_data =
1239 &g_spm_partition_db.partitions[partition_idx].runtime_data;
1240 size_t i;
1241
1242 if ((args[1] < 0) || (args[3] < 0)) {
1243 return SPM_ERR_INVALID_PARAMETER;
1244 }
1245
1246 runtime_data->iovec_args.in_len = (size_t)args[1];
1247 for (i = 0U; i < runtime_data->iovec_args.in_len; ++i) {
1248 runtime_data->iovec_args.in_vec[i].base =
1249 ((psa_invec *)args[0])[i].base;
1250 runtime_data->iovec_args.in_vec[i].len = ((psa_invec *)args[0])[i].len;
1251 }
1252 runtime_data->iovec_args.out_len = (size_t)args[3];
1253 for (i = 0U; i < runtime_data->iovec_args.out_len; ++i) {
1254 runtime_data->iovec_args.out_vec[i].base =
1255 ((psa_outvec *)args[2])[i].base;
1256 runtime_data->iovec_args.out_vec[i].len =
1257 ((psa_outvec *)args[2])[i].len;
1258 }
1259 runtime_data->orig_outvec = (psa_outvec *)args[2];
Mingyang Sunda01a972019-07-12 17:32:59 +08001260
1261 return SPM_ERR_OK;
1262}
1263
/**
 * \brief Get the index of the currently running partition.
 *
 * \return Index into g_spm_partition_db.partitions[] of the running
 *         partition (maintained by tfm_spm_partition_set_state()).
 */
uint32_t tfm_spm_partition_get_running_partition_idx(void)
{
    return g_spm_partition_db.running_partition_idx;
}
1268
1269void tfm_spm_partition_cleanup_context(uint32_t partition_idx)
1270{
1271 struct spm_partition_desc_t *partition =
1272 &(g_spm_partition_db.partitions[partition_idx]);
1273 int32_t i;
1274
1275 partition->runtime_data.caller_partition_idx = SPM_INVALID_PARTITION_IDX;
Mingyang Sunda01a972019-07-12 17:32:59 +08001276 partition->runtime_data.iovec_args.in_len = 0;
1277 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
1278 partition->runtime_data.iovec_args.in_vec[i].base = 0;
1279 partition->runtime_data.iovec_args.in_vec[i].len = 0;
1280 }
1281 partition->runtime_data.iovec_args.out_len = 0;
1282 for (i = 0; i < PSA_MAX_IOVEC; ++i) {
1283 partition->runtime_data.iovec_args.out_vec[i].base = 0;
1284 partition->runtime_data.iovec_args.out_vec[i].len = 0;
1285 }
1286 partition->runtime_data.orig_outvec = 0;
Summer Qin423dbef2019-08-22 15:59:35 +08001287}
Summer Qin830c5542020-02-14 13:44:20 +08001288
1289void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
1290{
1291 uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
1292 uint32_t running_partition_flags = 0;
1293 uint32_t running_partition_idx;
1294
1295 /* Check permissions on request type basis */
1296
1297 switch (svc_ctx->r0) {
1298 case TFM_SPM_REQUEST_RESET_VOTE:
1299 running_partition_idx =
1300 tfm_spm_partition_get_running_partition_idx();
1301 running_partition_flags = tfm_spm_partition_get_flags(
1302 running_partition_idx);
1303
1304 /* Currently only PSA Root of Trust services are allowed to make Reset
1305 * vote request
1306 */
1307 if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
1308 *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
1309 }
1310
1311 /* FixMe: this is a placeholder for checks to be performed before
1312 * allowing execution of reset
1313 */
1314 *res_ptr = (uint32_t)TFM_SUCCESS;
1315
1316 break;
1317 default:
1318 *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
1319 }
1320}
Mingyang Sunbd7ceb52020-06-11 16:53:03 +08001321
1322enum spm_err_t tfm_spm_db_init(void)
1323{
1324 uint32_t i;
1325
1326 /* This function initialises partition db */
1327
1328 /* For the non secure Execution environment */
1329 tfm_nspm_configure_clients();
1330
1331 for (i = 0; i < g_spm_partition_db.partition_count; i++) {
1332 g_spm_partition_db.partitions[i].runtime_data.partition_state =
1333 SPM_PARTITION_STATE_UNINIT;
1334 g_spm_partition_db.partitions[i].runtime_data.caller_partition_idx =
1335 SPM_INVALID_PARTITION_IDX;
1336 g_spm_partition_db.partitions[i].runtime_data.caller_client_id =
1337 TFM_INVALID_CLIENT_ID;
1338 g_spm_partition_db.partitions[i].runtime_data.ctx_stack_ptr =
1339 ctx_stack_list[i];
1340 g_spm_partition_db.partitions[i].static_data = &static_data_list[i];
1341 g_spm_partition_db.partitions[i].platform_data_list =
1342 platform_data_list_list[i];
1343 }
1344 g_spm_partition_db.is_init = 1;
1345
1346 return SPM_ERR_OK;
1347}