/*
 * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <inttypes.h>
#include <stdbool.h>
#include "bitops.h"
#include "fih.h"
#include "psa/client.h"
#include "psa/service.h"
#include "tfm_thread.h"
#include "tfm_wait.h"
#include "internal_errors.h"
#include "tfm_spm_hal.h"
#include "tfm_api.h"
#include "tfm_secure_api.h"
#include "tfm_memory_utils.h"
#include "tfm_hal_defs.h"
#include "tfm_hal_isolation.h"
#include "spm_ipc.h"
#include "tfm_peripherals_def.h"
#include "tfm_core_utils.h"
#include "tfm_nspm.h"
#include "tfm_rpc.h"
#include "tfm_core_trustzone.h"
#include "lists.h"
#include "tfm_pools.h"
#include "region.h"
#include "psa_manifest/pid.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/asset_defs.h"
#include "load/spm_load_api.h"
#include "load/irq_defs.h"

/* Partition and service runtime data list head/runtime data table */
static struct partition_head_t partitions_listhead;
static struct service_head_t services_listhead;
struct service_t *stateless_services_ref_tbl[STATIC_HANDLE_NUM_LIMIT];

/* Pools */
TFM_POOL_DECLARE(conn_handle_pool, sizeof(struct tfm_conn_handle_t),
                 TFM_CONN_HANDLE_MAX_NUM);

/* The veneer section names come from the scatter file */
REGION_DECLARE(Image$$, TFM_UNPRIV_CODE, $$RO$$Base);
REGION_DECLARE(Image$$, TFM_UNPRIV_CODE, $$RO$$Limit);

void spm_interrupt_handler(struct partition_load_info_t *p_ldinf,
                           psa_signal_t signal,
                           uint32_t irq_line,
                           psa_flih_func flih_func);

#include "tfm_secure_irq_handlers_ipc.inc"

/*********************** Connection handle conversion APIs *******************/

#define CONVERSION_FACTOR_BITOFFSET 3
#define CONVERSION_FACTOR_VALUE (1 << CONVERSION_FACTOR_BITOFFSET)
/* Set 32 as the maximum */
#define CONVERSION_FACTOR_VALUE_MAX 0x20

#if CONVERSION_FACTOR_VALUE > CONVERSION_FACTOR_VALUE_MAX
#error "CONVERSION FACTOR OUT OF RANGE"
#endif

static uint32_t loop_index;

/*
 * A handle instance psa_handle_t allocated inside SPM is actually a memory
 * address within the handle pool. Returning this handle directly to the
 * client exposes information about secure memory addresses. Therefore, the
 * handle is converted into another value that does not represent the memory
 * address, to avoid exposing secure memory directly to clients.
 *
 * This function converts the handle instance into another value by scaling
 * the handle's offset in the pool; the converted value is called a user
 * handle.
 *
 * The formula:
 *  user_handle = (handle_instance - POOL_START) * CONVERSION_FACTOR_VALUE +
 *                CLIENT_HANDLE_VALUE_MIN + loop_index
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 *
 * note:
 *  loop_index is used to guarantee that the same handle instance is
 *  converted into different user handles within a short time.
 */
psa_handle_t tfm_spm_to_user_handle(struct tfm_conn_handle_t *handle_instance)
{
    psa_handle_t user_handle;

    loop_index = (loop_index + 1) % CONVERSION_FACTOR_VALUE;
    user_handle = (psa_handle_t)((((uintptr_t)handle_instance -
                  (uintptr_t)conn_handle_pool) << CONVERSION_FACTOR_BITOFFSET) +
                  CLIENT_HANDLE_VALUE_MIN + loop_index);

    return user_handle;
}
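
/*
 * Worked example (illustrative numbers only, assuming the default
 * CONVERSION_FACTOR_BITOFFSET of 3): a handle instance located 0x40 bytes
 * from the start of conn_handle_pool converts to
 *  user_handle = (0x40 << 3) + CLIENT_HANDLE_VALUE_MIN + loop_index
 *              = 0x200 + CLIENT_HANDLE_VALUE_MIN + loop_index
 * and tfm_spm_to_handle_instance() below recovers the same pool offset,
 * because (user_handle - CLIENT_HANDLE_VALUE_MIN) >> 3 == 0x40 for any
 * loop_index in [0, CONVERSION_FACTOR_VALUE - 1].
 */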

/*
 * This function converts a user handle into the corresponding handle
 * instance. The converted value is validated before returning; an invalid
 * handle instance is returned as NULL.
 *
 * The formula:
 *  handle_instance = ((user_handle - CLIENT_HANDLE_VALUE_MIN) /
 *                    CONVERSION_FACTOR_VALUE) + POOL_START
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 */
struct tfm_conn_handle_t *tfm_spm_to_handle_instance(psa_handle_t user_handle)
{
    struct tfm_conn_handle_t *handle_instance;

    if (user_handle == PSA_NULL_HANDLE) {
        return NULL;
    }

    handle_instance = (struct tfm_conn_handle_t *)((((uintptr_t)user_handle -
                      CLIENT_HANDLE_VALUE_MIN) >> CONVERSION_FACTOR_BITOFFSET) +
                      (uintptr_t)conn_handle_pool);

    return handle_instance;
}
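
/*
 * Note: apart from the PSA_NULL_HANDLE check above, the returned pointer is
 * not guaranteed to reference a live pool entry. Callers in this file
 * additionally validate it against the pool, e.g. via
 * is_valid_chunk_data_in_pool() in tfm_spm_get_msg_from_handle() or via
 * tfm_spm_validate_conn_handle(), before dereferencing it.
 */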

/* Service handle management functions */
struct tfm_conn_handle_t *tfm_spm_create_conn_handle(struct service_t *service,
                                                     int32_t client_id)
{
    struct tfm_conn_handle_t *p_handle;

    TFM_CORE_ASSERT(service);

    /* Get a buffer for the handle list structure from the handle pool */
    p_handle = (struct tfm_conn_handle_t *)tfm_pool_alloc(conn_handle_pool);
    if (!p_handle) {
        return NULL;
    }

    p_handle->service = service;
    p_handle->status = TFM_HANDLE_STATUS_IDLE;
    p_handle->client_id = client_id;

    /* Add the handle node to the list for subsequent PSA functions */
    BI_LIST_INSERT_BEFORE(&service->handle_list, &p_handle->list);

    return p_handle;
}

int32_t tfm_spm_validate_conn_handle(
                                    const struct tfm_conn_handle_t *conn_handle,
                                    int32_t client_id)
{
    /* Check that the handle address is valid */
    if (is_valid_chunk_data_in_pool(conn_handle_pool,
                                    (uint8_t *)conn_handle) != true) {
        return SPM_ERROR_GENERIC;
    }

    /* Check that the handle caller is correct */
    if (conn_handle->client_id != client_id) {
        return SPM_ERROR_GENERIC;
    }

    return SPM_SUCCESS;
}

int32_t tfm_spm_free_conn_handle(struct service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(conn_handle != NULL);

    /* Clear magic as the handle is not used anymore */
    conn_handle->internal_msg.magic = 0;

    /* Remove node from handle list */
    BI_LIST_REMOVE_NODE(&conn_handle->list);

    /* Return the handle buffer to the pool */
    tfm_pool_free(conn_handle_pool, conn_handle);
    return SPM_SUCCESS;
}

int32_t tfm_spm_set_rhandle(struct service_t *service,
                            struct tfm_conn_handle_t *conn_handle,
                            void *rhandle)
{
    TFM_CORE_ASSERT(service);
    /* Setting a reverse handle is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    conn_handle->rhandle = rhandle;
    return SPM_SUCCESS;
}

/**
 * \brief Get reverse handle value from connection handle.
 *
 * \param[in] service           Target service context pointer
 * \param[in] conn_handle       Connection handle created by
 *                              tfm_spm_create_conn_handle()
 *
 * \retval void *               Success
 * \retval "Does not return"    Panic for the following cases:
 *                              service pointer is NULL
 *                              handle is \ref PSA_NULL_HANDLE
 *                              handle node cannot be found
 */
static void *tfm_spm_get_rhandle(struct service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    /* Getting a reverse handle is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    return conn_handle->rhandle;
}

/* Partition management functions */

struct tfm_msg_body_t *tfm_spm_get_msg_by_signal(struct partition_t *partition,
                                                 psa_signal_t signal)
{
    struct bi_list_node_t *node, *head;
    struct tfm_msg_body_t *tmp_msg, *msg = NULL;

    TFM_CORE_ASSERT(partition);

    head = &partition->msg_list;

    if (BI_LIST_IS_EMPTY(head)) {
        return NULL;
    }

    /*
     * There may be multiple messages for this RoT Service signal, so do not
     * clear the partition signal mask until no messages remain. The search
     * may be optimized.
     */
    BI_LIST_FOR_EACH(node, head) {
        tmp_msg = TO_CONTAINER(node, struct tfm_msg_body_t, msg_node);
        if (tmp_msg->service->p_ldinf->signal == signal && msg) {
            return msg;
        } else if (tmp_msg->service->p_ldinf->signal == signal) {
            msg = tmp_msg;
            BI_LIST_REMOVE_NODE(node);
        }
    }

    partition->signals_asserted &= ~signal;

    return msg;
}

#if TFM_LVL != 1
/**
 * \brief Change the privilege mode for partition thread mode.
 *
 * \param[in] privileged        Privileged mode,
 *                              \ref TFM_PARTITION_PRIVILEGED_MODE
 *                              and \ref TFM_PARTITION_UNPRIVILEGED_MODE
 *
 * \note Barrier instructions are not called by this function, and if
 *       it is called in thread mode, it might be necessary to call
 *       them after this function returns.
 */
static void tfm_spm_partition_change_privilege(uint32_t privileged)
{
    CONTROL_Type ctrl;

    ctrl.w = __get_CONTROL();

    if (privileged == TFM_PARTITION_PRIVILEGED_MODE) {
        ctrl.b.nPRIV = 0;
    } else {
        ctrl.b.nPRIV = 1;
    }

    __set_CONTROL(ctrl.w);
}
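
/*
 * Illustrative usage only (a sketch, not how this file calls it): when the
 * privilege level is changed from thread mode, a caller would typically
 * follow the CONTROL update with barriers so the change takes effect before
 * subsequent instructions, e.g.
 *     tfm_spm_partition_change_privilege(TFM_PARTITION_UNPRIVILEGED_MODE);
 *     __DSB();
 *     __ISB();
 */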
#endif /* if(TFM_LVL != 1) */

uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
{
#if TFM_LVL == 1
    return TFM_PARTITION_PRIVILEGED_MODE;
#else /* TFM_LVL == 1 */
    if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
        return TFM_PARTITION_PRIVILEGED_MODE;
    } else {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
#endif /* TFM_LVL == 1 */
}

struct service_t *tfm_spm_get_service_by_sid(uint32_t sid)
{
    struct service_t *p_prev, *p_curr;

    UNI_LIST_FOR_EACH_PREV(p_prev, p_curr, &services_listhead) {
        if (p_curr->p_ldinf->sid == sid) {
            UNI_LIST_MOVE_AFTER(&services_listhead, p_prev, p_curr);
            return p_curr;
        }
    }

    return NULL;
}

/**
 * \brief Get the partition context by partition ID.
 *
 * \param[in] partition_id      Partition identity
 *
 * \retval NULL                 Failed
 * \retval "Not NULL"           Target partition context pointer,
 *                              \ref partition_t structures
 */
struct partition_t *tfm_spm_get_partition_by_id(int32_t partition_id)
{
    struct partition_t *p_part;

    UNI_LIST_FOR_EACH(p_part, &partitions_listhead) {
        if (p_part->p_ldinf->pid == partition_id) {
            return p_part;
        }
    }

    return NULL;
}

struct partition_t *tfm_spm_get_running_partition(void)
{
    struct tfm_core_thread_t *pth = tfm_core_thrd_get_curr();
    struct partition_t *partition;

    partition = TO_CONTAINER(pth, struct partition_t, sp_thread);

    return partition;
}

int32_t tfm_spm_check_client_version(struct service_t *service,
                                     uint32_t version)
{
    TFM_CORE_ASSERT(service);

    switch (SERVICE_GET_VERSION_POLICY(service->p_ldinf->flags)) {
    case SERVICE_VERSION_POLICY_RELAXED:
        if (version > service->p_ldinf->version) {
            return SPM_ERROR_VERSION;
        }
        break;
    case SERVICE_VERSION_POLICY_STRICT:
        if (version != service->p_ldinf->version) {
            return SPM_ERROR_VERSION;
        }
        break;
    default:
        return SPM_ERROR_VERSION;
    }
    return SPM_SUCCESS;
}

int32_t tfm_spm_check_authorization(uint32_t sid,
                                    struct service_t *service,
                                    bool ns_caller)
{
    struct partition_t *partition = NULL;
    uint32_t *dep;
    int32_t i;

    TFM_CORE_ASSERT(service);

    if (ns_caller) {
        if (!SERVICE_IS_NS_ACCESSIBLE(service->p_ldinf->flags)) {
            return SPM_ERROR_GENERIC;
        }
    } else {
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }

        dep = (uint32_t *)LOAD_INFO_DEPS(partition->p_ldinf);
        for (i = 0; i < partition->p_ldinf->ndeps; i++) {
            if (dep[i] == sid) {
                break;
            }
        }

        if (i == partition->p_ldinf->ndeps) {
            return SPM_ERROR_GENERIC;
        }
    }
    return SPM_SUCCESS;
}

/* Message functions */

struct tfm_msg_body_t *tfm_spm_get_msg_from_handle(psa_handle_t msg_handle)
{
    /*
     * The message handle passed by the caller is considered invalid in the
     * following cases:
     *   1. Not a valid message handle. (The address of a message is not the
     *      address of a possible handle from the pool.)
     *   2. The handle does not belong to the caller partition. (The handle is
     *      either unused, or owned by another partition.)
     * Check the conditions above.
     */
    struct tfm_msg_body_t *p_msg;
    int32_t partition_id;
    struct tfm_conn_handle_t *p_conn_handle =
                                     tfm_spm_to_handle_instance(msg_handle);

    if (is_valid_chunk_data_in_pool(
        conn_handle_pool, (uint8_t *)p_conn_handle) != 1) {
        return NULL;
    }

    p_msg = &p_conn_handle->internal_msg;

    /*
     * Check that the magic number is correct. This proves that the message
     * structure contains an active message.
     */
    if (p_msg->magic != TFM_MSG_MAGIC) {
        return NULL;
    }

    /* Check that the running partition owns the message */
    partition_id = tfm_spm_partition_get_running_partition_id();
    if (partition_id != p_msg->service->partition->p_ldinf->pid) {
        return NULL;
    }

    return p_msg;
}

struct tfm_msg_body_t *
    tfm_spm_get_msg_buffer_from_conn_handle(struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(conn_handle != NULL);

    return &(conn_handle->internal_msg);
}

void tfm_spm_fill_msg(struct tfm_msg_body_t *msg,
                      struct service_t *service,
                      psa_handle_t handle,
                      int32_t type, int32_t client_id,
                      psa_invec *invec, size_t in_len,
                      psa_outvec *outvec, size_t out_len,
                      psa_outvec *caller_outvec)
{
    uint32_t i;
    struct tfm_conn_handle_t *conn_handle;

    TFM_CORE_ASSERT(msg);
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(!(invec == NULL && in_len != 0));
    TFM_CORE_ASSERT(!(outvec == NULL && out_len != 0));
    TFM_CORE_ASSERT(in_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(out_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(in_len + out_len <= PSA_MAX_IOVEC);

    /* Clear message buffer before using it */
    spm_memset(msg, 0, sizeof(struct tfm_msg_body_t));

    tfm_event_init(&msg->ack_evnt);
    msg->magic = TFM_MSG_MAGIC;
    msg->service = service;
    msg->caller_outvec = caller_outvec;
    msg->msg.client_id = client_id;

    /* Copy contents */
    msg->msg.type = type;

    for (i = 0; i < in_len; i++) {
        msg->msg.in_size[i] = invec[i].len;
        msg->invec[i].base = invec[i].base;
    }

    for (i = 0; i < out_len; i++) {
        msg->msg.out_size[i] = outvec[i].len;
        msg->outvec[i].base = outvec[i].base;
        /* Out len records the written size, so set it to 0 again here */
        msg->outvec[i].len = 0;
    }

    /* Use the user connection handle as the message handle */
    msg->msg.handle = handle;

    conn_handle = tfm_spm_to_handle_instance(handle);
    /* For a connected handle, set rhandle on every message */
    if (conn_handle) {
        msg->msg.rhandle = tfm_spm_get_rhandle(service, conn_handle);
    }

    /* Set the private data of the NSPE client caller in multi-core topology */
    if (TFM_CLIENT_ID_IS_NS(client_id)) {
        tfm_rpc_set_caller_data(msg, client_id);
    }
}

void tfm_spm_send_event(struct service_t *service,
                        struct tfm_msg_body_t *msg)
{
    struct partition_t *partition = NULL;
    psa_signal_t signal = 0;

    if (!msg || !service || !service->p_ldinf || !service->partition) {
        tfm_core_panic();
    }

    partition = service->partition;
    signal = service->p_ldinf->signal;

    /* Add the message to the tail of the partition message list */
    BI_LIST_INSERT_BEFORE(&partition->msg_list, &msg->msg_node);

    /* Messages put. Update signals */
    partition->signals_asserted |= signal;

    if (partition->signals_waiting & signal) {
        tfm_event_wake(
            &partition->event,
            (partition->signals_asserted & partition->signals_waiting));
        partition->signals_waiting &= ~signal;
    }

    /*
     * If it is an NS request via RPC, it is unnecessary to block the current
     * thread.
     */
    if (!is_tfm_rpc_msg(msg)) {
        tfm_event_wait(&msg->ack_evnt);
    }
}

int32_t tfm_spm_partition_get_running_partition_id(void)
{
    struct partition_t *partition;

    partition = tfm_spm_get_running_partition();
    if (partition && partition->p_ldinf) {
        return partition->p_ldinf->pid;
    } else {
        return INVALID_PARTITION_ID;
    }
}

int32_t tfm_memory_check(const void *buffer, size_t len, bool ns_caller,
                         enum tfm_memory_access_e access,
                         uint32_t privileged)
{
    enum tfm_hal_status_t err;
    uint32_t attr = 0;

    /* If len is zero, this indicates an empty buffer and base is ignored */
    if (len == 0) {
        return SPM_SUCCESS;
    }

    if (!buffer) {
        return SPM_ERROR_BAD_PARAMETERS;
    }

    if ((uintptr_t)buffer > (UINTPTR_MAX - len)) {
        return SPM_ERROR_MEMORY_CHECK;
    }

    if (access == TFM_MEMORY_ACCESS_RW) {
        attr |= (TFM_HAL_ACCESS_READABLE | TFM_HAL_ACCESS_WRITABLE);
    } else {
        attr |= TFM_HAL_ACCESS_READABLE;
    }

    if (privileged == TFM_PARTITION_UNPRIVILEGED_MODE) {
        attr |= TFM_HAL_ACCESS_UNPRIVILEGED;
    } else {
        attr &= ~TFM_HAL_ACCESS_UNPRIVILEGED;
    }

    if (ns_caller) {
        attr |= TFM_HAL_ACCESS_NS;
    }

    err = tfm_hal_memory_has_access((uintptr_t)buffer, len, attr);

    if (err == TFM_HAL_SUCCESS) {
        return SPM_SUCCESS;
    }

    return SPM_ERROR_MEMORY_CHECK;
}
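
/*
 * Illustrative example of the attribute set built above: a read-write check
 * requested on behalf of an unprivileged non-secure caller ends up passing
 *  attr = TFM_HAL_ACCESS_READABLE | TFM_HAL_ACCESS_WRITABLE |
 *         TFM_HAL_ACCESS_UNPRIVILEGED | TFM_HAL_ACCESS_NS
 * to tfm_hal_memory_has_access().
 */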

bool tfm_spm_is_ns_caller(void)
{
#if defined(TFM_MULTI_CORE_TOPOLOGY) || defined(FORWARD_PROT_MSG)
    /* A multi-core NS PSA API request is processed by PendSV. */
    return (__get_active_exc_num() == EXC_NUM_PENDSV);
#else
    struct partition_t *partition = tfm_spm_get_running_partition();

    if (!partition) {
        tfm_core_panic();
    }

    return (partition->p_ldinf->pid == TFM_SP_NON_SECURE_ID);
#endif
}

uint32_t tfm_spm_get_caller_privilege_mode(void)
{
    struct partition_t *partition;

#if defined(TFM_MULTI_CORE_TOPOLOGY) || defined(FORWARD_PROT_MSG)
    /*
     * In multi-core topology, if the PSA request is from the mailbox, the
     * client is unprivileged.
     */
    if (__get_active_exc_num() == EXC_NUM_PENDSV) {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
#endif
    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    return tfm_spm_partition_get_privileged_mode(partition->p_ldinf->flags);
}

int32_t tfm_spm_get_client_id(bool ns_caller)
{
    int32_t client_id;

    if (ns_caller) {
        client_id = tfm_nspm_get_current_client_id();
    } else {
        client_id = tfm_spm_partition_get_running_partition_id();
    }

    if (ns_caller != (client_id < 0)) {
        /* An NS client ID must be negative and a secure ID must be >= 0 */
        tfm_core_panic();
    }

    return client_id;
}

uint32_t tfm_spm_init(void)
{
    uint32_t i;
    bool privileged;
    struct partition_t *partition;
    struct tfm_core_thread_t *pth, *p_ns_entry_thread = NULL;
    const struct platform_data_t *platform_data_p;
    const struct partition_load_info_t *p_ldinf;
    struct asset_desc_t *p_asset_load;
#ifdef TFM_FIH_PROFILE_ON
    fih_int fih_rc = FIH_FAILURE;
#endif

    tfm_pool_init(conn_handle_pool,
                  POOL_BUFFER_SIZE(conn_handle_pool),
                  sizeof(struct tfm_conn_handle_t),
                  TFM_CONN_HANDLE_MAX_NUM);

    UNI_LISI_INIT_HEAD(&partitions_listhead);
    UNI_LISI_INIT_HEAD(&services_listhead);

    while (1) {
        partition = load_a_partition_assuredly(&partitions_listhead);
        if (partition == NULL) {
            break;
        }

        p_ldinf = partition->p_ldinf;

        if (p_ldinf->nservices) {
            load_services_assuredly(partition, &services_listhead,
                                    stateless_services_ref_tbl,
                                    sizeof(stateless_services_ref_tbl));
        }

        if (p_ldinf->nirqs) {
            load_irqs_assuredly(partition);
        }

        /* Init mmio assets */
        if (p_ldinf->nassets > 0) {
            if (tfm_spm_partition_get_privileged_mode(p_ldinf->flags) ==
                TFM_PARTITION_PRIVILEGED_MODE) {
                privileged = true;
            } else {
                privileged = false;
            }
        }

        p_asset_load = (struct asset_desc_t *)LOAD_INFO_ASSET(p_ldinf);
        for (i = 0; i < p_ldinf->nassets; i++) {
            /* Skip memory-based assets */
            if (!(p_asset_load[i].attr & ASSET_ATTR_NAMED_MMIO)) {
                continue;
            }

            platform_data_p = REFERENCE_TO_PTR(p_asset_load[i].dev.dev_ref,
                                               struct platform_data_t *);

            /*
             * TODO: some partitions declare MMIO that does not exist on
             * specific platforms, and the platform defines a dummy NULL
             * reference for these MMIO items, which causes 'nassets' to
             * contain several NULL items. Skip initialization of these NULL
             * items temporarily to avoid a HAL API panic.
             * Eventually, these platform-specific partitions need to be moved
             * into a platform-specific folder. Then this workaround can be
             * removed.
             */
            if (!platform_data_p) {
                continue;
            }

#ifdef TFM_FIH_PROFILE_ON
            FIH_CALL(tfm_spm_hal_configure_default_isolation, fih_rc,
                     privileged, platform_data_p);
            if (fih_not_eq(fih_rc, fih_int_encode(TFM_PLAT_ERR_SUCCESS))) {
                tfm_core_panic();
            }
#else /* TFM_FIH_PROFILE_ON */
            if (tfm_spm_hal_configure_default_isolation(privileged,
                platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                tfm_core_panic();
            }
#endif /* TFM_FIH_PROFILE_ON */
        }

        partition->signals_allowed |= PSA_DOORBELL;

        tfm_event_init(&partition->event);
        BI_LIST_INIT_NODE(&partition->msg_list);

        pth = &partition->sp_thread;

        /* Extendable partition load info is right after p_ldinf. */
        tfm_core_thrd_init(
            pth,
            POSITION_TO_ENTRY(p_ldinf->entry, tfm_core_thrd_entry_t),
            NULL,
            LOAD_ALLOCED_STACK_ADDR(p_ldinf) + p_ldinf->stack_size,
            LOAD_ALLOCED_STACK_ADDR(p_ldinf));

        pth->prior = TO_THREAD_PRIORITY(PARTITION_PRIORITY(p_ldinf->flags));

        if (p_ldinf->pid == TFM_SP_NON_SECURE_ID) {
            p_ns_entry_thread = pth;
            pth->param = (void *)tfm_spm_hal_get_ns_entry_point();
        }

        /* Kick off */
        if (tfm_core_thrd_start(pth) != THRD_SUCCESS) {
            tfm_core_panic();
        }
    }

    /*
     * All threads initialized, start the scheduler.
     *
     * NOTE:
     * It is worthwhile to give the thread object to the scheduler if the
     * background context belongs to one of the threads. Here the background
     * thread is the initialization thread that calls the SPM SVC, which
     * re-uses the non-secure entry thread's stack. After SPM initialization
     * is done, this stack is cleaned up and the background context is never
     * going to return. Tell the scheduler that the current thread is the
     * non-secure entry thread.
     */
    tfm_core_thrd_start_scheduler(p_ns_entry_thread);

    return p_ns_entry_thread->arch_ctx.lr;
}

#if TFM_LVL != 1
static void set_up_boundary(const struct partition_load_info_t *p_ldinf)
{
#if TFM_LVL == 3
#if defined(TFM_FIH_PROFILE_ON) && (TFM_LVL == 3)
    fih_int fih_rc = FIH_FAILURE;
#endif
    /*
     * FIXME: To implement isolation among partitions in isolation level 3,
     * each partition needs to run in unprivileged mode. Currently some
     * PRoTs cannot work in unprivileged mode, so make them privileged for
     * now.
     */
    if (!(p_ldinf->flags & SPM_PART_FLAG_PSA_ROT)) {
        struct asset_desc_t *p_asset =
            (struct asset_desc_t *)LOAD_INFO_ASSET(p_ldinf);
        /* Partition must have private data as the first asset in LVL3 */
        if (p_ldinf->nassets == 0) {
            tfm_core_panic();
        }
        if (p_asset->attr & ASSET_ATTR_NAMED_MMIO) {
            tfm_core_panic();
        }
        /* FIXME: only MPU-based implementations are supported currently */
#ifdef TFM_FIH_PROFILE_ON
        FIH_CALL(tfm_hal_mpu_update_partition_boundary, fih_rc,
                 p_asset->mem.start, p_asset->mem.limit);
        if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
            tfm_core_panic();
        }
#else /* TFM_FIH_PROFILE_ON */
        if (tfm_hal_mpu_update_partition_boundary(p_asset->mem.start,
                                                  p_asset->mem.limit)
                                                  != TFM_HAL_SUCCESS) {
            tfm_core_panic();
        }
#endif /* TFM_FIH_PROFILE_ON */
    }
#else /* TFM_LVL == 3 */
    (void)p_ldinf;
#endif /* TFM_LVL == 3 */
}
#endif /* TFM_LVL != 1 */

void tfm_set_up_isolation_boundary(const struct partition_t *partition)
{
#if TFM_LVL != 1
    const struct partition_load_info_t *p_ldinf;
    uint32_t is_privileged;

    p_ldinf = partition->p_ldinf;
    is_privileged = p_ldinf->flags & SPM_PART_FLAG_PSA_ROT ?
                    TFM_PARTITION_PRIVILEGED_MODE :
                    TFM_PARTITION_UNPRIVILEGED_MODE;

    tfm_spm_partition_change_privilege(is_privileged);

    set_up_boundary(p_ldinf);
#else /* TFM_LVL != 1 */
    (void)partition;
#endif /* TFM_LVL != 1 */
}

void tfm_pendsv_do_schedule(struct tfm_arch_ctx_t *p_actx)
{
    struct partition_t *p_next_partition;
    struct tfm_core_thread_t *pth_next = tfm_core_thrd_get_next();
    struct tfm_core_thread_t *pth_curr = tfm_core_thrd_get_curr();

    if (pth_next != NULL && pth_curr != pth_next) {
        p_next_partition = TO_CONTAINER(pth_next,
                                        struct partition_t,
                                        sp_thread);
        tfm_set_up_isolation_boundary(p_next_partition);

        tfm_core_thrd_switch_context(p_actx, pth_curr, pth_next);
    }

    /*
     * Handle pending mailbox message from NS in multi-core topology.
     * Empty operation on single Armv8-M platform.
     */
    tfm_rpc_client_call_handler();
}

void update_caller_outvec_len(struct tfm_msg_body_t *msg)
{
    uint32_t i;

    /*
     * FIXME: abstract this part into dedicated functions to avoid accessing
     * the thread context in the PSA layer.
     */
    /* If it is an NS request via RPC, the owner of this message is not set */
    if (!is_tfm_rpc_msg(msg)) {
        TFM_CORE_ASSERT(msg->ack_evnt.owner->state == THRD_STATE_BLOCK);
    }

    for (i = 0; i < PSA_MAX_IOVEC; i++) {
        if (msg->msg.out_size[i] == 0) {
            continue;
        }

        TFM_CORE_ASSERT(msg->caller_outvec[i].base == msg->outvec[i].base);

        msg->caller_outvec[i].len = msg->outvec[i].len;
    }
}

void notify_with_signal(int32_t partition_id, psa_signal_t signal)
{
    struct partition_t *partition = NULL;

    /*
     * The value of partition_id must be greater than zero as the target of
     * the notification must be a Secure Partition; providing a Non-secure
     * Partition ID is a fatal error.
     */
    if (!TFM_CLIENT_ID_IS_S(partition_id)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if partition_id does not correspond to a Secure
     * Partition.
     */
    partition = tfm_spm_get_partition_by_id(partition_id);
    if (!partition) {
        tfm_core_panic();
    }

    partition->signals_asserted |= signal;

    if (partition->signals_waiting & signal) {
        tfm_event_wake(
            &partition->event,
            partition->signals_asserted & partition->signals_waiting);
        partition->signals_waiting &= ~signal;
    }
}

__attribute__((naked))
static void tfm_flih_deprivileged_handling(uint32_t p_ldinf,
                                           psa_flih_func flih_func,
                                           psa_signal_t signal)
{
    __ASM volatile("SVC %0 \n"
                   "BX LR \n"
                   : : "I" (TFM_SVC_PREPARE_DEPRIV_FLIH));
}

void spm_interrupt_handler(struct partition_load_info_t *p_ldinf,
                           psa_signal_t signal,
                           uint32_t irq_line,
                           psa_flih_func flih_func)
{
    int32_t pid;
    psa_flih_result_t flih_result;

    pid = p_ldinf->pid;

    if (flih_func == NULL) {
        /* SLIH Model Handling */
        __disable_irq();
        tfm_spm_hal_disable_irq(irq_line);
        notify_with_signal(pid, signal);
        __enable_irq();
        return;
    }

    /* FLIH Model Handling */
    if (tfm_spm_partition_get_privileged_mode(p_ldinf->flags) ==
        TFM_PARTITION_PRIVILEGED_MODE) {
        flih_result = flih_func();
        if (flih_result == PSA_FLIH_SIGNAL) {
            __disable_irq();
            notify_with_signal(pid, signal);
            __enable_irq();
        } else if (flih_result != PSA_FLIH_NO_SIGNAL) {
            /*
             * Nothing needs to be done for PSA_FLIH_NO_SIGNAL.
             * But if flih_result is invalid, panic.
             */
            tfm_core_panic();
        }
    } else {
        tfm_flih_deprivileged_handling((uint32_t)p_ldinf, flih_func, signal);
    }
}

struct irq_load_info_t *get_irq_info_for_signal(
                                    const struct partition_load_info_t *p_ldinf,
                                    psa_signal_t signal)
{
    size_t i;
    struct irq_load_info_t *irq_info;

    if (!IS_ONLY_ONE_BIT_IN_UINT32(signal)) {
        return NULL;
    }

    irq_info = (struct irq_load_info_t *)LOAD_INFO_IRQ(p_ldinf);
    for (i = 0; i < p_ldinf->nirqs; i++) {
        if (irq_info[i].signal == signal) {
            return &irq_info[i];
        }
    }

    return NULL;
}

#if !defined(__ARM_ARCH_8_1M_MAIN__)
void tfm_spm_validate_caller(uint32_t *p_ctx, uint32_t exc_return)
{
    /*
     * TODO: the reentrant detection mechanism needs to be changed when there
     * are no boundaries.
     */
    uintptr_t stacked_ctx_pos;
    bool ns_caller = false;
    struct partition_t *p_cur_sp = tfm_spm_get_running_partition();
    uint32_t veneer_base =
        (uint32_t)&REGION_NAME(Image$$, TFM_UNPRIV_CODE, $$RO$$Base);
    uint32_t veneer_limit =
        (uint32_t)&REGION_NAME(Image$$, TFM_UNPRIV_CODE, $$RO$$Limit);

    if (!p_cur_sp) {
        tfm_core_panic();
    }

    /*
     * The caller security attribute detection is based on the LR of the
     * state context. However, if an SP calls PSA APIs via its own customized
     * SVC, the LR may hold a general purpose value while calling SVC.
     * Check if the caller comes from non-secure: the return address
     * (p_ctx[6]) belongs to the veneer section, and bit 0 of the LR
     * (p_ctx[5]) is zero.
     */
    if (p_ctx[6] >= veneer_base && p_ctx[6] < veneer_limit &&
        !(p_ctx[5] & TFM_VENEER_LR_BIT0_MASK)) {
        ns_caller = true;
    }

    /* If called from NS, the partition ID should be TFM_SP_NON_SECURE_ID. */
    if ((ns_caller == true) !=
        (p_cur_sp->p_ldinf->pid == TFM_SP_NON_SECURE_ID)) {
        tfm_core_panic();
    }

    if (ns_caller) {
        /*
         * The background IRQ can't be supported, since if an SP is executing,
         * the preempted context of the SP can be different from the one that
         * preempts the veneer. Check if the veneer stack contains multiple
         * contexts.
         */
        stacked_ctx_pos = (uintptr_t)p_ctx +
                          sizeof(struct tfm_state_context_t) +
                          TFM_STACK_SEALED_SIZE;

        if (is_stack_alloc_fp_space(exc_return)) {
#if defined(__FPU_USED) && (__FPU_USED == 1U)
            if (FPU->FPCCR & FPU_FPCCR_TS_Msk) {
                stacked_ctx_pos += TFM_ADDTIONAL_FP_CONTEXT_WORDS *
                                   sizeof(uint32_t);
            }
#endif
            stacked_ctx_pos += TFM_BASIC_FP_CONTEXT_WORDS * sizeof(uint32_t);
        }

        if (stacked_ctx_pos != p_cur_sp->sp_thread.stk_top) {
            tfm_core_panic();
        }
    }
}
#endif