/*
 * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <inttypes.h>
#include <stdbool.h>
#include "bitops.h"
#include "fih.h"
#include "psa/client.h"
#include "psa/service.h"
#include "tfm_thread.h"
#include "tfm_wait.h"
#include "internal_errors.h"
#include "tfm_spm_hal.h"
#include "tfm_irq_list.h"
#include "tfm_api.h"
#include "tfm_secure_api.h"
#include "tfm_memory_utils.h"
#include "tfm_hal_defs.h"
#include "tfm_hal_isolation.h"
#include "spm_ipc.h"
#include "tfm_peripherals_def.h"
#include "tfm_core_utils.h"
#include "tfm_rpc.h"
#include "tfm_core_trustzone.h"
#include "lists.h"
#include "tfm_pools.h"
#include "spm_partition_defs.h"
#include "psa_manifest/pid.h"
#include "tfm/tfm_spm_services.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/asset_defs.h"
#include "load/spm_load_api.h"

extern struct spm_partition_db_t g_spm_partition_db;
static struct service_t *all_services;

/* Pools */
TFM_POOL_DECLARE(conn_handle_pool, sizeof(struct tfm_conn_handle_t),
                 TFM_CONN_HANDLE_MAX_NUM);

void tfm_set_irq_signal(uint32_t partition_id, psa_signal_t signal,
                        uint32_t irq_line);

#include "tfm_secure_irq_handlers_ipc.inc"

/*********************** Connection handle conversion APIs *******************/

#define CONVERSION_FACTOR_BITOFFSET 3
#define CONVERSION_FACTOR_VALUE (1 << CONVERSION_FACTOR_BITOFFSET)
/* Set 32 as the maximum */
#define CONVERSION_FACTOR_VALUE_MAX 0x20

#if CONVERSION_FACTOR_VALUE > CONVERSION_FACTOR_VALUE_MAX
#error "CONVERSION FACTOR OUT OF RANGE"
#endif

static uint32_t loop_index;

/*
 * A handle instance psa_handle_t allocated inside SPM is actually a memory
 * address within the handle pool. Returning this handle to the client
 * directly exposes information about secure memory addresses. Therefore the
 * handle is converted into another value that does not represent the memory
 * address, to avoid exposing secure memory directly to clients.
 *
 * This function converts the handle instance into another value by scaling
 * the handle's offset within the pool; the converted value is called a user
 * handle.
 *
 * The formula:
 *  user_handle = (handle_instance - POOL_START) * CONVERSION_FACTOR_VALUE +
 *                CLIENT_HANDLE_VALUE_MIN + loop_index
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 *
 * note:
 *  loop_index is used to ensure that the same handle instance is converted
 *  into different user handles within a short time.
 */
psa_handle_t tfm_spm_to_user_handle(struct tfm_conn_handle_t *handle_instance)
{
    psa_handle_t user_handle;

    loop_index = (loop_index + 1) % CONVERSION_FACTOR_VALUE;
    user_handle = (psa_handle_t)((((uintptr_t)handle_instance -
                  (uintptr_t)conn_handle_pool) << CONVERSION_FACTOR_BITOFFSET) +
                  CLIENT_HANDLE_VALUE_MIN + loop_index);

    return user_handle;
}
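
/*
 * Worked example of the conversion above (illustrative only; the pool base
 * address and CLIENT_HANDLE_VALUE_MIN below are assumed values, not taken
 * from any particular platform configuration):
 *
 *   conn_handle_pool            = 0x3000A000
 *   handle_instance             = 0x3000A040
 *   CONVERSION_FACTOR_BITOFFSET = 3
 *   CLIENT_HANDLE_VALUE_MIN     = 0x30000000
 *
 *   offset      = 0x3000A040 - 0x3000A000 = 0x40
 *   user_handle = (0x40 << 3) + 0x30000000 + loop_index
 *               = 0x30000200 + loop_index
 *
 * Converting the same handle instance twice yields different user handles
 * because loop_index advances on every call.
 */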

/*
 * This function converts a user handle into the corresponding handle
 * instance. The converted value is validated before returning; an invalid
 * handle instance is returned as NULL.
 *
 * The formula:
 *  handle_instance = ((user_handle - CLIENT_HANDLE_VALUE_MIN) /
 *                    CONVERSION_FACTOR_VALUE) + POOL_START
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 */
struct tfm_conn_handle_t *tfm_spm_to_handle_instance(psa_handle_t user_handle)
{
    struct tfm_conn_handle_t *handle_instance;

    if (user_handle == PSA_NULL_HANDLE) {
        return NULL;
    }

    handle_instance = (struct tfm_conn_handle_t *)((((uintptr_t)user_handle -
                      CLIENT_HANDLE_VALUE_MIN) >> CONVERSION_FACTOR_BITOFFSET) +
                      (uintptr_t)conn_handle_pool);

    return handle_instance;
}
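
/*
 * Illustrative round trip through the two conversion helpers (a sketch, not
 * a real call site; the actual callers live in the PSA API handling code):
 *
 *   psa_handle_t user = tfm_spm_to_user_handle(p_handle);
 *   ...
 *   struct tfm_conn_handle_t *p = tfm_spm_to_handle_instance(user);
 *   if (tfm_spm_validate_conn_handle(p, client_id) != SPM_SUCCESS) {
 *       ...reject the request...
 *   }
 *
 * The loop_index added during conversion only occupies the low
 * CONVERSION_FACTOR_BITOFFSET bits, so the right shift above always recovers
 * the same pool entry.
 */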

/* Service handle management functions */
struct tfm_conn_handle_t *tfm_spm_create_conn_handle(struct service_t *service,
                                                     int32_t client_id)
{
    struct tfm_conn_handle_t *p_handle;

    TFM_CORE_ASSERT(service);

    /* Get a buffer for the handle list structure from the handle pool */
    p_handle = (struct tfm_conn_handle_t *)tfm_pool_alloc(conn_handle_pool);
    if (!p_handle) {
        return NULL;
    }

    p_handle->service = service;
    p_handle->status = TFM_HANDLE_STATUS_IDLE;
    p_handle->client_id = client_id;

    /* Add the handle node to the list for subsequent PSA functions */
    BI_LIST_INSERT_BEFORE(&service->handle_list, &p_handle->list);

    return p_handle;
}

int32_t tfm_spm_validate_conn_handle(
                                    const struct tfm_conn_handle_t *conn_handle,
                                    int32_t client_id)
{
    /* Check that the handle address is valid */
    if (is_valid_chunk_data_in_pool(conn_handle_pool,
                                    (uint8_t *)conn_handle) != true) {
        return SPM_ERROR_GENERIC;
    }

    /* Check that the handle belongs to this caller */
    if (conn_handle->client_id != client_id) {
        return SPM_ERROR_GENERIC;
    }

    return SPM_SUCCESS;
}

int32_t tfm_spm_free_conn_handle(struct service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(conn_handle != NULL);

    /* Clear the magic as the handle is not used anymore */
    conn_handle->internal_msg.magic = 0;

    /* Remove the node from the handle list */
    BI_LIST_REMOVE_NODE(&conn_handle->list);

    /* Return the handle buffer to the pool */
    tfm_pool_free(conn_handle_pool, conn_handle);
    return SPM_SUCCESS;
}

int32_t tfm_spm_set_rhandle(struct service_t *service,
                            struct tfm_conn_handle_t *conn_handle,
                            void *rhandle)
{
    TFM_CORE_ASSERT(service);
    /* Setting the reverse handle value is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    conn_handle->rhandle = rhandle;
    return SPM_SUCCESS;
}
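
/*
 * Minimal sketch of the connection handle lifecycle (illustrative only;
 * error handling is omitted and the real call sites are in the PSA API
 * handling code):
 *
 *   struct tfm_conn_handle_t *p_handle;
 *
 *   p_handle = tfm_spm_create_conn_handle(service, client_id);
 *   (void)tfm_spm_set_rhandle(service, p_handle, rhandle);
 *   ...serve the connection...
 *   (void)tfm_spm_free_conn_handle(service, p_handle);
 */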

/**
 * \brief Get the reverse handle value from a connection handle.
 *
 * \param[in] service           Target service context pointer
 * \param[in] conn_handle       Connection handle created by
 *                              tfm_spm_create_conn_handle()
 *
 * \retval void *               Success
 * \retval "Does not return"    Panics when:
 *                              the service pointer is NULL,
 *                              the handle is \ref PSA_NULL_HANDLE,
 *                              the handle node cannot be found
 */
static void *tfm_spm_get_rhandle(struct service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    /* Getting the reverse handle value is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    return conn_handle->rhandle;
}

/* Partition management functions */

struct tfm_msg_body_t *tfm_spm_get_msg_by_signal(struct partition_t *partition,
                                                 psa_signal_t signal)
{
    struct bi_list_node_t *node, *head;
    struct tfm_msg_body_t *tmp_msg, *msg = NULL;

    TFM_CORE_ASSERT(partition);

    head = &partition->msg_list;

    if (BI_LIST_IS_EMPTY(head)) {
        return NULL;
    }

    /*
     * There may be multiple messages for this RoT Service signal, so do not
     * clear the partition signal mask until no message remains. The search
     * may be optimized.
     */
    BI_LIST_FOR_EACH(node, head) {
        tmp_msg = TO_CONTAINER(node, struct tfm_msg_body_t, msg_node);
        if (tmp_msg->service->p_ldinf->signal == signal && msg) {
            return msg;
        } else if (tmp_msg->service->p_ldinf->signal == signal) {
            msg = tmp_msg;
            BI_LIST_REMOVE_NODE(node);
        }
    }

    partition->signals_asserted &= ~signal;

    return msg;
}

/**
 * \brief Returns the index of the partition with the given partition ID.
 *
 * \param[in] partition_id     Partition id
 *
 * \return the partition idx if partition_id is valid,
 *         \ref SPM_INVALID_PARTITION_IDX otherwise
 */
static uint32_t get_partition_idx(uint32_t partition_id)
{
    uint32_t i;

    if (partition_id == INVALID_PARTITION_ID) {
        return SPM_INVALID_PARTITION_IDX;
    }

    for (i = 0; i < g_spm_partition_db.partition_count; ++i) {
        if (g_spm_partition_db.partitions[i].p_ldinf->pid == partition_id) {
            return i;
        }
    }
    return SPM_INVALID_PARTITION_IDX;
}

/**
 * \brief Get the flags associated with a partition
 *
 * \param[in] partition_idx     Partition index
 *
 * \return Flags associated with the partition
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_flags(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].p_ldinf->flags;
}

#if TFM_LVL != 1
/**
 * \brief Change the privilege mode for partition thread mode.
 *
 * \param[in] privileged        Privileged mode,
 *                              \ref TFM_PARTITION_PRIVILEGED_MODE
 *                              and \ref TFM_PARTITION_UNPRIVILEGED_MODE
 *
 * \note Barrier instructions are not called by this function, and if
 *       it is called in thread mode, it might be necessary to call
 *       them after this function returns.
 */
static void tfm_spm_partition_change_privilege(uint32_t privileged)
{
    CONTROL_Type ctrl;

    ctrl.w = __get_CONTROL();

    if (privileged == TFM_PARTITION_PRIVILEGED_MODE) {
        ctrl.b.nPRIV = 0;
    } else {
        ctrl.b.nPRIV = 1;
    }

    __set_CONTROL(ctrl.w);
}
#endif /* if(TFM_LVL != 1) */

uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
{
    if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
        return TFM_PARTITION_PRIVILEGED_MODE;
    } else {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
}

bool tfm_is_partition_privileged(uint32_t partition_idx)
{
    uint32_t flags = tfm_spm_partition_get_flags(partition_idx);

    return tfm_spm_partition_get_privileged_mode(flags) ==
           TFM_PARTITION_PRIVILEGED_MODE;
}

struct service_t *tfm_spm_get_service_by_sid(uint32_t sid)
{
    struct service_t *p_serv = all_services;

    while (p_serv && p_serv->p_ldinf->sid != sid) {
        p_serv = TO_CONTAINER(BI_LIST_NEXT_NODE(&p_serv->list),
                              struct service_t, list);
        if (p_serv == all_services) {
            return NULL;
        }
    }

    return p_serv;
}

/**
 * \brief Get the partition context by partition ID.
 *
 * \param[in] partition_id     Partition identity
 *
 * \retval NULL                Failed
 * \retval "Not NULL"          Target partition context pointer,
 *                             \ref partition_t structures
 */
static struct partition_t *tfm_spm_get_partition_by_id(int32_t partition_id)
{
    uint32_t idx = get_partition_idx(partition_id);

    if (idx != SPM_INVALID_PARTITION_IDX) {
        return &(g_spm_partition_db.partitions[idx]);
    }
    return NULL;
}

struct partition_t *tfm_spm_get_running_partition(void)
{
    struct tfm_core_thread_t *pth = tfm_core_thrd_get_curr();
    struct partition_t *partition;

    partition = TO_CONTAINER(pth, struct partition_t, sp_thread);

    return partition;
}

int32_t tfm_spm_check_client_version(struct service_t *service,
                                     uint32_t version)
{
    TFM_CORE_ASSERT(service);

    switch (SERVICE_GET_VERSION_POLICY(service->p_ldinf->flags)) {
    case TFM_VERSION_POLICY_RELAXED:
        if (version > service->p_ldinf->version) {
            return SPM_ERROR_VERSION;
        }
        break;
    case TFM_VERSION_POLICY_STRICT:
        if (version != service->p_ldinf->version) {
            return SPM_ERROR_VERSION;
        }
        break;
    default:
        return SPM_ERROR_VERSION;
    }
    return SPM_SUCCESS;
}
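
/*
 * Illustrative behaviour of the version policy check above (the version
 * numbers are hypothetical, not taken from any real service manifest):
 *
 *   Service version 2, policy RELAXED: client versions 1 and 2 are accepted,
 *                                      version 3 returns SPM_ERROR_VERSION.
 *   Service version 2, policy STRICT:  only client version 2 is accepted.
 */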

int32_t tfm_spm_check_authorization(uint32_t sid,
                                    struct service_t *service,
                                    bool ns_caller)
{
    struct partition_t *partition = NULL;
    uint32_t *dep;
    int32_t i;

    TFM_CORE_ASSERT(service);

    if (ns_caller) {
        if (!SERVICE_IS_NS_ACCESSIBLE(service->p_ldinf->flags)) {
            return SPM_ERROR_GENERIC;
        }
    } else {
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }

        dep = (uint32_t *)LOAD_INFO_DEPS(partition->p_ldinf);
        for (i = 0; i < partition->p_ldinf->ndeps; i++) {
            if (dep[i] == sid) {
                break;
            }
        }

        if (i == partition->p_ldinf->ndeps) {
            return SPM_ERROR_GENERIC;
        }
    }
    return SPM_SUCCESS;
}

/* Message functions */

struct tfm_msg_body_t *tfm_spm_get_msg_from_handle(psa_handle_t msg_handle)
{
    /*
     * The message handle passed by the caller is considered invalid in the
     * following cases:
     *   1. It is not a valid message handle. (The address of the message is
     *      not the address of a possible handle from the pool.)
     *   2. The handle does not belong to the caller's partition. (The handle
     *      is either unused, or owned by another partition.)
     * Check the conditions above.
     */
    struct tfm_msg_body_t *p_msg;
    uint32_t partition_id;
    struct tfm_conn_handle_t *p_conn_handle =
                                     tfm_spm_to_handle_instance(msg_handle);

    if (is_valid_chunk_data_in_pool(
        conn_handle_pool, (uint8_t *)p_conn_handle) != 1) {
        return NULL;
    }

    p_msg = &p_conn_handle->internal_msg;

    /*
     * Check that the magic number is correct. This proves that the message
     * structure contains an active message.
     */
    if (p_msg->magic != TFM_MSG_MAGIC) {
        return NULL;
    }

    /* Check that the running partition owns the message */
    partition_id = tfm_spm_partition_get_running_partition_id();
    if (partition_id != p_msg->service->partition->p_ldinf->pid) {
        return NULL;
    }

    return p_msg;
}

struct tfm_msg_body_t *
    tfm_spm_get_msg_buffer_from_conn_handle(struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(conn_handle != NULL);

    return &(conn_handle->internal_msg);
}

void tfm_spm_fill_msg(struct tfm_msg_body_t *msg,
                      struct service_t *service,
                      psa_handle_t handle,
                      int32_t type, int32_t client_id,
                      psa_invec *invec, size_t in_len,
                      psa_outvec *outvec, size_t out_len,
                      psa_outvec *caller_outvec)
{
    uint32_t i;
    struct tfm_conn_handle_t *conn_handle;

    TFM_CORE_ASSERT(msg);
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(!(invec == NULL && in_len != 0));
    TFM_CORE_ASSERT(!(outvec == NULL && out_len != 0));
    TFM_CORE_ASSERT(in_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(out_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(in_len + out_len <= PSA_MAX_IOVEC);

    /* Clear the message buffer before using it */
    spm_memset(msg, 0, sizeof(struct tfm_msg_body_t));

    tfm_event_init(&msg->ack_evnt);
    msg->magic = TFM_MSG_MAGIC;
    msg->service = service;
    msg->caller_outvec = caller_outvec;
    msg->msg.client_id = client_id;

    /* Copy contents */
    msg->msg.type = type;

    for (i = 0; i < in_len; i++) {
        msg->msg.in_size[i] = invec[i].len;
        msg->invec[i].base = invec[i].base;
    }

    for (i = 0; i < out_len; i++) {
        msg->msg.out_size[i] = outvec[i].len;
        msg->outvec[i].base = outvec[i].base;
        /* The outvec len records the number of bytes written, so reset it to
         * 0 here.
         */
        msg->outvec[i].len = 0;
    }

    /* Use the user connection handle as the message handle */
    msg->msg.handle = handle;

    conn_handle = tfm_spm_to_handle_instance(handle);
    /* For a connected handle, set rhandle on every message */
    if (conn_handle) {
        msg->msg.rhandle = tfm_spm_get_rhandle(service, conn_handle);
    }

    /* Set the private data of the NSPE client caller in multi-core topology */
    if (TFM_CLIENT_ID_IS_NS(client_id)) {
        tfm_rpc_set_caller_data(msg, client_id);
    }
}

void tfm_spm_send_event(struct service_t *service,
                        struct tfm_msg_body_t *msg)
{
    struct partition_t *partition = NULL;
    psa_signal_t signal = 0;

    if (!msg || !service || !service->p_ldinf || !service->partition) {
        tfm_core_panic();
    }

    partition = service->partition;
    signal = service->p_ldinf->signal;

    /* Add the message to the tail of the partition message list */
    BI_LIST_INSERT_BEFORE(&partition->msg_list, &msg->msg_node);

    /* Messages put. Update signals */
    partition->signals_asserted |= signal;

    if (partition->signals_waiting & signal) {
        tfm_event_wake(
            &partition->event,
            (partition->signals_asserted & partition->signals_waiting));
        partition->signals_waiting &= ~signal;
    }

    /*
     * If it is an NS request via RPC, it is unnecessary to block the current
     * thread.
     */
    if (!is_tfm_rpc_msg(msg)) {
        tfm_event_wait(&msg->ack_evnt);
    }
}

uint32_t tfm_spm_partition_get_running_partition_id(void)
{
    struct partition_t *partition;

    partition = tfm_spm_get_running_partition();
    if (partition && partition->p_ldinf) {
        return partition->p_ldinf->pid;
    } else {
        return INVALID_PARTITION_ID;
    }
}

int32_t tfm_memory_check(const void *buffer, size_t len, bool ns_caller,
                         enum tfm_memory_access_e access,
                         uint32_t privileged)
{
    enum tfm_hal_status_t err;
    uint32_t attr = 0;

    /* If len is zero, this indicates an empty buffer and base is ignored */
    if (len == 0) {
        return SPM_SUCCESS;
    }

    if (!buffer) {
        return SPM_ERROR_BAD_PARAMETERS;
    }

    if ((uintptr_t)buffer > (UINTPTR_MAX - len)) {
        return SPM_ERROR_MEMORY_CHECK;
    }

    if (access == TFM_MEMORY_ACCESS_RW) {
        attr |= (TFM_HAL_ACCESS_READABLE | TFM_HAL_ACCESS_WRITABLE);
    } else {
        attr |= TFM_HAL_ACCESS_READABLE;
    }

    if (privileged == TFM_PARTITION_UNPRIVILEGED_MODE) {
        attr |= TFM_HAL_ACCESS_UNPRIVILEGED;
    } else {
        attr &= ~TFM_HAL_ACCESS_UNPRIVILEGED;
    }

    if (ns_caller) {
        attr |= TFM_HAL_ACCESS_NS;
    }

    err = tfm_hal_memory_has_access((uintptr_t)buffer, len, attr);

    if (err == TFM_HAL_SUCCESS) {
        return SPM_SUCCESS;
    }

    return SPM_ERROR_MEMORY_CHECK;
}
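
/*
 * Minimal usage sketch (illustrative only; real callers derive 'priv' from
 * the running partition's flags and pick the access type per iovec):
 *
 *   uint32_t priv = tfm_spm_partition_get_privileged_mode(flags);
 *
 *   if (tfm_memory_check(invec.base, invec.len, ns_caller,
 *                        TFM_MEMORY_ACCESS_RO, priv) != SPM_SUCCESS) {
 *       return SPM_ERROR_MEMORY_CHECK;
 *   }
 */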

uint32_t tfm_spm_init(void)
{
    uint32_t i, j, part_idx = 0;
    struct partition_t *partition;
    struct tfm_core_thread_t *pth, *p_ns_entry_thread = NULL;
    const struct platform_data_t *platform_data_p;
    const struct partition_load_info_t *p_cmninf;
    struct asset_desc_t *p_asset_load;
#ifdef TFM_FIH_PROFILE_ON
    fih_int fih_rc = FIH_FAILURE;
#endif

    tfm_pool_init(conn_handle_pool,
                  POOL_BUFFER_SIZE(conn_handle_pool),
                  sizeof(struct tfm_conn_handle_t),
                  TFM_CONN_HANDLE_MAX_NUM);

    while (1) {
        partition = load_a_partition_assuredly();
        if (partition == NULL) {
            break;
        }

        load_services_assuredly(partition, &all_services);

        p_cmninf = partition->p_ldinf;

        /* Initialize MMIO assets */
        p_asset_load = (struct asset_desc_t *)LOAD_INFO_ASSET(p_cmninf);
        for (i = 0; i < p_cmninf->nassets; i++) {
            /* Skip memory-based assets */
            if (!(p_asset_load[i].attr & ASSET_DEV_REF_BIT)) {
                continue;
            }

            platform_data_p = REFERENCE_TO_PTR(p_asset_load[i].dev.addr_ref,
                                               struct platform_data_t *);

            /*
             * TODO: some partitions declare MMIO that does not exist on
             * specific platforms, and the platform defines a dummy NULL
             * reference for these MMIO items, which causes 'nassets' to
             * contain several NULL items. Skip initialization of these NULL
             * items temporarily to avoid HAL API panics.
             * Eventually, these platform-specific partitions need to be moved
             * into a platform-specific folder. Then this workaround can be
             * removed.
             */
            if (!platform_data_p) {
                continue;
            }

#ifdef TFM_FIH_PROFILE_ON
            FIH_CALL(tfm_spm_hal_configure_default_isolation, fih_rc, part_idx,
                     platform_data_p);
            if (fih_not_eq(fih_rc, fih_int_encode(TFM_PLAT_ERR_SUCCESS))) {
                tfm_core_panic();
            }
#else /* TFM_FIH_PROFILE_ON */
            if (tfm_spm_hal_configure_default_isolation(part_idx,
                platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                tfm_core_panic();
            }
#endif /* TFM_FIH_PROFILE_ON */
        }

        partition->signals_allowed |= PSA_DOORBELL;

        /* TODO: This can be optimized by generating the assigned signal
         *       at code generation time.
         */
        for (j = 0; j < tfm_core_irq_signals_count; ++j) {
            if (tfm_core_irq_signals[j].partition_id == p_cmninf->pid) {
                partition->signals_allowed |=
                                        tfm_core_irq_signals[j].signal_value;
                if ((p_cmninf->psa_ff_ver & PARTITION_INFO_VERSION_MASK)
                    == 0x0100) {
                    tfm_spm_hal_enable_irq(tfm_core_irq_signals[j].irq_line);
                } else if ((p_cmninf->psa_ff_ver & PARTITION_INFO_VERSION_MASK)
                           == 0x0101) {
                    tfm_spm_hal_disable_irq(tfm_core_irq_signals[j].irq_line);
                }
            }
        }

        tfm_event_init(&partition->event);
        BI_LIST_INIT_NODE(&partition->msg_list);

        pth = &partition->sp_thread;
        if (!pth) {
            tfm_core_panic();
        }

        /* Extendable partition static info is right after p_cmninf. */
        tfm_core_thrd_init(
                    pth,
                    POSITION_TO_ENTRY(p_cmninf->entry, tfm_core_thrd_entry_t),
                    NULL,
                    LOAD_ALLOCED_STACK_ADDR(p_cmninf) + p_cmninf->stack_size,
                    LOAD_ALLOCED_STACK_ADDR(p_cmninf));

        pth->prior = TO_THREAD_PRIORITY(PARTITION_PRIORITY(p_cmninf->flags));

        if (p_cmninf->pid == TFM_SP_NON_SECURE_ID) {
            p_ns_entry_thread = pth;
            pth->param = (void *)tfm_spm_hal_get_ns_entry_point();
        }

        /* Kick off */
        if (tfm_core_thrd_start(pth) != THRD_SUCCESS) {
            tfm_core_panic();
        }

        part_idx++;
    }

    /*
     * All threads are initialized, start the scheduler.
     *
     * NOTE:
     * It is worthwhile to hand the thread object to the scheduler if the
     * background context belongs to one of the threads. Here the background
     * thread is the initialization thread that calls the SPM SVC, which
     * re-uses the non-secure entry thread's stack. After SPM initialization
     * is done, this stack is cleaned up and the background context is never
     * going to return. Tell the scheduler that the current thread is the
     * non-secure entry thread.
     */
    tfm_core_thrd_start_scheduler(p_ns_entry_thread);

    return p_ns_entry_thread->arch_ctx.lr;
}

void tfm_pendsv_do_schedule(struct tfm_arch_ctx_t *p_actx)
{
#if TFM_LVL != 1
    struct partition_t *p_next_partition;
    const struct partition_load_info_t *p_part_static;
    uint32_t is_privileged;
#endif
    struct tfm_core_thread_t *pth_next = tfm_core_thrd_get_next();
    struct tfm_core_thread_t *pth_curr = tfm_core_thrd_get_curr();
#if defined(TFM_FIH_PROFILE_ON) && (TFM_LVL == 3)
    fih_int fih_rc = FIH_FAILURE;
#endif

    if (pth_next != NULL && pth_curr != pth_next) {
#if TFM_LVL != 1
        p_next_partition = TO_CONTAINER(pth_next,
                                        struct partition_t,
                                        sp_thread);
        p_part_static = p_next_partition->p_ldinf;
        if (p_part_static->flags & SPM_PART_FLAG_PSA_ROT) {
            is_privileged = TFM_PARTITION_PRIVILEGED_MODE;
        } else {
            is_privileged = TFM_PARTITION_UNPRIVILEGED_MODE;
        }

        tfm_spm_partition_change_privilege(is_privileged);
#if TFM_LVL == 3
        /*
         * FIXME: To implement isolation among partitions in isolation level 3,
         * each partition needs to run in unprivileged mode. Currently some
         * PRoTs cannot work in unprivileged mode, so make them privileged now.
         */
        if (is_privileged == TFM_PARTITION_UNPRIVILEGED_MODE) {
            struct asset_desc_t *p_asset =
                (struct asset_desc_t *)LOAD_INFO_ASSET(p_part_static);
            /* The partition must have private data as the first asset in LVL3 */
            if (p_part_static->nassets == 0) {
                tfm_core_panic();
            }
            if (p_asset->attr & ASSET_DEV_REF_BIT) {
                tfm_core_panic();
            }
            /* FIXME: only MPU-based implementations are supported currently */
#ifdef TFM_FIH_PROFILE_ON
            FIH_CALL(tfm_hal_mpu_update_partition_boundary, fih_rc,
                     p_asset->mem.addr_x, p_asset->mem.addr_y);
            if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
                tfm_core_panic();
            }
#else /* TFM_FIH_PROFILE_ON */
            if (tfm_hal_mpu_update_partition_boundary(p_asset->mem.addr_x,
                                                      p_asset->mem.addr_y)
                != TFM_HAL_SUCCESS) {
                tfm_core_panic();
            }
#endif /* TFM_FIH_PROFILE_ON */
        }
#endif /* TFM_LVL == 3 */
#endif /* TFM_LVL != 1 */

        tfm_core_thrd_switch_context(p_actx, pth_curr, pth_next);
    }

    /*
     * Handle pending mailbox messages from NS in multi-core topology.
     * Empty operation on single Armv8-M platforms.
     */
    tfm_rpc_client_call_handler();
}

void update_caller_outvec_len(struct tfm_msg_body_t *msg)
{
    uint32_t i;

    /*
     * FixMe: abstract this part into dedicated functions to avoid
     * accessing the thread context in the PSA layer
     */
    /* If it is an NS request via RPC, the owner of this message is not set */
    if (!is_tfm_rpc_msg(msg)) {
        TFM_CORE_ASSERT(msg->ack_evnt.owner->state == THRD_STATE_BLOCK);
    }

    for (i = 0; i < PSA_MAX_IOVEC; i++) {
        if (msg->msg.out_size[i] == 0) {
            continue;
        }

        TFM_CORE_ASSERT(msg->caller_outvec[i].base == msg->outvec[i].base);

        msg->caller_outvec[i].len = msg->outvec[i].len;
    }
}

void notify_with_signal(int32_t partition_id, psa_signal_t signal)
{
    struct partition_t *partition = NULL;

    /*
     * The value of partition_id must be greater than zero as the target of
     * the notification must be a Secure Partition; providing a Non-secure
     * Partition ID is a fatal error.
     */
    if (!TFM_CLIENT_ID_IS_S(partition_id)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if partition_id does not correspond to a Secure
     * Partition.
     */
    partition = tfm_spm_get_partition_by_id(partition_id);
    if (!partition) {
        tfm_core_panic();
    }

    partition->signals_asserted |= signal;

    if (partition->signals_waiting & signal) {
        tfm_event_wake(
            &partition->event,
            partition->signals_asserted & partition->signals_waiting);
        partition->signals_waiting &= ~signal;
    }
}

/**
 * \brief Sets the signal for the partition in Second-Level Interrupt Handling
 *        mode IRQs
 *
 * \param[in] partition_id      The ID of the partition which handles this IRQ
 * \param[in] signal            The signal associated with this IRQ
 * \param[in] irq_line          The number of the IRQ line
 *
 * \retval void                 Success.
 * \retval "Does not return"    Partition ID is invalid
 */
void tfm_set_irq_signal(uint32_t partition_id, psa_signal_t signal,
                        uint32_t irq_line)
{
    __disable_irq();

    tfm_spm_hal_disable_irq(irq_line);
    notify_with_signal(partition_id, signal);

    __enable_irq();
}

int32_t get_irq_line_for_signal(int32_t partition_id, psa_signal_t signal)
{
    size_t i;

    if (!IS_ONLY_ONE_BIT_IN_UINT32(signal)) {
        return -1;
    }

    for (i = 0; i < tfm_core_irq_signals_count; ++i) {
        if (tfm_core_irq_signals[i].partition_id == partition_id &&
            tfm_core_irq_signals[i].signal_value == signal) {
            return tfm_core_irq_signals[i].irq_line;
        }
    }

    return SPM_ERROR_GENERIC;
}

#if !defined(__ARM_ARCH_8_1M_MAIN__)
void tfm_spm_validate_caller(struct partition_t *p_cur_sp, uint32_t *p_ctx,
                             uint32_t exc_return, bool ns_caller)
{
    uintptr_t stacked_ctx_pos;

    if (ns_caller) {
        /*
         * The background IRQ can't be supported, since if an SP is executing,
         * the preempted context of the SP can be different from the one that
         * preempts the veneer.
         */
        if (p_cur_sp->p_ldinf->pid != TFM_SP_NON_SECURE_ID) {
            tfm_core_panic();
        }

        /*
         * It is a non-secure caller, check whether the veneer stack contains
         * multiple contexts.
         */
        stacked_ctx_pos = (uintptr_t)p_ctx +
                          sizeof(struct tfm_state_context_t) +
                          TFM_STACK_SEALED_SIZE;

        if (is_stack_alloc_fp_space(exc_return)) {
#if defined (__FPU_USED) && (__FPU_USED == 1U)
            if (FPU->FPCCR & FPU_FPCCR_TS_Msk) {
                stacked_ctx_pos += TFM_ADDTIONAL_FP_CONTEXT_WORDS *
                                   sizeof(uint32_t);
            }
#endif
            stacked_ctx_pos += TFM_BASIC_FP_CONTEXT_WORDS * sizeof(uint32_t);
        }

        if (stacked_ctx_pos != p_cur_sp->sp_thread.stk_top) {
            tfm_core_panic();
        }
    } else if (p_cur_sp->p_ldinf->pid <= 0) {
        tfm_core_panic();
    }
}
#endif

void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
{
    uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
    uint32_t running_partition_flags = 0;
    const struct partition_t *partition = NULL;

    /* Check permissions on a per request type basis */

    switch (svc_ctx->r0) {
    case TFM_SPM_REQUEST_RESET_VOTE:
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }
        running_partition_flags = partition->p_ldinf->flags;

        /* Currently only PSA Root of Trust services are allowed to make reset
         * vote requests
         */
        if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
            *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
        }

        /* FixMe: this is a placeholder for checks to be performed before
         * allowing execution of reset
         */
        *res_ptr = (uint32_t)TFM_SUCCESS;

        break;
    default:
        *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
    }
}