/*
 * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <inttypes.h>
#include <stdbool.h>
#include "bitops.h"
#include "fih.h"
#include "psa/client.h"
#include "psa/service.h"
#include "tfm_thread.h"
#include "tfm_wait.h"
#include "internal_errors.h"
#include "tfm_spm_hal.h"
#include "tfm_api.h"
#include "tfm_secure_api.h"
#include "tfm_memory_utils.h"
#include "tfm_hal_defs.h"
#include "tfm_hal_isolation.h"
#include "spm_ipc.h"
#include "tfm_peripherals_def.h"
#include "tfm_core_utils.h"
#include "tfm_rpc.h"
#include "tfm_core_trustzone.h"
#include "lists.h"
#include "tfm_pools.h"
#include "spm_partition_defs.h"
#include "psa_manifest/pid.h"
#include "tfm/tfm_spm_services.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/asset_defs.h"
#include "load/spm_load_api.h"
#include "load/irq_defs.h"

/* Partition and service runtime data list head/runtime data table */
static struct partition_head_t partitions_listhead;
static struct service_head_t services_listhead;
struct service_t *stateless_services_ref_tbl[STATIC_HANDLE_NUM_LIMIT];

/* Pools */
TFM_POOL_DECLARE(conn_handle_pool, sizeof(struct tfm_conn_handle_t),
                 TFM_CONN_HANDLE_MAX_NUM);

void tfm_set_irq_signal(uint32_t partition_id, psa_signal_t signal,
                        uint32_t irq_line);

#include "tfm_secure_irq_handlers_ipc.inc"

/*********************** Connection handle conversion APIs *******************/

#define CONVERSION_FACTOR_BITOFFSET 3
#define CONVERSION_FACTOR_VALUE (1 << CONVERSION_FACTOR_BITOFFSET)
/* Set 32 as the maximum */
#define CONVERSION_FACTOR_VALUE_MAX 0x20

#if CONVERSION_FACTOR_VALUE > CONVERSION_FACTOR_VALUE_MAX
#error "CONVERSION FACTOR OUT OF RANGE"
#endif

static uint32_t loop_index;

/*
 * A handle instance psa_handle_t allocated inside SPM is actually a memory
 * address within the handle pool. Returning this handle to the client
 * directly would expose a secure memory address. Therefore the handle is
 * converted into another value that does not reveal the secure memory
 * address to clients.
 *
 * This function converts the handle instance into another value by scaling
 * its offset in the pool; the converted value is named a user handle.
 *
 * The formula:
 *  user_handle = (handle_instance - POOL_START) * CONVERSION_FACTOR_VALUE +
 *                CLIENT_HANDLE_VALUE_MIN + loop_index
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 *
 * note:
 *  loop_index is used to ensure that the same handle instance is converted
 *  into different user handles over a short period of time.
 */
psa_handle_t tfm_spm_to_user_handle(struct tfm_conn_handle_t *handle_instance)
{
    psa_handle_t user_handle;

    loop_index = (loop_index + 1) % CONVERSION_FACTOR_VALUE;
    user_handle = (psa_handle_t)((((uintptr_t)handle_instance -
                  (uintptr_t)conn_handle_pool) << CONVERSION_FACTOR_BITOFFSET) +
                  CLIENT_HANDLE_VALUE_MIN + loop_index);

    return user_handle;
}
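
/*
 * Illustrative example (the numbers are hypothetical, not taken from any
 * build): assume conn_handle_pool starts at 0x30001000, CLIENT_HANDLE_VALUE_MIN
 * is 0x10000000 and loop_index is currently 5. A handle instance located at
 * 0x30001040 (byte offset 0x40 in the pool) converts to:
 *   user_handle = (0x40 << 3) + 0x10000000 + 5 = 0x10000205
 * The low CONVERSION_FACTOR_BITOFFSET bits only carry loop_index and are
 * discarded again by the reverse conversion below.
 */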

/*
 * This function converts a user handle into the corresponding handle
 * instance. The converted value is validated before it is returned; an
 * invalid handle instance is returned as NULL.
 *
 * The formula:
 *  handle_instance = ((user_handle - CLIENT_HANDLE_VALUE_MIN) /
 *                    CONVERSION_FACTOR_VALUE) + POOL_START
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 */
struct tfm_conn_handle_t *tfm_spm_to_handle_instance(psa_handle_t user_handle)
{
    struct tfm_conn_handle_t *handle_instance;

    if (user_handle == PSA_NULL_HANDLE) {
        return NULL;
    }

    handle_instance = (struct tfm_conn_handle_t *)((((uintptr_t)user_handle -
                      CLIENT_HANDLE_VALUE_MIN) >> CONVERSION_FACTOR_BITOFFSET) +
                      (uintptr_t)conn_handle_pool);

    return handle_instance;
}
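
/*
 * A minimal usage sketch (illustrative only): the two conversion helpers are
 * expected to round-trip, apart from the loop_index bits:
 *
 *   struct tfm_conn_handle_t *inst = tfm_spm_create_conn_handle(service, id);
 *   psa_handle_t user = tfm_spm_to_user_handle(inst);
 *   // tfm_spm_to_handle_instance(user) == inst
 */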

/* Service handle management functions */
struct tfm_conn_handle_t *tfm_spm_create_conn_handle(struct service_t *service,
                                                     int32_t client_id)
{
    struct tfm_conn_handle_t *p_handle;

    TFM_CORE_ASSERT(service);

    /* Get a buffer for the handle list structure from the handle pool */
    p_handle = (struct tfm_conn_handle_t *)tfm_pool_alloc(conn_handle_pool);
    if (!p_handle) {
        return NULL;
    }

    p_handle->service = service;
    p_handle->status = TFM_HANDLE_STATUS_IDLE;
    p_handle->client_id = client_id;

    /* Add the handle node to the list for subsequent PSA functions */
    BI_LIST_INSERT_BEFORE(&service->handle_list, &p_handle->list);

    return p_handle;
}

int32_t tfm_spm_validate_conn_handle(
                                     const struct tfm_conn_handle_t *conn_handle,
                                     int32_t client_id)
{
    /* Check that the handle address is valid */
    if (is_valid_chunk_data_in_pool(conn_handle_pool,
                                    (uint8_t *)conn_handle) != true) {
        return SPM_ERROR_GENERIC;
    }

    /* Check that the handle belongs to the caller */
    if (conn_handle->client_id != client_id) {
        return SPM_ERROR_GENERIC;
    }

    return SPM_SUCCESS;
}

int32_t tfm_spm_free_conn_handle(struct service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(conn_handle != NULL);

    /* Clear the magic as the handle is not used anymore */
    conn_handle->internal_msg.magic = 0;

    /* Remove the node from the handle list */
    BI_LIST_REMOVE_NODE(&conn_handle->list);

    /* Return the handle buffer to the pool */
    tfm_pool_free(conn_handle_pool, conn_handle);
    return SPM_SUCCESS;
}

int32_t tfm_spm_set_rhandle(struct service_t *service,
                            struct tfm_conn_handle_t *conn_handle,
                            void *rhandle)
{
    TFM_CORE_ASSERT(service);
    /* Setting the reverse handle value is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    conn_handle->rhandle = rhandle;
    return SPM_SUCCESS;
}

/**
 * \brief Get the reverse handle value from a connection handle.
 *
 * \param[in] service              Target service context pointer
 * \param[in] conn_handle          Connection handle created by
 *                                 tfm_spm_create_conn_handle()
 *
 * \retval void *                  Success
 * \retval "Does not return"       Panic when:
 *                                 the service pointer is NULL,
 *                                 the handle is \ref PSA_NULL_HANDLE,
 *                                 the handle node is not found
 */
static void *tfm_spm_get_rhandle(struct service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    /* Getting the reverse handle value is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    return conn_handle->rhandle;
}

/* Partition management functions */

struct tfm_msg_body_t *tfm_spm_get_msg_by_signal(struct partition_t *partition,
                                                 psa_signal_t signal)
{
    struct bi_list_node_t *node, *head;
    struct tfm_msg_body_t *tmp_msg, *msg = NULL;

    TFM_CORE_ASSERT(partition);

    head = &partition->msg_list;

    if (BI_LIST_IS_EMPTY(head)) {
        return NULL;
    }

    /*
     * There may be multiple messages for this RoT Service signal; do not clear
     * the partition signal mask until no message remains. The search may be
     * optimized.
     */
    BI_LIST_FOR_EACH(node, head) {
        tmp_msg = TO_CONTAINER(node, struct tfm_msg_body_t, msg_node);
        if (tmp_msg->service->p_ldinf->signal == signal && msg) {
            return msg;
        } else if (tmp_msg->service->p_ldinf->signal == signal) {
            msg = tmp_msg;
            BI_LIST_REMOVE_NODE(node);
        }
    }

    partition->signals_asserted &= ~signal;

    return msg;
}

#if TFM_LVL != 1
/**
 * \brief Change the privilege mode for partition thread mode.
 *
 * \param[in] privileged        Privileged mode,
 *                              \ref TFM_PARTITION_PRIVILEGED_MODE
 *                              or \ref TFM_PARTITION_UNPRIVILEGED_MODE
 *
 * \note  Barrier instructions are not called by this function, and if
 *        it is called in thread mode, it might be necessary to call
 *        them after this function returns.
 */
static void tfm_spm_partition_change_privilege(uint32_t privileged)
{
    CONTROL_Type ctrl;

    ctrl.w = __get_CONTROL();

    if (privileged == TFM_PARTITION_PRIVILEGED_MODE) {
        ctrl.b.nPRIV = 0;
    } else {
        ctrl.b.nPRIV = 1;
    }

    __set_CONTROL(ctrl.w);
}
#endif /* TFM_LVL != 1 */

uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
{
#if TFM_LVL == 1
    return TFM_PARTITION_PRIVILEGED_MODE;
#else /* TFM_LVL == 1 */
    if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
        return TFM_PARTITION_PRIVILEGED_MODE;
    } else {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
#endif /* TFM_LVL == 1 */
}

struct service_t *tfm_spm_get_service_by_sid(uint32_t sid)
{
    struct service_t *p_serv;

    UNI_LIST_FOR_EACH(p_serv, &services_listhead) {
        if (p_serv->p_ldinf->sid == sid) {
            return p_serv;
        }
    }

    return NULL;
}

/**
 * \brief Get the partition context by partition ID.
 *
 * \param[in] partition_id     Partition identity
 *
 * \retval NULL                Failed
 * \retval "Not NULL"          Target partition context pointer,
 *                             \ref partition_t structures
 */
static struct partition_t *tfm_spm_get_partition_by_id(int32_t partition_id)
{
    struct partition_t *p_part;

    UNI_LIST_FOR_EACH(p_part, &partitions_listhead) {
        if (p_part->p_ldinf->pid == partition_id) {
            return p_part;
        }
    }

    return NULL;
}

struct partition_t *tfm_spm_get_running_partition(void)
{
    struct tfm_core_thread_t *pth = tfm_core_thrd_get_curr();
    struct partition_t *partition;

    partition = TO_CONTAINER(pth, struct partition_t, sp_thread);

    return partition;
}

int32_t tfm_spm_check_client_version(struct service_t *service,
                                     uint32_t version)
{
    TFM_CORE_ASSERT(service);

    switch (SERVICE_GET_VERSION_POLICY(service->p_ldinf->flags)) {
    case SERVICE_VERSION_POLICY_RELAXED:
        if (version > service->p_ldinf->version) {
            return SPM_ERROR_VERSION;
        }
        break;
    case SERVICE_VERSION_POLICY_STRICT:
        if (version != service->p_ldinf->version) {
            return SPM_ERROR_VERSION;
        }
        break;
    default:
        return SPM_ERROR_VERSION;
    }
    return SPM_SUCCESS;
}
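
/*
 * Illustrative behaviour of the two version policies (the numbers are made
 * up): for a service declared at version 2, the RELAXED policy accepts client
 * versions 1 and 2 and rejects 3, while the STRICT policy accepts version 2
 * only.
 */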

int32_t tfm_spm_check_authorization(uint32_t sid,
                                    struct service_t *service,
                                    bool ns_caller)
{
    struct partition_t *partition = NULL;
    uint32_t *dep;
    int32_t i;

    TFM_CORE_ASSERT(service);

    if (ns_caller) {
        if (!SERVICE_IS_NS_ACCESSIBLE(service->p_ldinf->flags)) {
            return SPM_ERROR_GENERIC;
        }
    } else {
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }

        dep = (uint32_t *)LOAD_INFO_DEPS(partition->p_ldinf);
        for (i = 0; i < partition->p_ldinf->ndeps; i++) {
            if (dep[i] == sid) {
                break;
            }
        }

        if (i == partition->p_ldinf->ndeps) {
            return SPM_ERROR_GENERIC;
        }
    }
    return SPM_SUCCESS;
}

/* Message functions */

struct tfm_msg_body_t *tfm_spm_get_msg_from_handle(psa_handle_t msg_handle)
{
    /*
     * The message handle passed by the caller is considered invalid in the
     * following cases:
     *   1. Not a valid message handle. (The address of a message is not the
     *      address of a possible handle from the pool.)
     *   2. The handle does not belong to the caller partition. (The handle is
     *      either unused, or owned by another partition.)
     * Check the conditions above.
     */
    struct tfm_msg_body_t *p_msg;
    uint32_t partition_id;
    struct tfm_conn_handle_t *p_conn_handle =
        tfm_spm_to_handle_instance(msg_handle);

    if (is_valid_chunk_data_in_pool(
        conn_handle_pool, (uint8_t *)p_conn_handle) != 1) {
        return NULL;
    }

    p_msg = &p_conn_handle->internal_msg;

    /*
     * Check that the magic number is correct. This proves that the message
     * structure contains an active message.
     */
    if (p_msg->magic != TFM_MSG_MAGIC) {
        return NULL;
    }

    /* Check that the running partition owns the message */
    partition_id = tfm_spm_partition_get_running_partition_id();
    if (partition_id != p_msg->service->partition->p_ldinf->pid) {
        return NULL;
    }

    return p_msg;
}

struct tfm_msg_body_t *
    tfm_spm_get_msg_buffer_from_conn_handle(struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(conn_handle != NULL);

    return &(conn_handle->internal_msg);
}

void tfm_spm_fill_msg(struct tfm_msg_body_t *msg,
                      struct service_t *service,
                      psa_handle_t handle,
                      int32_t type, int32_t client_id,
                      psa_invec *invec, size_t in_len,
                      psa_outvec *outvec, size_t out_len,
                      psa_outvec *caller_outvec)
{
    uint32_t i;
    struct tfm_conn_handle_t *conn_handle;

    TFM_CORE_ASSERT(msg);
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(!(invec == NULL && in_len != 0));
    TFM_CORE_ASSERT(!(outvec == NULL && out_len != 0));
    TFM_CORE_ASSERT(in_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(out_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(in_len + out_len <= PSA_MAX_IOVEC);

    /* Clear the message buffer before using it */
    spm_memset(msg, 0, sizeof(struct tfm_msg_body_t));

    tfm_event_init(&msg->ack_evnt);
    msg->magic = TFM_MSG_MAGIC;
    msg->service = service;
    msg->caller_outvec = caller_outvec;
    msg->msg.client_id = client_id;

    /* Copy contents */
    msg->msg.type = type;

    for (i = 0; i < in_len; i++) {
        msg->msg.in_size[i] = invec[i].len;
        msg->invec[i].base = invec[i].base;
    }

    for (i = 0; i < out_len; i++) {
        msg->msg.out_size[i] = outvec[i].len;
        msg->outvec[i].base = outvec[i].base;
        /* Out len records the number of bytes written, so set it to 0 again */
        msg->outvec[i].len = 0;
    }

    /* Use the user connection handle as the message handle */
    msg->msg.handle = handle;

    conn_handle = tfm_spm_to_handle_instance(handle);
    /* For a connected handle, set rhandle on every message */
    if (conn_handle) {
        msg->msg.rhandle = tfm_spm_get_rhandle(service, conn_handle);
    }

    /* Set the private data of the NSPE client caller in multi-core topology */
    if (TFM_CLIENT_ID_IS_NS(client_id)) {
        tfm_rpc_set_caller_data(msg, client_id);
    }
}

void tfm_spm_send_event(struct service_t *service,
                        struct tfm_msg_body_t *msg)
{
    struct partition_t *partition = NULL;
    psa_signal_t signal = 0;

    if (!msg || !service || !service->p_ldinf || !service->partition) {
        tfm_core_panic();
    }

    partition = service->partition;
    signal = service->p_ldinf->signal;

    /* Add the message to the tail of the partition message list */
    BI_LIST_INSERT_BEFORE(&partition->msg_list, &msg->msg_node);

    /* The message is queued. Update signals */
    partition->signals_asserted |= signal;

    if (partition->signals_waiting & signal) {
        tfm_event_wake(
            &partition->event,
            (partition->signals_asserted & partition->signals_waiting));
        partition->signals_waiting &= ~signal;
    }

    /*
     * If it is an NS request via RPC, it is unnecessary to block the current
     * thread.
     */
    if (!is_tfm_rpc_msg(msg)) {
        tfm_event_wait(&msg->ack_evnt);
    }
}

uint32_t tfm_spm_partition_get_running_partition_id(void)
{
    struct partition_t *partition;

    partition = tfm_spm_get_running_partition();
    if (partition && partition->p_ldinf) {
        return partition->p_ldinf->pid;
    } else {
        return INVALID_PARTITION_ID;
    }
}

int32_t tfm_memory_check(const void *buffer, size_t len, bool ns_caller,
                         enum tfm_memory_access_e access,
                         uint32_t privileged)
{
    enum tfm_hal_status_t err;
    uint32_t attr = 0;

    /* If len is zero, this indicates an empty buffer and base is ignored */
    if (len == 0) {
        return SPM_SUCCESS;
    }

    if (!buffer) {
        return SPM_ERROR_BAD_PARAMETERS;
    }

    if ((uintptr_t)buffer > (UINTPTR_MAX - len)) {
        return SPM_ERROR_MEMORY_CHECK;
    }

    if (access == TFM_MEMORY_ACCESS_RW) {
        attr |= (TFM_HAL_ACCESS_READABLE | TFM_HAL_ACCESS_WRITABLE);
    } else {
        attr |= TFM_HAL_ACCESS_READABLE;
    }

    if (privileged == TFM_PARTITION_UNPRIVILEGED_MODE) {
        attr |= TFM_HAL_ACCESS_UNPRIVILEGED;
    } else {
        attr &= ~TFM_HAL_ACCESS_UNPRIVILEGED;
    }

    if (ns_caller) {
        attr |= TFM_HAL_ACCESS_NS;
    }

    err = tfm_hal_memory_has_access((uintptr_t)buffer, len, attr);

    if (err == TFM_HAL_SUCCESS) {
        return SPM_SUCCESS;
    }

    return SPM_ERROR_MEMORY_CHECK;
}
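
/*
 * A minimal usage sketch (the caller and vector below are hypothetical,
 * illustrative only): before writing to a client output vector, a caller
 * would typically do:
 *
 *   if (tfm_memory_check(outvec.base, outvec.len, ns_caller,
 *                        TFM_MEMORY_ACCESS_RW,
 *                        TFM_PARTITION_UNPRIVILEGED_MODE) != SPM_SUCCESS) {
 *       tfm_core_panic();
 *   }
 */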

uint32_t tfm_spm_init(void)
{
    uint32_t i;
    bool privileged;
    struct partition_t *partition;
    struct tfm_core_thread_t *pth, *p_ns_entry_thread = NULL;
    const struct platform_data_t *platform_data_p;
    const struct partition_load_info_t *p_ldinf;
    struct asset_desc_t *p_asset_load;
#ifdef TFM_FIH_PROFILE_ON
    fih_int fih_rc = FIH_FAILURE;
#endif

    tfm_pool_init(conn_handle_pool,
                  POOL_BUFFER_SIZE(conn_handle_pool),
                  sizeof(struct tfm_conn_handle_t),
                  TFM_CONN_HANDLE_MAX_NUM);

    UNI_LISI_INIT_HEAD(&partitions_listhead);
    UNI_LISI_INIT_HEAD(&services_listhead);

    while (1) {
        partition = load_a_partition_assuredly(&partitions_listhead);
        if (partition == NULL) {
            break;
        }

        p_ldinf = partition->p_ldinf;

        if (p_ldinf->nservices) {
            load_services_assuredly(partition, &services_listhead,
                                    stateless_services_ref_tbl,
                                    sizeof(stateless_services_ref_tbl));
        }

        if (p_ldinf->nirqs) {
            load_irqs_assuredly(partition);
        }

        /* Init MMIO assets */
        if (p_ldinf->nassets > 0) {
            if (tfm_spm_partition_get_privileged_mode(p_ldinf->flags) ==
                TFM_PARTITION_PRIVILEGED_MODE) {
                privileged = true;
            } else {
                privileged = false;
            }
        }

        p_asset_load = (struct asset_desc_t *)LOAD_INFO_ASSET(p_ldinf);
        for (i = 0; i < p_ldinf->nassets; i++) {
            /* Skip memory-based assets */
            if (!(p_asset_load[i].attr & ASSET_DEV_REF_BIT)) {
                continue;
            }

            platform_data_p = REFERENCE_TO_PTR(p_asset_load[i].dev.addr_ref,
                                               struct platform_data_t *);

            /*
             * TODO: some partitions declare MMIO that does not exist on
             * specific platforms, and the platform defines a dummy NULL
             * reference for these MMIO items, which causes 'nassets' to
             * contain several NULL items. Skip the initialization of these
             * NULL items temporarily to avoid a HAL API panic.
             * Eventually, these platform-specific partitions need to be moved
             * into a platform-specific folder. Then this workaround can be
             * removed.
             */
            if (!platform_data_p) {
                continue;
            }

#ifdef TFM_FIH_PROFILE_ON
            FIH_CALL(tfm_spm_hal_configure_default_isolation, fih_rc,
                     privileged, platform_data_p);
            if (fih_not_eq(fih_rc, fih_int_encode(TFM_PLAT_ERR_SUCCESS))) {
                tfm_core_panic();
            }
#else /* TFM_FIH_PROFILE_ON */
            if (tfm_spm_hal_configure_default_isolation(privileged,
                    platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                tfm_core_panic();
            }
#endif /* TFM_FIH_PROFILE_ON */
        }

        partition->signals_allowed |= PSA_DOORBELL;

        tfm_event_init(&partition->event);
        BI_LIST_INIT_NODE(&partition->msg_list);

        pth = &partition->sp_thread;
        if (!pth) {
            tfm_core_panic();
        }

        /* Extendable partition load info is right after p_ldinf. */
        tfm_core_thrd_init(
            pth,
            POSITION_TO_ENTRY(p_ldinf->entry, tfm_core_thrd_entry_t),
            NULL,
            LOAD_ALLOCED_STACK_ADDR(p_ldinf) + p_ldinf->stack_size,
            LOAD_ALLOCED_STACK_ADDR(p_ldinf));

        pth->prior = TO_THREAD_PRIORITY(PARTITION_PRIORITY(p_ldinf->flags));

        if (p_ldinf->pid == TFM_SP_NON_SECURE_ID) {
            p_ns_entry_thread = pth;
            pth->param = (void *)tfm_spm_hal_get_ns_entry_point();
        }

        /* Kick off */
        if (tfm_core_thrd_start(pth) != THRD_SUCCESS) {
            tfm_core_panic();
        }
    }

    /*
     * All threads are initialized, start the scheduler.
     *
     * NOTE:
     * It is worth giving the thread object to the scheduler if the background
     * context belongs to one of the threads. Here the background thread is
     * the initialization thread which calls the SPM SVC and re-uses the
     * non-secure entry thread's stack. After SPM initialization is done, this
     * stack is cleaned up and the background context never returns. Tell the
     * scheduler that the current thread is the non-secure entry thread.
     */
    tfm_core_thrd_start_scheduler(p_ns_entry_thread);

    return p_ns_entry_thread->arch_ctx.lr;
}

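/*
 * Scheduling hook run on PendSV: if the scheduler picked a different thread,
 * adjust the privilege level (and, at isolation level 3, the MPU partition
 * boundary) for the incoming partition and switch the thread context; then
 * handle any pending multi-core RPC mailbox messages.
 */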
void tfm_pendsv_do_schedule(struct tfm_arch_ctx_t *p_actx)
{
#if TFM_LVL != 1
    struct partition_t *p_next_partition;
    const struct partition_load_info_t *p_part_static;
    uint32_t is_privileged;
#endif
    struct tfm_core_thread_t *pth_next = tfm_core_thrd_get_next();
    struct tfm_core_thread_t *pth_curr = tfm_core_thrd_get_curr();
#if defined(TFM_FIH_PROFILE_ON) && (TFM_LVL == 3)
    fih_int fih_rc = FIH_FAILURE;
#endif

    if (pth_next != NULL && pth_curr != pth_next) {
#if TFM_LVL != 1
        p_next_partition = TO_CONTAINER(pth_next,
                                        struct partition_t,
                                        sp_thread);
        p_part_static = p_next_partition->p_ldinf;
        if (p_part_static->flags & SPM_PART_FLAG_PSA_ROT) {
            is_privileged = TFM_PARTITION_PRIVILEGED_MODE;
        } else {
            is_privileged = TFM_PARTITION_UNPRIVILEGED_MODE;
        }

        tfm_spm_partition_change_privilege(is_privileged);
#if TFM_LVL == 3
        /*
         * FIXME: To implement isolation among partitions in isolation level 3,
         * each partition needs to run in unprivileged mode. Currently some
         * PRoTs cannot work in unprivileged mode, so make them privileged for
         * now.
         */
        if (is_privileged == TFM_PARTITION_UNPRIVILEGED_MODE) {
            struct asset_desc_t *p_asset =
                (struct asset_desc_t *)LOAD_INFO_ASSET(p_part_static);
            /* The partition must have private data as the first asset in LVL3 */
            if (p_part_static->nassets == 0) {
                tfm_core_panic();
            }
            if (p_asset->attr & ASSET_DEV_REF_BIT) {
                tfm_core_panic();
            }
            /* FIXME: only MPU-based implementations are supported currently */
#ifdef TFM_FIH_PROFILE_ON
            FIH_CALL(tfm_hal_mpu_update_partition_boundary, fih_rc,
                     p_asset->mem.addr_x, p_asset->mem.addr_y);
            if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
                tfm_core_panic();
            }
#else /* TFM_FIH_PROFILE_ON */
            if (tfm_hal_mpu_update_partition_boundary(p_asset->mem.addr_x,
                                                      p_asset->mem.addr_y)
                != TFM_HAL_SUCCESS) {
                tfm_core_panic();
            }
#endif /* TFM_FIH_PROFILE_ON */
        }
#endif /* TFM_LVL == 3 */
#endif /* TFM_LVL != 1 */

        tfm_core_thrd_switch_context(p_actx, pth_curr, pth_next);
    }

    /*
     * Handle pending mailbox messages from NS in multi-core topology.
     * Empty operation on single Armv8-M platforms.
     */
    tfm_rpc_client_call_handler();
}

void update_caller_outvec_len(struct tfm_msg_body_t *msg)
{
    uint32_t i;

    /*
     * FIXME: abstract this part into dedicated functions to avoid
     * accessing the thread context in the PSA layer
     */
    /* If it is an NS request via RPC, the owner of this message is not set */
    if (!is_tfm_rpc_msg(msg)) {
        TFM_CORE_ASSERT(msg->ack_evnt.owner->state == THRD_STATE_BLOCK);
    }

    for (i = 0; i < PSA_MAX_IOVEC; i++) {
        if (msg->msg.out_size[i] == 0) {
            continue;
        }

        TFM_CORE_ASSERT(msg->caller_outvec[i].base == msg->outvec[i].base);

        msg->caller_outvec[i].len = msg->outvec[i].len;
    }
}

void notify_with_signal(int32_t partition_id, psa_signal_t signal)
{
    struct partition_t *partition = NULL;

    /*
     * The value of partition_id must be greater than zero as the target of
     * the notification must be a Secure Partition; providing a Non-secure
     * Partition ID is a fatal error.
     */
    if (!TFM_CLIENT_ID_IS_S(partition_id)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if partition_id does not correspond to a Secure
     * Partition.
     */
    partition = tfm_spm_get_partition_by_id(partition_id);
    if (!partition) {
        tfm_core_panic();
    }

    partition->signals_asserted |= signal;

    if (partition->signals_waiting & signal) {
        tfm_event_wake(
            &partition->event,
            partition->signals_asserted & partition->signals_waiting);
        partition->signals_waiting &= ~signal;
    }
}

/**
 * \brief Sets a signal to the partition for a Second-Level Interrupt Handling
 *        mode IRQ
 *
 * \param[in] partition_id      The ID of the partition which handles this IRQ
 * \param[in] signal            The signal associated with this IRQ
 * \param[in] irq_line          The number of the IRQ line
 *
 * \retval void                 Success.
 * \retval "Does not return"    Partition ID is invalid
 */
void tfm_set_irq_signal(uint32_t partition_id, psa_signal_t signal,
                        uint32_t irq_line)
{
    __disable_irq();

    tfm_spm_hal_disable_irq(irq_line);
    notify_with_signal(partition_id, signal);

    __enable_irq();
}

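/*
 * Note: the signal must have exactly one bit set; NULL is returned for a
 * signal that does not have exactly one bit set, or when none of the
 * partition's IRQs is associated with the given signal.
 */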
struct irq_load_info_t *get_irq_info_for_signal(
                                    const struct partition_load_info_t *p_ldinf,
                                    psa_signal_t signal)
{
    size_t i;
    struct irq_load_info_t *irq_info;

    if (!IS_ONLY_ONE_BIT_IN_UINT32(signal)) {
        return NULL;
    }

    irq_info = (struct irq_load_info_t *)LOAD_INFO_IRQ(p_ldinf);
    for (i = 0; i < p_ldinf->nirqs; i++) {
        if (irq_info[i].signal == signal) {
            return &irq_info[i];
        }
    }

    return NULL;
}

#if !defined(__ARM_ARCH_8_1M_MAIN__)
void tfm_spm_validate_caller(struct partition_t *p_cur_sp, uint32_t *p_ctx,
                             uint32_t exc_return, bool ns_caller)
{
    uintptr_t stacked_ctx_pos;

    if (ns_caller) {
        /*
         * A background IRQ cannot be supported, because if an SP is
         * executing, the preempted context of the SP can be different from
         * the one that preempts the veneer.
         */
        if (p_cur_sp->p_ldinf->pid != TFM_SP_NON_SECURE_ID) {
            tfm_core_panic();
        }

        /*
         * It is a non-secure caller; check whether the veneer stack contains
         * multiple contexts.
         */
        stacked_ctx_pos = (uintptr_t)p_ctx +
                          sizeof(struct tfm_state_context_t) +
                          TFM_STACK_SEALED_SIZE;

        if (is_stack_alloc_fp_space(exc_return)) {
#if defined(__FPU_USED) && (__FPU_USED == 1U)
            if (FPU->FPCCR & FPU_FPCCR_TS_Msk) {
                stacked_ctx_pos += TFM_ADDTIONAL_FP_CONTEXT_WORDS *
                                   sizeof(uint32_t);
            }
#endif
            stacked_ctx_pos += TFM_BASIC_FP_CONTEXT_WORDS * sizeof(uint32_t);
        }

        if (stacked_ctx_pos != p_cur_sp->sp_thread.stk_top) {
            tfm_core_panic();
        }
    } else if (p_cur_sp->p_ldinf->pid <= 0) {
        tfm_core_panic();
    }
}
#endif

void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
{
    uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
    uint32_t running_partition_flags = 0;
    const struct partition_t *partition = NULL;

    /* Check permissions on a request type basis */

    switch (svc_ctx->r0) {
    case TFM_SPM_REQUEST_RESET_VOTE:
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }
        running_partition_flags = partition->p_ldinf->flags;

        /* Currently only PSA Root of Trust services are allowed to make a
         * Reset vote request
         */
        if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
            *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
            break;
        }

        /* FixMe: this is a placeholder for checks to be performed before
         * allowing execution of reset
         */
        *res_ptr = (uint32_t)TFM_SUCCESS;

        break;
    default:
        *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
    }
}