/*
 * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <inttypes.h>
#include <stdbool.h>
#include "bitops.h"
#include "fih.h"
#include "psa/client.h"
#include "psa/service.h"
#include "tfm_thread.h"
#include "tfm_wait.h"
#include "internal_errors.h"
#include "tfm_spm_hal.h"
#include "tfm_api.h"
#include "tfm_secure_api.h"
#include "tfm_memory_utils.h"
#include "tfm_hal_defs.h"
#include "tfm_hal_isolation.h"
#include "spm_ipc.h"
#include "tfm_peripherals_def.h"
#include "tfm_core_utils.h"
#include "tfm_nspm.h"
#include "tfm_rpc.h"
#include "tfm_core_trustzone.h"
#include "lists.h"
#include "tfm_pools.h"
#include "region.h"
#include "psa_manifest/pid.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/asset_defs.h"
#include "load/spm_load_api.h"
#include "load/interrupt_defs.h"

/* Partition and service runtime data list head/runtime data table */
static struct partition_head_t partitions_listhead;
static struct service_head_t services_listhead;
struct service_t *stateless_services_ref_tbl[STATIC_HANDLE_NUM_LIMIT];

/* Pools */
TFM_POOL_DECLARE(conn_handle_pool, sizeof(struct tfm_conn_handle_t),
                 TFM_CONN_HANDLE_MAX_NUM);

void spm_interrupt_handler(struct partition_load_info_t *p_ldinf,
                           psa_signal_t signal,
                           uint32_t irq_line,
                           psa_flih_func flih_func);

#include "tfm_secure_irq_handlers_ipc.inc"

/*********************** Connection handle conversion APIs *******************/

#define CONVERSION_FACTOR_BITOFFSET 3
#define CONVERSION_FACTOR_VALUE     (1 << CONVERSION_FACTOR_BITOFFSET)
/* Set 32 as the maximum */
#define CONVERSION_FACTOR_VALUE_MAX 0x20

#if CONVERSION_FACTOR_VALUE > CONVERSION_FACTOR_VALUE_MAX
#error "CONVERSION FACTOR OUT OF RANGE"
#endif

static uint32_t loop_index;

/*
 * A handle instance psa_handle_t allocated inside SPM is actually a memory
 * address within the handle pool. Returning this handle to the client
 * directly would expose that secure memory address. The handle is therefore
 * converted into another value that does not represent the memory address,
 * so that secure memory is not exposed directly to clients.
 *
 * This function converts the handle instance into another value by scaling
 * the handle's offset in the pool; the converted value is called a user
 * handle.
 *
 * The formula:
 *  user_handle = (handle_instance - POOL_START) * CONVERSION_FACTOR_VALUE +
 *                CLIENT_HANDLE_VALUE_MIN + loop_index
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 *
 * note:
 *  loop_index ensures that the same handle instance is converted into
 *  different user handles within a short period of time.
 */
psa_handle_t tfm_spm_to_user_handle(struct tfm_conn_handle_t *handle_instance)
{
    psa_handle_t user_handle;

    loop_index = (loop_index + 1) % CONVERSION_FACTOR_VALUE;
    user_handle = (psa_handle_t)((((uintptr_t)handle_instance -
                  (uintptr_t)conn_handle_pool) << CONVERSION_FACTOR_BITOFFSET) +
                  CLIENT_HANDLE_VALUE_MIN + loop_index);

    return user_handle;
}
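
/*
 * Worked example of the conversion above. The concrete value of
 * CLIENT_HANDLE_VALUE_MIN below is an illustrative assumption, not a value
 * defined in this file: assume CLIENT_HANDLE_VALUE_MIN == 0x30000000 and a
 * handle instance located 0x40 bytes into conn_handle_pool. With
 * CONVERSION_FACTOR_BITOFFSET == 3:
 *
 *  user_handle = (0x40 << 3) + 0x30000000 + loop_index
 *              = 0x30000200 + loop_index
 *
 * Consecutive conversions of the same instance therefore yield 0x30000200,
 * 0x30000201, ... up to 0x30000207 before loop_index wraps, while the low
 * three bits are discarded again by the reverse conversion below.
 */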

/*
 * This function converts a user handle into the corresponding handle
 * instance. The converted value is validated before returning; an invalid
 * handle instance is returned as NULL.
 *
 * The formula:
 *  handle_instance = ((user_handle - CLIENT_HANDLE_VALUE_MIN) /
 *                    CONVERSION_FACTOR_VALUE) + POOL_START
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 */
struct tfm_conn_handle_t *tfm_spm_to_handle_instance(psa_handle_t user_handle)
{
    struct tfm_conn_handle_t *handle_instance;

    if (user_handle == PSA_NULL_HANDLE) {
        return NULL;
    }

    handle_instance = (struct tfm_conn_handle_t *)((((uintptr_t)user_handle -
                      CLIENT_HANDLE_VALUE_MIN) >> CONVERSION_FACTOR_BITOFFSET) +
                      (uintptr_t)conn_handle_pool);

    return handle_instance;
}
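
/*
 * Typical round-trip usage of the two conversion helpers from SPM code that
 * handles a client request. This is a minimal sketch: client_id and the
 * error handling are illustrative, and tfm_spm_validate_conn_handle() is the
 * validation helper defined later in this file.
 *
 *   psa_handle_t user = tfm_spm_to_user_handle(p_handle);
 *   ...
 *   struct tfm_conn_handle_t *inst = tfm_spm_to_handle_instance(user);
 *   if (!inst ||
 *       tfm_spm_validate_conn_handle(inst, client_id) != SPM_SUCCESS) {
 *       tfm_core_panic();
 *   }
 */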

/* Service handle management functions */
struct tfm_conn_handle_t *tfm_spm_create_conn_handle(struct service_t *service,
                                                     int32_t client_id)
{
    struct tfm_conn_handle_t *p_handle;

    TFM_CORE_ASSERT(service);

    /* Get buffer for handle list structure from handle pool */
    p_handle = (struct tfm_conn_handle_t *)tfm_pool_alloc(conn_handle_pool);
    if (!p_handle) {
        return NULL;
    }

    p_handle->service = service;
    p_handle->status = TFM_HANDLE_STATUS_IDLE;
    p_handle->client_id = client_id;

    /* Add the handle node to the list for subsequent PSA calls */
    BI_LIST_INSERT_BEFORE(&service->handle_list, &p_handle->list);

    return p_handle;
}

int32_t tfm_spm_validate_conn_handle(
                                    const struct tfm_conn_handle_t *conn_handle,
                                    int32_t client_id)
{
    /* Check that the handle address is valid */
    if (is_valid_chunk_data_in_pool(conn_handle_pool,
                                    (uint8_t *)conn_handle) != true) {
        return SPM_ERROR_GENERIC;
    }

    /* Check that the caller of the handle is correct */
    if (conn_handle->client_id != client_id) {
        return SPM_ERROR_GENERIC;
    }

    return SPM_SUCCESS;
}

int32_t tfm_spm_free_conn_handle(struct service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(conn_handle != NULL);

    /* Clear the magic as the handle is not used anymore */
    conn_handle->internal_msg.magic = 0;

    /* Remove node from handle list */
    BI_LIST_REMOVE_NODE(&conn_handle->list);

    /* Return the handle buffer to the pool */
    tfm_pool_free(conn_handle_pool, conn_handle);
    return SPM_SUCCESS;
}

int32_t tfm_spm_set_rhandle(struct service_t *service,
                            struct tfm_conn_handle_t *conn_handle,
                            void *rhandle)
{
    TFM_CORE_ASSERT(service);
    /* Setting the reverse handle value is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    conn_handle->rhandle = rhandle;
    return SPM_SUCCESS;
}

/**
 * \brief Get the reverse handle value from a connection handle.
 *
 * \param[in] service           Target service context pointer
 * \param[in] conn_handle       Connection handle created by
 *                              tfm_spm_create_conn_handle()
 *
 * \retval void *               Success
 * \retval "Does not return"    Panic for the following cases:
 *                              service pointer is NULL
 *                              handle is \ref PSA_NULL_HANDLE
 *                              handle node is not found
 */
static void *tfm_spm_get_rhandle(struct service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    /* Getting the reverse handle value is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    return conn_handle->rhandle;
}

/* Partition management functions */

struct tfm_msg_body_t *tfm_spm_get_msg_by_signal(struct partition_t *partition,
                                                 psa_signal_t signal)
{
    struct bi_list_node_t *node, *head;
    struct tfm_msg_body_t *tmp_msg, *msg = NULL;

    TFM_CORE_ASSERT(partition);

    head = &partition->msg_list;

    if (BI_LIST_IS_EMPTY(head)) {
        return NULL;
    }

    /*
     * There may be multiple messages for this RoT Service signal, so do not
     * clear the partition signal mask until no message remains. The search
     * may be optimized.
     */
    BI_LIST_FOR_EACH(node, head) {
        tmp_msg = TO_CONTAINER(node, struct tfm_msg_body_t, msg_node);
        if (tmp_msg->service->p_ldinf->signal == signal && msg) {
            return msg;
        } else if (tmp_msg->service->p_ldinf->signal == signal) {
            msg = tmp_msg;
            BI_LIST_REMOVE_NODE(node);
        }
    }

    partition->signals_asserted &= ~signal;

    return msg;
}

uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
{
#if TFM_LVL == 1
    return TFM_PARTITION_PRIVILEGED_MODE;
#else /* TFM_LVL == 1 */
    if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
        return TFM_PARTITION_PRIVILEGED_MODE;
    } else {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
#endif /* TFM_LVL == 1 */
}

struct service_t *tfm_spm_get_service_by_sid(uint32_t sid)
{
    struct service_t *p_prev, *p_curr;

    UNI_LIST_FOR_EACH_PREV(p_prev, p_curr, &services_listhead) {
        if (p_curr->p_ldinf->sid == sid) {
            UNI_LIST_MOVE_AFTER(&services_listhead, p_prev, p_curr);
            return p_curr;
        }
    }

    return NULL;
}

/**
 * \brief Get the partition context by partition ID.
 *
 * \param[in] partition_id      Partition identity
 *
 * \retval NULL                 Failed
 * \retval "Not NULL"           Target partition context pointer,
 *                              \ref partition_t structure
 */
struct partition_t *tfm_spm_get_partition_by_id(int32_t partition_id)
{
    struct partition_t *p_part;

    UNI_LIST_FOR_EACH(p_part, &partitions_listhead) {
        if (p_part->p_ldinf->pid == partition_id) {
            return p_part;
        }
    }

    return NULL;
}

struct partition_t *tfm_spm_get_running_partition(void)
{
    struct tfm_core_thread_t *pth = tfm_core_thrd_get_curr();
    struct partition_t *partition;

    partition = TO_CONTAINER(pth, struct partition_t, sp_thread);

    return partition;
}

int32_t tfm_spm_check_client_version(struct service_t *service,
                                     uint32_t version)
{
    TFM_CORE_ASSERT(service);

    switch (SERVICE_GET_VERSION_POLICY(service->p_ldinf->flags)) {
    case SERVICE_VERSION_POLICY_RELAXED:
        if (version > service->p_ldinf->version) {
            return SPM_ERROR_VERSION;
        }
        break;
    case SERVICE_VERSION_POLICY_STRICT:
        if (version != service->p_ldinf->version) {
            return SPM_ERROR_VERSION;
        }
        break;
    default:
        return SPM_ERROR_VERSION;
    }
    return SPM_SUCCESS;
}
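
/*
 * Example of the two version policies checked above (version numbers are
 * illustrative): for a service declared with version 2, a client requesting
 * version 1 is accepted under SERVICE_VERSION_POLICY_RELAXED (1 <= 2) but
 * rejected under SERVICE_VERSION_POLICY_STRICT (1 != 2); a request for
 * version 3 is rejected under both policies.
 */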

int32_t tfm_spm_check_authorization(uint32_t sid,
                                    struct service_t *service,
                                    bool ns_caller)
{
    struct partition_t *partition = NULL;
    uint32_t *dep;
    int32_t i;

    TFM_CORE_ASSERT(service);

    if (ns_caller) {
        if (!SERVICE_IS_NS_ACCESSIBLE(service->p_ldinf->flags)) {
            return SPM_ERROR_GENERIC;
        }
    } else {
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }

        dep = (uint32_t *)LOAD_INFO_DEPS(partition->p_ldinf);
        for (i = 0; i < partition->p_ldinf->ndeps; i++) {
            if (dep[i] == sid) {
                break;
            }
        }

        if (i == partition->p_ldinf->ndeps) {
            return SPM_ERROR_GENERIC;
        }
    }
    return SPM_SUCCESS;
}

/* Message functions */

struct tfm_msg_body_t *tfm_spm_get_msg_from_handle(psa_handle_t msg_handle)
{
    /*
     * The message handle passed by the caller is considered invalid in the
     * following cases:
     *   1. It is not a valid message handle (the address of the message is
     *      not the address of a possible handle from the pool).
     *   2. The handle does not belong to the caller partition (the handle is
     *      either unused or owned by another partition).
     * Check the conditions above.
     */
    struct tfm_msg_body_t *p_msg;
    int32_t partition_id;
    struct tfm_conn_handle_t *p_conn_handle =
                                     tfm_spm_to_handle_instance(msg_handle);

    if (is_valid_chunk_data_in_pool(
        conn_handle_pool, (uint8_t *)p_conn_handle) != 1) {
        return NULL;
    }

    p_msg = &p_conn_handle->internal_msg;

    /*
     * Check that the magic number is correct. This proves that the message
     * structure contains an active message.
     */
    if (p_msg->magic != TFM_MSG_MAGIC) {
        return NULL;
    }

    /* Check that the running partition owns the message */
    partition_id = tfm_spm_partition_get_running_partition_id();
    if (partition_id != p_msg->service->partition->p_ldinf->pid) {
        return NULL;
    }

    return p_msg;
}

struct tfm_msg_body_t *
    tfm_spm_get_msg_buffer_from_conn_handle(struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(conn_handle != NULL);

    return &(conn_handle->internal_msg);
}

void tfm_spm_fill_msg(struct tfm_msg_body_t *msg,
                      struct service_t *service,
                      psa_handle_t handle,
                      int32_t type, int32_t client_id,
                      psa_invec *invec, size_t in_len,
                      psa_outvec *outvec, size_t out_len,
                      psa_outvec *caller_outvec)
{
    uint32_t i;
    struct tfm_conn_handle_t *conn_handle;

    TFM_CORE_ASSERT(msg);
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(!(invec == NULL && in_len != 0));
    TFM_CORE_ASSERT(!(outvec == NULL && out_len != 0));
    TFM_CORE_ASSERT(in_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(out_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(in_len + out_len <= PSA_MAX_IOVEC);

    /* Clear the message buffer before using it */
    spm_memset(msg, 0, sizeof(struct tfm_msg_body_t));

    tfm_event_init(&msg->ack_evnt);
    msg->magic = TFM_MSG_MAGIC;
    msg->service = service;
    msg->caller_outvec = caller_outvec;
    msg->msg.client_id = client_id;

    /* Copy contents */
    msg->msg.type = type;

    for (i = 0; i < in_len; i++) {
        msg->msg.in_size[i] = invec[i].len;
        msg->invec[i].base = invec[i].base;
    }

    for (i = 0; i < out_len; i++) {
        msg->msg.out_size[i] = outvec[i].len;
        msg->outvec[i].base = outvec[i].base;
        /* Out len is used to record the written size, so set it to 0 here */
        msg->outvec[i].len = 0;
    }

    /* Use the user connection handle as the message handle */
    msg->msg.handle = handle;

    conn_handle = tfm_spm_to_handle_instance(handle);
    /* For a connected handle, set rhandle on every message */
    if (conn_handle) {
        msg->msg.rhandle = tfm_spm_get_rhandle(service, conn_handle);
    }

    /* Set the private data of the NSPE client caller in multi-core topology */
    if (TFM_CLIENT_ID_IS_NS(client_id)) {
        tfm_rpc_set_caller_data(msg, client_id);
    }
}

void tfm_spm_send_event(struct service_t *service,
                        struct tfm_msg_body_t *msg)
{
    struct partition_t *partition = NULL;
    psa_signal_t signal = 0;

    if (!msg || !service || !service->p_ldinf || !service->partition) {
        tfm_core_panic();
    }

    partition = service->partition;
    signal = service->p_ldinf->signal;

    /* Add the message to the tail of the partition message list */
    BI_LIST_INSERT_BEFORE(&partition->msg_list, &msg->msg_node);

    /* Messages queued. Update signals */
    partition->signals_asserted |= signal;

    if (partition->signals_waiting & signal) {
        tfm_event_wake(
            &partition->event,
            (partition->signals_asserted & partition->signals_waiting));
        partition->signals_waiting &= ~signal;
    }

    /*
     * If it is an NS request via RPC, it is unnecessary to block the current
     * thread.
     */
    if (!is_tfm_rpc_msg(msg)) {
        tfm_event_wait(&msg->ack_evnt);
    }
}

int32_t tfm_spm_partition_get_running_partition_id(void)
{
    struct partition_t *partition;

    partition = tfm_spm_get_running_partition();
    if (partition && partition->p_ldinf) {
        return partition->p_ldinf->pid;
    } else {
        return INVALID_PARTITION_ID;
    }
}

int32_t tfm_memory_check(const void *buffer, size_t len, bool ns_caller,
                         enum tfm_memory_access_e access,
                         uint32_t privileged)
{
    enum tfm_hal_status_t err;
    uint32_t attr = 0;

    /* If len is zero, this indicates an empty buffer and base is ignored */
    if (len == 0) {
        return SPM_SUCCESS;
    }

    if (!buffer) {
        return SPM_ERROR_BAD_PARAMETERS;
    }

    if ((uintptr_t)buffer > (UINTPTR_MAX - len)) {
        return SPM_ERROR_MEMORY_CHECK;
    }

    if (access == TFM_MEMORY_ACCESS_RW) {
        attr |= (TFM_HAL_ACCESS_READABLE | TFM_HAL_ACCESS_WRITABLE);
    } else {
        attr |= TFM_HAL_ACCESS_READABLE;
    }

    if (privileged == TFM_PARTITION_UNPRIVILEGED_MODE) {
        attr |= TFM_HAL_ACCESS_UNPRIVILEGED;
    } else {
        attr &= ~TFM_HAL_ACCESS_UNPRIVILEGED;
    }

    if (ns_caller) {
        attr |= TFM_HAL_ACCESS_NS;
    }

    err = tfm_hal_memory_has_access((uintptr_t)buffer, len, attr);

    if (err == TFM_HAL_SUCCESS) {
        return SPM_SUCCESS;
    }

    return SPM_ERROR_MEMORY_CHECK;
}
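
/*
 * Illustrative call of the check above for a client outvec that must be
 * writable by an unprivileged non-secure caller. The outvec variable and the
 * error handling are hypothetical sketch material, not taken from this file:
 *
 *   if (tfm_memory_check(outvec.base, outvec.len, true,
 *                        TFM_MEMORY_ACCESS_RW,
 *                        TFM_PARTITION_UNPRIVILEGED_MODE) != SPM_SUCCESS) {
 *       tfm_core_panic();
 *   }
 */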

bool tfm_spm_is_ns_caller(void)
{
#if defined(TFM_MULTI_CORE_TOPOLOGY) || defined(FORWARD_PROT_MSG)
    /* Multi-core NS PSA API requests are processed by PendSV. */
    return (__get_active_exc_num() == EXC_NUM_PENDSV);
#else
    struct partition_t *partition = tfm_spm_get_running_partition();

    if (!partition) {
        tfm_core_panic();
    }

    return (partition->p_ldinf->pid == TFM_SP_NON_SECURE_ID);
#endif
}

uint32_t tfm_spm_get_caller_privilege_mode(void)
{
    struct partition_t *partition;

#if defined(TFM_MULTI_CORE_TOPOLOGY) || defined(FORWARD_PROT_MSG)
    /*
     * In multi-core topology, if the PSA request comes from the mailbox, the
     * client is unprivileged.
     */
    if (__get_active_exc_num() == EXC_NUM_PENDSV) {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
#endif
    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    return tfm_spm_partition_get_privileged_mode(partition->p_ldinf->flags);
}

int32_t tfm_spm_get_client_id(bool ns_caller)
{
    int32_t client_id;

    if (ns_caller) {
        client_id = tfm_nspm_get_current_client_id();
    } else {
        client_id = tfm_spm_partition_get_running_partition_id();
    }

    if (ns_caller != (client_id < 0)) {
        /* An NS client ID must be negative and a secure ID must be >= 0 */
        tfm_core_panic();
    }

    return client_id;
}

uint32_t tfm_spm_init(void)
{
    struct partition_t *partition;
    struct tfm_core_thread_t *pth, *p_ns_entry_thread = NULL;
    const struct partition_load_info_t *p_ldinf;
    void *p_boundaries = NULL;

#ifdef TFM_FIH_PROFILE_ON
    fih_int fih_rc = FIH_FAILURE;
#endif

    tfm_pool_init(conn_handle_pool,
                  POOL_BUFFER_SIZE(conn_handle_pool),
                  sizeof(struct tfm_conn_handle_t),
                  TFM_CONN_HANDLE_MAX_NUM);

    UNI_LISI_INIT_HEAD(&partitions_listhead);
    UNI_LISI_INIT_HEAD(&services_listhead);

    while (1) {
        partition = load_a_partition_assuredly(&partitions_listhead);
        if (partition == NO_MORE_PARTITION) {
            break;
        }

        p_ldinf = partition->p_ldinf;

        if (p_ldinf->nservices) {
            load_services_assuredly(partition, &services_listhead,
                                    stateless_services_ref_tbl,
                                    sizeof(stateless_services_ref_tbl));
        }

        if (p_ldinf->nirqs) {
            load_irqs_assuredly(partition);
        }

        /* Bind the partition with the platform. */
#if TFM_FIH_PROFILE_ON
        FIH_CALL(tfm_hal_bind_boundaries, fih_rc, partition->p_ldinf,
                 &p_boundaries);
        if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
            tfm_core_panic();
        }
#else /* TFM_FIH_PROFILE_ON */
        if (tfm_hal_bind_boundaries(partition->p_ldinf,
                                    &p_boundaries) != TFM_HAL_SUCCESS) {
            tfm_core_panic();
        }
#endif /* TFM_FIH_PROFILE_ON */

        partition->p_boundaries = p_boundaries;
        partition->signals_allowed |= PSA_DOORBELL;

        tfm_event_init(&partition->event);
        BI_LIST_INIT_NODE(&partition->msg_list);

        pth = &partition->sp_thread;

        /* Extendable partition load info is right after p_ldinf. */
        tfm_core_thrd_init(
            pth,
            POSITION_TO_ENTRY(p_ldinf->entry, tfm_core_thrd_entry_t),
            NULL,
            LOAD_ALLOCED_STACK_ADDR(p_ldinf) + p_ldinf->stack_size,
            LOAD_ALLOCED_STACK_ADDR(p_ldinf));

        pth->prior = TO_THREAD_PRIORITY(PARTITION_PRIORITY(p_ldinf->flags));

        if (p_ldinf->pid == TFM_SP_NON_SECURE_ID) {
            p_ns_entry_thread = pth;
            pth->param = (void *)tfm_spm_hal_get_ns_entry_point();
        }

        /* Kick off */
        if (tfm_core_thrd_start(pth) != THRD_SUCCESS) {
            tfm_core_panic();
        }
    }

    /*
     * All threads are initialized, start the scheduler.
     *
     * NOTE:
     * It is worthwhile to give the thread object to the scheduler if the
     * background context belongs to one of the threads. Here the background
     * thread is the initialization thread that calls the SPM SVC, which
     * re-uses the non-secure entry thread's stack. After SPM initialization
     * is done, this stack is cleaned up and the background context is never
     * going to return. Tell the scheduler that the current thread is the
     * non-secure entry thread.
     */
    tfm_core_thrd_start_scheduler(p_ns_entry_thread);

    return p_ns_entry_thread->arch_ctx.lr;
}

void tfm_pendsv_do_schedule(struct tfm_arch_ctx_t *p_actx)
{
    struct partition_t *p_part_curr, *p_part_next;
    struct tfm_core_thread_t *pth_next = tfm_core_thrd_get_next();
    struct tfm_core_thread_t *pth_curr = tfm_core_thrd_get_curr();

    if (pth_next != NULL && pth_curr != pth_next) {
        p_part_curr = TO_CONTAINER(pth_curr, struct partition_t, sp_thread);
        p_part_next = TO_CONTAINER(pth_next, struct partition_t, sp_thread);

        /*
         * If required, let the platform update the boundary based on its
         * implementation. Change privilege, MPU or other configurations.
         */
        if (p_part_curr->p_boundaries != p_part_next->p_boundaries) {
            if (tfm_hal_update_boundaries(p_part_next->p_ldinf,
                                          p_part_next->p_boundaries)
                != TFM_HAL_SUCCESS) {
                tfm_core_panic();
            }
        }
        tfm_core_thrd_switch_context(p_actx, pth_curr, pth_next);
    }

    /*
     * Handle pending mailbox messages from NS in multi-core topology.
     * Empty operation on single Armv8-M platform.
     */
    tfm_rpc_client_call_handler();
}

void update_caller_outvec_len(struct tfm_msg_body_t *msg)
{
    uint32_t i;

    /*
     * FixMe: abstract these parts into dedicated functions to avoid
     * accessing the thread context in the PSA layer.
     */
    /* If it is an NS request via RPC, the owner of this message is not set */
    if (!is_tfm_rpc_msg(msg)) {
        TFM_CORE_ASSERT(msg->ack_evnt.owner->state == THRD_STATE_BLOCK);
    }

    for (i = 0; i < PSA_MAX_IOVEC; i++) {
        if (msg->msg.out_size[i] == 0) {
            continue;
        }

        TFM_CORE_ASSERT(msg->caller_outvec[i].base == msg->outvec[i].base);

        msg->caller_outvec[i].len = msg->outvec[i].len;
    }
}

void notify_with_signal(int32_t partition_id, psa_signal_t signal)
{
    struct partition_t *partition = NULL;

    /*
     * The value of partition_id must be greater than zero as the target of
     * the notification must be a Secure Partition; providing a Non-secure
     * Partition ID is a fatal error.
     */
    if (!TFM_CLIENT_ID_IS_S(partition_id)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if partition_id does not correspond to a Secure
     * Partition.
     */
    partition = tfm_spm_get_partition_by_id(partition_id);
    if (!partition) {
        tfm_core_panic();
    }

    partition->signals_asserted |= signal;

    if (partition->signals_waiting & signal) {
        tfm_event_wake(
            &partition->event,
            partition->signals_asserted & partition->signals_waiting);
        partition->signals_waiting &= ~signal;
    }
}

__attribute__((naked))
static void tfm_flih_deprivileged_handling(uint32_t p_ldinf,
                                           psa_flih_func flih_func,
                                           psa_signal_t signal)
{
    __ASM volatile("SVC %0           \n"
                   "BX LR            \n"
                   : : "I" (TFM_SVC_PREPARE_DEPRIV_FLIH));
}

void spm_interrupt_handler(struct partition_load_info_t *p_ldinf,
                           psa_signal_t signal,
                           uint32_t irq_line,
                           psa_flih_func flih_func)
{
    int32_t pid;
    psa_flih_result_t flih_result;

    pid = p_ldinf->pid;

    if (flih_func == NULL) {
        /* SLIH Model Handling */
        __disable_irq();
        tfm_spm_hal_disable_irq(irq_line);
        notify_with_signal(pid, signal);
        __enable_irq();
        return;
    }

    /* FLIH Model Handling */
    if (tfm_spm_partition_get_privileged_mode(p_ldinf->flags) ==
                                              TFM_PARTITION_PRIVILEGED_MODE) {
        flih_result = flih_func();
        if (flih_result == PSA_FLIH_SIGNAL) {
            __disable_irq();
            notify_with_signal(pid, signal);
            __enable_irq();
        } else if (flih_result != PSA_FLIH_NO_SIGNAL) {
            /*
             * Nothing needs to be done for PSA_FLIH_NO_SIGNAL, but panic if
             * flih_result is invalid.
             */
            tfm_core_panic();
        }
    } else {
        tfm_flih_deprivileged_handling((uint32_t)p_ldinf, flih_func, signal);
    }
}
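
/*
 * Sketch of a partition-side FLIH function as consumed by the handler above.
 * The function name and the device-polling helper are hypothetical and only
 * illustrate the PSA_FLIH_SIGNAL / PSA_FLIH_NO_SIGNAL return contract:
 *
 *   psa_flih_result_t example_timer_flih(void)
 *   {
 *       if (example_timer_needs_thread_handling()) {
 *           return PSA_FLIH_SIGNAL;    // Assert the signal to the partition
 *       }
 *       return PSA_FLIH_NO_SIGNAL;     // Fully handled inside the FLIH
 *   }
 */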

struct irq_load_info_t *get_irq_info_for_signal(
                                    const struct partition_load_info_t *p_ldinf,
                                    psa_signal_t signal)
{
    size_t i;
    struct irq_load_info_t *irq_info;

    if (!IS_ONLY_ONE_BIT_IN_UINT32(signal)) {
        return NULL;
    }

    irq_info = (struct irq_load_info_t *)LOAD_INFO_IRQ(p_ldinf);
    for (i = 0; i < p_ldinf->nirqs; i++) {
        if (irq_info[i].signal == signal) {
            return &irq_info[i];
        }
    }

    return NULL;
}