/*
 * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <inttypes.h>
#include <stdbool.h>
#include "bitops.h"
#include "fih.h"
#include "psa/client.h"
#include "psa/service.h"
#include "tfm_thread.h"
#include "tfm_wait.h"
#include "internal_errors.h"
#include "tfm_spm_hal.h"
#include "tfm_irq_list.h"
#include "tfm_api.h"
#include "tfm_secure_api.h"
#include "tfm_memory_utils.h"
#include "tfm_hal_defs.h"
#include "tfm_hal_isolation.h"
#include "spm_ipc.h"
#include "tfm_peripherals_def.h"
#include "tfm_core_utils.h"
#include "tfm_rpc.h"
#include "tfm_core_trustzone.h"
#include "lists.h"
#include "tfm_pools.h"
#include "region.h"
#include "region_defs.h"
#include "spm_partition_defs.h"
#include "psa_manifest/pid.h"
#include "tfm/tfm_spm_services.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/asset_defs.h"
#include "load/partition_static_load.h"

#include "secure_fw/partitions/tfm_service_list.inc"
#include "tfm_spm_db_ipc.inc"

/* Pools */
TFM_POOL_DECLARE(conn_handle_pool, sizeof(struct tfm_conn_handle_t),
                 TFM_CONN_HANDLE_MAX_NUM);

void tfm_set_irq_signal(uint32_t partition_id, psa_signal_t signal,
                        uint32_t irq_line);

#include "tfm_secure_irq_handlers_ipc.inc"

/* Partition static data region */
REGION_DECLARE(Image$$, TFM_SP_STATIC_LIST, $$RO$$Base);
REGION_DECLARE(Image$$, TFM_SP_STATIC_LIST, $$RO$$Limit);

/*********************** Connection handle conversion APIs *******************/

#define CONVERSION_FACTOR_BITOFFSET    3
#define CONVERSION_FACTOR_VALUE        (1 << CONVERSION_FACTOR_BITOFFSET)
/* Set 32 as the maximum */
#define CONVERSION_FACTOR_VALUE_MAX    0x20

#if CONVERSION_FACTOR_VALUE > CONVERSION_FACTOR_VALUE_MAX
#error "CONVERSION FACTOR OUT OF RANGE"
#endif

static uint32_t loop_index;

/*
 * A handle instance psa_handle_t allocated inside SPM is actually a memory
 * address within the handle pool. Returning this handle to the client
 * directly would expose that secure memory address. Therefore, the handle
 * is converted into another value that does not represent the memory
 * address, to avoid exposing secure memory directly to clients.
 *
 * This function converts the handle instance into another value by scaling
 * the handle's offset in the pool; the converted value is called a user
 * handle.
 *
 * The formula:
 *  user_handle = (handle_instance - POOL_START) * CONVERSION_FACTOR_VALUE +
 *                CLIENT_HANDLE_VALUE_MIN + loop_index
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 *
 * note:
 *  loop_index is used to ensure that the same handle instance is converted
 *  into different user handles within a short time.
 */
psa_handle_t tfm_spm_to_user_handle(struct tfm_conn_handle_t *handle_instance)
{
    psa_handle_t user_handle;

    loop_index = (loop_index + 1) % CONVERSION_FACTOR_VALUE;
    user_handle = (psa_handle_t)((((uintptr_t)handle_instance -
                  (uintptr_t)conn_handle_pool) << CONVERSION_FACTOR_BITOFFSET) +
                  CLIENT_HANDLE_VALUE_MIN + loop_index);

    return user_handle;
}
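
/*
 * Worked example of the conversion above (illustrative numbers only; the
 * concrete offset and loop_index values are assumptions, not taken from a
 * real pool layout):
 * if a handle instance sits at byte offset 0x40 from conn_handle_pool and
 * loop_index is currently 5, then
 *     user_handle = (0x40 << CONVERSION_FACTOR_BITOFFSET)
 *                   + CLIENT_HANDLE_VALUE_MIN + 5
 *                 = CLIENT_HANDLE_VALUE_MIN + 0x205
 * Converting the same instance again immediately afterwards uses
 * loop_index = 6 and therefore yields a different user handle.
 */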

/*
 * This function converts a user handle into the corresponding handle
 * instance. The converted value is validated before returning; NULL is
 * returned for an invalid handle instance.
 *
 * The formula:
 *  handle_instance = ((user_handle - CLIENT_HANDLE_VALUE_MIN) /
 *                    CONVERSION_FACTOR_VALUE) + POOL_START
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 */
struct tfm_conn_handle_t *tfm_spm_to_handle_instance(psa_handle_t user_handle)
{
    struct tfm_conn_handle_t *handle_instance;

    if (user_handle == PSA_NULL_HANDLE) {
        return NULL;
    }

    handle_instance = (struct tfm_conn_handle_t *)((((uintptr_t)user_handle -
                      CLIENT_HANDLE_VALUE_MIN) >> CONVERSION_FACTOR_BITOFFSET) +
                      (uintptr_t)conn_handle_pool);

    return handle_instance;
}
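
/*
 * Round-trip sketch (uses the assumed 0x40 offset example above): the right
 * shift discards the loop_index bits added during conversion, so
 *     ((0x40 << 3) + 5) >> 3 == 0x40
 * and the original pool address is recovered, provided
 * loop_index < CONVERSION_FACTOR_VALUE, which the modulo in
 * tfm_spm_to_user_handle() guarantees. Callers in this file additionally
 * check the result with is_valid_chunk_data_in_pool() before using it.
 */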

/* Service handle management functions */
struct tfm_conn_handle_t *tfm_spm_create_conn_handle(struct service_t *service,
                                                     int32_t client_id)
{
    struct tfm_conn_handle_t *p_handle;

    TFM_CORE_ASSERT(service);

    /* Get buffer for handle list structure from handle pool */
    p_handle = (struct tfm_conn_handle_t *)tfm_pool_alloc(conn_handle_pool);
    if (!p_handle) {
        return NULL;
    }

    p_handle->service = service;
    p_handle->status = TFM_HANDLE_STATUS_IDLE;
    p_handle->client_id = client_id;

    /* Add the handle node to the list for subsequent PSA functions */
    BI_LIST_INSERT_BEFORE(&service->handle_list, &p_handle->list);

    return p_handle;
}

int32_t tfm_spm_validate_conn_handle(
                                    const struct tfm_conn_handle_t *conn_handle,
                                    int32_t client_id)
{
    /* Check that the handle address is valid */
    if (is_valid_chunk_data_in_pool(conn_handle_pool,
                                    (uint8_t *)conn_handle) != true) {
        return SPM_ERROR_GENERIC;
    }

    /* Check that the handle's caller is correct */
    if (conn_handle->client_id != client_id) {
        return SPM_ERROR_GENERIC;
    }

    return SPM_SUCCESS;
}

int32_t tfm_spm_free_conn_handle(struct service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(conn_handle != NULL);

    /* Clear magic as the handle is not used anymore */
    conn_handle->internal_msg.magic = 0;

    /* Remove node from handle list */
    BI_LIST_REMOVE_NODE(&conn_handle->list);

    /* Return the handle buffer to the pool */
    tfm_pool_free(conn_handle_pool, conn_handle);
    return SPM_SUCCESS;
}
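
/*
 * Typical lifecycle of a connection handle inside SPM (illustrative sketch
 * only; "service", "client_id" and "rhandle" are assumed to come from the
 * caller's context, and error handling is trimmed):
 *
 *     struct tfm_conn_handle_t *conn;
 *
 *     conn = tfm_spm_create_conn_handle(service, client_id);
 *     if (conn &&
 *         tfm_spm_validate_conn_handle(conn, client_id) == SPM_SUCCESS) {
 *         tfm_spm_set_rhandle(service, conn, rhandle);
 *         (message exchange via tfm_spm_fill_msg()/tfm_spm_send_event())
 *         tfm_spm_free_conn_handle(service, conn);
 *     }
 */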

int32_t tfm_spm_set_rhandle(struct service_t *service,
                            struct tfm_conn_handle_t *conn_handle,
                            void *rhandle)
{
    TFM_CORE_ASSERT(service);
    /* Setting the rhandle is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    conn_handle->rhandle = rhandle;
    return SPM_SUCCESS;
}

/**
 * \brief Get the reverse handle value from a connection handle.
 *
 * \param[in] service           Target service context pointer
 * \param[in] conn_handle       Connection handle created by
 *                              tfm_spm_create_conn_handle()
 *
 * \retval void *               Success
 * \retval "Does not return"    Panic in the following cases:
 *                              service pointer is NULL
 *                              handle is \ref PSA_NULL_HANDLE
 *                              handle node is not found
 */
static void *tfm_spm_get_rhandle(struct service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    /* Getting the rhandle is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    return conn_handle->rhandle;
}

/* Partition management functions */

struct tfm_msg_body_t *tfm_spm_get_msg_by_signal(struct partition_t *partition,
                                                 psa_signal_t signal)
{
    struct bi_list_node_t *node, *head;
    struct tfm_msg_body_t *tmp_msg, *msg = NULL;

    TFM_CORE_ASSERT(partition);

    head = &partition->msg_list;

    if (BI_LIST_IS_EMPTY(head)) {
        return NULL;
    }

    /*
     * There may be multiple messages for this RoT Service signal; do not
     * clear the partition signal mask until no message remains. The search
     * may be optimized.
     */
    BI_LIST_FOR_EACH(node, head) {
        tmp_msg = TO_CONTAINER(node, struct tfm_msg_body_t, msg_node);
        if (tmp_msg->service->service_db->signal == signal && msg) {
            return msg;
        } else if (tmp_msg->service->service_db->signal == signal) {
            msg = tmp_msg;
            BI_LIST_REMOVE_NODE(node);
        }
    }

    partition->signals_asserted &= ~signal;

    return msg;
}
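
/*
 * Behavioural sketch of the lookup above (assumed scenario, for
 * illustration): if two messages for the same RoT Service signal are queued
 * on the partition, the first call returns one of them and leaves the
 * signal asserted because another message is still pending; the second call
 * returns the remaining message and clears the signal bit in
 * signals_asserted.
 */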

/**
 * \brief Returns the index of the partition with the given partition ID.
 *
 * \param[in] partition_id      Partition id
 *
 * \return the partition idx if partition_id is valid,
 *         \ref SPM_INVALID_PARTITION_IDX otherwise
 */
static uint32_t get_partition_idx(uint32_t partition_id)
{
    uint32_t i;

    if (partition_id == INVALID_PARTITION_ID) {
        return SPM_INVALID_PARTITION_IDX;
    }

    for (i = 0; i < g_spm_partition_db.partition_count; ++i) {
        if (g_spm_partition_db.partitions[i].p_static->pid == partition_id) {
            return i;
        }
    }
    return SPM_INVALID_PARTITION_IDX;
}

/**
 * \brief Get the flags associated with a partition
 *
 * \param[in] partition_idx     Partition index
 *
 * \return Flags associated with the partition
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_flags(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].p_static->flags;
}

#if TFM_LVL != 1
/**
 * \brief Change the privilege mode for partition thread mode.
 *
 * \param[in] privileged        Privileged mode,
 *                              \ref TFM_PARTITION_PRIVILEGED_MODE
 *                              and \ref TFM_PARTITION_UNPRIVILEGED_MODE
 *
 * \note Barrier instructions are not called by this function, and if
 *       it is called in thread mode, it might be necessary to call
 *       them after this function returns.
 */
static void tfm_spm_partition_change_privilege(uint32_t privileged)
{
    CONTROL_Type ctrl;

    ctrl.w = __get_CONTROL();

    if (privileged == TFM_PARTITION_PRIVILEGED_MODE) {
        ctrl.b.nPRIV = 0;
    } else {
        ctrl.b.nPRIV = 1;
    }

    __set_CONTROL(ctrl.w);
}
#endif /* if(TFM_LVL != 1) */

uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
{
    if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
        return TFM_PARTITION_PRIVILEGED_MODE;
    } else {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
}

bool tfm_is_partition_privileged(uint32_t partition_idx)
{
    uint32_t flags = tfm_spm_partition_get_flags(partition_idx);

    return tfm_spm_partition_get_privileged_mode(flags) ==
           TFM_PARTITION_PRIVILEGED_MODE;
}

struct service_t *tfm_spm_get_service_by_sid(uint32_t sid)
{
    uint32_t i, num;

    num = sizeof(g_services) / sizeof(struct service_t);
    for (i = 0; i < num; i++) {
        if (g_services[i].service_db->sid == sid) {
            return &g_services[i];
        }
    }

    return NULL;
}

/**
 * \brief Get the partition context by partition ID.
 *
 * \param[in] partition_id      Partition identity
 *
 * \retval NULL                 Failed
 * \retval "Not NULL"           Target partition context pointer,
 *                              \ref partition_t structure
 */
static struct partition_t *tfm_spm_get_partition_by_id(int32_t partition_id)
{
    uint32_t idx = get_partition_idx(partition_id);

    if (idx != SPM_INVALID_PARTITION_IDX) {
        return &(g_spm_partition_db.partitions[idx]);
    }
    return NULL;
}

struct partition_t *tfm_spm_get_running_partition(void)
{
    struct tfm_core_thread_t *pth = tfm_core_thrd_get_curr();
    struct partition_t *partition;

    partition = TO_CONTAINER(pth, struct partition_t, sp_thread);

    return partition;
}

int32_t tfm_spm_check_client_version(struct service_t *service,
                                     uint32_t version)
{
    TFM_CORE_ASSERT(service);

    switch (SERVICE_GET_VERSION_POLICY(service->service_db->flags)) {
    case TFM_VERSION_POLICY_RELAXED:
        if (version > service->service_db->version) {
            return SPM_ERROR_VERSION;
        }
        break;
    case TFM_VERSION_POLICY_STRICT:
        if (version != service->service_db->version) {
            return SPM_ERROR_VERSION;
        }
        break;
    default:
        return SPM_ERROR_VERSION;
    }
    return SPM_SUCCESS;
}
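
/*
 * Example of the two version policies (illustrative numbers only): for a
 * service whose manifest version is 2,
 *  - TFM_VERSION_POLICY_RELAXED accepts client versions 1 and 2 (any
 *    version <= 2) and rejects 3 with SPM_ERROR_VERSION;
 *  - TFM_VERSION_POLICY_STRICT accepts only version 2.
 */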

int32_t tfm_spm_check_authorization(uint32_t sid,
                                    struct service_t *service,
                                    bool ns_caller)
{
    struct partition_t *partition = NULL;
    uint32_t *dep;
    int32_t i;

    TFM_CORE_ASSERT(service);

    if (ns_caller) {
        if (!SERVICE_IS_NS_ACCESSIBLE(service->service_db->flags)) {
            return SPM_ERROR_GENERIC;
        }
    } else {
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }

        dep = (uint32_t *)LOAD_INFO_DEPS(partition->p_static);
        for (i = 0; i < partition->p_static->ndeps; i++) {
            if (dep[i] == sid) {
                break;
            }
        }

        if (i == partition->p_static->ndeps) {
            return SPM_ERROR_GENERIC;
        }
    }
    return SPM_SUCCESS;
}
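
/*
 * Concrete example (the SID value is hypothetical, for illustration only):
 * if the running partition lists SID 0x0000F000 in its dependencies, a
 * secure call to that service passes the loop above, while a call to any
 * SID not in the list returns SPM_ERROR_GENERIC. A non-secure caller is
 * accepted only when the service is flagged NS-accessible, independent of
 * any dependency list.
 */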

/* Message functions */

struct tfm_msg_body_t *tfm_spm_get_msg_from_handle(psa_handle_t msg_handle)
{
    /*
     * The message handle passed by the caller is considered invalid in the
     * following cases:
     *   1. Not a valid message handle. (The address of a message is not the
     *      address of a possible handle from the pool.)
     *   2. The handle does not belong to the caller partition. (The handle is
     *      either unused, or owned by another partition.)
     * Check the conditions above.
     */
    struct tfm_msg_body_t *p_msg;
    uint32_t partition_id;
    struct tfm_conn_handle_t *p_conn_handle =
                                     tfm_spm_to_handle_instance(msg_handle);

    if (is_valid_chunk_data_in_pool(
        conn_handle_pool, (uint8_t *)p_conn_handle) != 1) {
        return NULL;
    }

    p_msg = &p_conn_handle->internal_msg;

    /*
     * Check that the magic number is correct. This proves that the message
     * structure contains an active message.
     */
    if (p_msg->magic != TFM_MSG_MAGIC) {
        return NULL;
    }

    /* Check that the running partition owns the message */
    partition_id = tfm_spm_partition_get_running_partition_id();
    if (partition_id != p_msg->service->partition->p_static->pid) {
        return NULL;
    }

    return p_msg;
}

struct tfm_msg_body_t *
    tfm_spm_get_msg_buffer_from_conn_handle(struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(conn_handle != NULL);

    return &(conn_handle->internal_msg);
}
void tfm_spm_fill_msg(struct tfm_msg_body_t *msg,
                      struct service_t *service,
                      psa_handle_t handle,
                      int32_t type, int32_t client_id,
                      psa_invec *invec, size_t in_len,
                      psa_outvec *outvec, size_t out_len,
                      psa_outvec *caller_outvec)
{
    uint32_t i;
    struct tfm_conn_handle_t *conn_handle;

    TFM_CORE_ASSERT(msg);
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(!(invec == NULL && in_len != 0));
    TFM_CORE_ASSERT(!(outvec == NULL && out_len != 0));
    TFM_CORE_ASSERT(in_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(out_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(in_len + out_len <= PSA_MAX_IOVEC);

    /* Clear message buffer before using it */
    spm_memset(msg, 0, sizeof(struct tfm_msg_body_t));

    tfm_event_init(&msg->ack_evnt);
    msg->magic = TFM_MSG_MAGIC;
    msg->service = service;
    msg->caller_outvec = caller_outvec;
    msg->msg.client_id = client_id;

    /* Copy contents */
    msg->msg.type = type;

    for (i = 0; i < in_len; i++) {
        msg->msg.in_size[i] = invec[i].len;
        msg->invec[i].base = invec[i].base;
    }

    for (i = 0; i < out_len; i++) {
        msg->msg.out_size[i] = outvec[i].len;
        msg->outvec[i].base = outvec[i].base;
        /* Out len is used to record the written number, set it to 0 again */
        msg->outvec[i].len = 0;
    }

    /* Use the user connection handle as the message handle */
    msg->msg.handle = handle;

    conn_handle = tfm_spm_to_handle_instance(handle);
    /* For a connected handle, set rhandle for every message */
    if (conn_handle) {
        msg->msg.rhandle = tfm_spm_get_rhandle(service, conn_handle);
    }

    /* Set the private data of the NSPE client caller in multi-core topology */
    if (TFM_CLIENT_ID_IS_NS(client_id)) {
        tfm_rpc_set_caller_data(msg, client_id);
    }
}

void tfm_spm_send_event(struct service_t *service,
                        struct tfm_msg_body_t *msg)
{
    struct partition_t *partition = NULL;
    psa_signal_t signal = 0;

    if (!msg || !service || !service->service_db || !service->partition) {
        tfm_core_panic();
    }

    partition = service->partition;
    signal = service->service_db->signal;

    /* Add message to the tail of the partition message list */
    BI_LIST_INSERT_BEFORE(&partition->msg_list, &msg->msg_node);

    /* Messages put. Update signals */
    partition->signals_asserted |= signal;

    if (partition->signals_waiting & signal) {
        tfm_event_wake(
            &partition->event,
            (partition->signals_asserted & partition->signals_waiting));
        partition->signals_waiting &= ~signal;
    }

    /*
     * If it is a NS request via RPC, it is unnecessary to block the current
     * thread.
     */
    if (!is_tfm_rpc_msg(msg)) {
        tfm_event_wait(&msg->ack_evnt);
    }
}

uint32_t tfm_spm_partition_get_running_partition_id(void)
{
    struct partition_t *partition;

    partition = tfm_spm_get_running_partition();
    if (partition && partition->p_static) {
        return partition->p_static->pid;
    } else {
        return INVALID_PARTITION_ID;
    }
}

int32_t tfm_memory_check(const void *buffer, size_t len, bool ns_caller,
                         enum tfm_memory_access_e access,
                         uint32_t privileged)
{
    enum tfm_hal_status_t err;
    uint32_t attr = 0;

    /* If len is zero, this indicates an empty buffer and base is ignored */
    if (len == 0) {
        return SPM_SUCCESS;
    }

    if (!buffer) {
        return SPM_ERROR_BAD_PARAMETERS;
    }

    if ((uintptr_t)buffer > (UINTPTR_MAX - len)) {
        return SPM_ERROR_MEMORY_CHECK;
    }

    if (access == TFM_MEMORY_ACCESS_RW) {
        attr |= (TFM_HAL_ACCESS_READABLE | TFM_HAL_ACCESS_WRITABLE);
    } else {
        attr |= TFM_HAL_ACCESS_READABLE;
    }

    if (privileged == TFM_PARTITION_UNPRIVILEGED_MODE) {
        attr |= TFM_HAL_ACCESS_UNPRIVILEGED;
    } else {
        attr &= ~TFM_HAL_ACCESS_UNPRIVILEGED;
    }

    if (ns_caller) {
        attr |= TFM_HAL_ACCESS_NS;
    }

    err = tfm_hal_memory_has_access((uintptr_t)buffer, len, attr);

    if (err == TFM_HAL_SUCCESS) {
        return SPM_SUCCESS;
    }

    return SPM_ERROR_MEMORY_CHECK;
}
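
/*
 * Example of how the access attributes are composed above (sketch; the
 * attribute bit definitions come from tfm_hal_defs.h): a read-write check
 * for an unprivileged non-secure caller is passed to the HAL as
 *     TFM_HAL_ACCESS_READABLE | TFM_HAL_ACCESS_WRITABLE |
 *     TFM_HAL_ACCESS_UNPRIVILEGED | TFM_HAL_ACCESS_NS
 * while a privileged secure read-only check uses TFM_HAL_ACCESS_READABLE
 * alone.
 */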

/* Allocate runtime space for partition from the pool. Static allocation. */
static struct partition_t *tfm_allocate_partition(void)
{
    static uint32_t partition_pool_pos = 0;
    struct partition_t *p_partition_allocated = NULL;

    if (partition_pool_pos >= g_spm_partition_db.partition_count) {
        return NULL;
    }

    p_partition_allocated = &g_spm_partition_db.partitions[partition_pool_pos];
    partition_pool_pos++;

    return p_partition_allocated;
}

/* Allocate runtime space for service from the pool. Static allocation. */
static struct service_t *tfm_allocate_service(uint32_t service_count)
{
    static uint32_t service_pool_pos = 0;
    struct service_t *p_service_allocated = NULL;
    uint32_t num_of_services = sizeof(g_services) / sizeof(struct service_t);

    if ((service_count == 0) ||
        (service_count > num_of_services) ||
        (service_pool_pos >= num_of_services) ||
        (service_pool_pos + service_count > num_of_services)) {
        return NULL;
    }

    p_service_allocated = &g_services[service_pool_pos];
    service_pool_pos += service_count;

    return p_service_allocated;
}

/* Validate partition static data */
bool tfm_validate_partition_static(struct partition_load_info_t *p_cmninf)
{
    return ((p_cmninf->psa_ff_ver & PARTITION_INFO_MAGIC_MASK)
            == PARTITION_INFO_MAGIC);
}

uint32_t tfm_spm_init(void)
{
    uint32_t i, j, part_idx;
    struct partition_t *partition;
    struct service_t *service;
    struct tfm_core_thread_t *pth, *p_ns_entry_thread = NULL;
    const struct platform_data_t *platform_data_p;
    uintptr_t part_load_start, part_load_end;
    struct partition_load_info_t *p_cmninf;
    struct service_load_info_t *p_service_static;
    struct asset_desc_t *p_asset_load;
#ifdef TFM_FIH_PROFILE_ON
    fih_int fih_rc = FIH_FAILURE;
#endif

    tfm_pool_init(conn_handle_pool,
                  POOL_BUFFER_SIZE(conn_handle_pool),
                  sizeof(struct tfm_conn_handle_t),
                  TFM_CONN_HANDLE_MAX_NUM);

    /* Load partition and service data */
    part_idx = 0;
    part_load_start = PART_REGION_ADDR(TFM_SP_STATIC_LIST, $$RO$$Base);
    part_load_end = PART_REGION_ADDR(TFM_SP_STATIC_LIST, $$RO$$Limit);
    while (part_load_start < part_load_end) {
        p_cmninf = (struct partition_load_info_t *)part_load_start;

        /* Validate static info section range */
        part_load_start += LOAD_INFSZ_BYTES(p_cmninf);
        if (part_load_start > part_load_end) {
            tfm_core_panic();
        }

        /* Validate partition static info */
        if (!tfm_validate_partition_static(p_cmninf)) {
            tfm_core_panic();
        }
        if (!(p_cmninf->flags & SPM_PART_FLAG_IPC)) {
            tfm_core_panic();
        }
        if ((p_cmninf->psa_ff_ver & PARTITION_INFO_VERSION_MASK)
            > PSA_FRAMEWORK_VERSION) {
            ERROR_MSG("Warning: Partition requires higher framework version!");
            tfm_core_panic();
        }

        /* Allocate runtime space */
        partition = tfm_allocate_partition();
        if (!partition) {
            tfm_core_panic();
        }
        if (p_cmninf->nservices) {
            service = tfm_allocate_service(p_cmninf->nservices);
            if (!service) {
                tfm_core_panic();
            }
        } else {
            service = NULL;
        }

        partition->p_static = p_cmninf;

        /* Init partition device object assets */
        p_asset_load = (struct asset_desc_t *)LOAD_INFO_ASSET(p_cmninf);
        for (i = 0; i < p_cmninf->nassets; i++) {
            /* Skip the memory-based asset */
            if (!(p_asset_load[i].attr & ASSET_DEV_REF_BIT)) {
                continue;
            }

            platform_data_p = POSITION_TO_PTR(p_asset_load[i].dev.addr_ref,
                                              struct platform_data_t *);

            /*
             * TODO: some partitions declare MMIOs that do not exist on
             * specific platforms, and the platform defines a dummy NULL
             * reference for these MMIO items, which causes 'nassets' to
             * contain several NULL items. Skip the initialization of these
             * NULL items temporarily to avoid HAL API panic.
             * Eventually, these platform-specific partitions need to be moved
             * into a platform-specific folder. Then this workaround can be
             * removed.
             */
            if (!platform_data_p) {
                continue;
            }

#ifdef TFM_FIH_PROFILE_ON
            FIH_CALL(tfm_spm_hal_configure_default_isolation, fih_rc, part_idx,
                     platform_data_p);
            if (fih_not_eq(fih_rc, fih_int_encode(TFM_PLAT_ERR_SUCCESS))) {
                tfm_core_panic();
            }
#else /* TFM_FIH_PROFILE_ON */
            if (tfm_spm_hal_configure_default_isolation(part_idx,
                    platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                tfm_core_panic();
            }
#endif /* TFM_FIH_PROFILE_ON */
        }

        partition->signals_allowed |= PSA_DOORBELL;

        /* TODO: This can be optimized by generating the assigned signal
         * at code generation time.
         */
        for (j = 0; j < tfm_core_irq_signals_count; ++j) {
            if (tfm_core_irq_signals[j].partition_id == p_cmninf->pid) {
                partition->signals_allowed |=
                    tfm_core_irq_signals[j].signal_value;
                if ((p_cmninf->psa_ff_ver & PARTITION_INFO_VERSION_MASK)
                    == 0x0100) {
                    tfm_spm_hal_enable_irq(tfm_core_irq_signals[j].irq_line);
                } else if ((p_cmninf->psa_ff_ver & PARTITION_INFO_VERSION_MASK)
                           == 0x0101) {
                    tfm_spm_hal_disable_irq(tfm_core_irq_signals[j].irq_line);
                }
            }
        }

        tfm_event_init(&partition->event);
        BI_LIST_INIT_NODE(&partition->msg_list);

        pth = &partition->sp_thread;
        if (!pth) {
            tfm_core_panic();
        }

        /* Extendable partition static info is right after p_cmninf. */
        tfm_core_thrd_init(
                    pth,
                    POSITION_TO_ENTRY(p_cmninf->entry, tfm_core_thrd_entry_t),
                    NULL,
                    LOAD_ALLOCED_STACK_ADDR(p_cmninf) + p_cmninf->stack_size,
                    LOAD_ALLOCED_STACK_ADDR(p_cmninf));

        pth->prior = TO_THREAD_PRIORITY(PARTITION_GET_PRIOR(p_cmninf->flags));

        if (p_cmninf->pid == TFM_SP_NON_SECURE_ID) {
            p_ns_entry_thread = pth;
            pth->param = (void *)tfm_spm_hal_get_ns_entry_point();
        }

        /* Kick off */
        if (tfm_core_thrd_start(pth) != THRD_SUCCESS) {
            tfm_core_panic();
        }

        /* Init Services in the partition */
        p_service_static =
            (struct service_load_info_t *)LOAD_INFO_SERVICE(p_cmninf);
        for (i = 0; i < p_cmninf->nservices; i++) {
            /* Fill service runtime data */
            partition->signals_allowed |= p_service_static[i].signal;
            service[i].service_db = &p_service_static[i];
            service[i].partition = partition;

            /* Populate the p_service of stateless_service_ref[] */
            if (SERVICE_IS_STATELESS(p_service_static[i].flags)) {
                for (j = 0; j < STATIC_HANDLE_NUM_LIMIT; j++) {
                    if (stateless_service_ref[j].sid ==
                        p_service_static[i].sid) {
                        stateless_service_ref[j].p_service = &service[i];
                        break;
                    }
                }
                /* Stateless service not found in tracking table */
                if (j >= STATIC_HANDLE_NUM_LIMIT) {
                    tfm_core_panic();
                }
            }

            BI_LIST_INIT_NODE(&service[i].handle_list);
        }
        part_idx++;
    }

    /*
     * All threads initialized, start the scheduler.
     *
     * NOTE:
     * It is worthwhile to give the thread object to the scheduler if the
     * background context belongs to one of the threads. Here the background
     * thread is the initialization thread that calls the SPM SVC, which
     * re-uses the non-secure entry thread's stack. After SPM initialization
     * is done, this stack is cleaned up and the background context is never
     * going to return. Tell the scheduler that the current thread is the
     * non-secure entry thread.
     */
    tfm_core_thrd_start_scheduler(p_ns_entry_thread);

    return p_ns_entry_thread->arch_ctx.lr;
}

void tfm_pendsv_do_schedule(struct tfm_arch_ctx_t *p_actx)
{
#if TFM_LVL != 1
    struct partition_t *p_next_partition;
    const struct partition_load_info_t *p_part_static;
    uint32_t is_privileged;
#endif
    struct tfm_core_thread_t *pth_next = tfm_core_thrd_get_next();
    struct tfm_core_thread_t *pth_curr = tfm_core_thrd_get_curr();
#if defined(TFM_FIH_PROFILE_ON) && (TFM_LVL == 3)
    fih_int fih_rc = FIH_FAILURE;
#endif

    if (pth_next != NULL && pth_curr != pth_next) {
#if TFM_LVL != 1
        p_next_partition = TO_CONTAINER(pth_next,
                                        struct partition_t,
                                        sp_thread);
        p_part_static = p_next_partition->p_static;
        if (p_part_static->flags & SPM_PART_FLAG_PSA_ROT) {
            is_privileged = TFM_PARTITION_PRIVILEGED_MODE;
        } else {
            is_privileged = TFM_PARTITION_UNPRIVILEGED_MODE;
        }

        tfm_spm_partition_change_privilege(is_privileged);
#if TFM_LVL == 3
        /*
         * FIXME: To implement isolation among partitions in isolation level 3,
         * each partition needs to run in unprivileged mode. Currently some
         * PRoTs cannot work in unprivileged mode, make them privileged now.
         */
        if (is_privileged == TFM_PARTITION_UNPRIVILEGED_MODE) {
            struct asset_desc_t *p_asset =
                (struct asset_desc_t *)LOAD_INFO_ASSET(p_part_static);
            /* Partition must have private data as the first asset in LVL3 */
            if (p_part_static->nassets == 0) {
                tfm_core_panic();
            }
            if (p_asset->attr & ASSET_DEV_REF_BIT) {
                tfm_core_panic();
            }
            /* FIXME: only MPU-based implementations are supported currently */
#ifdef TFM_FIH_PROFILE_ON
            FIH_CALL(tfm_hal_mpu_update_partition_boundary, fih_rc,
                     p_asset->mem.addr_x, p_asset->mem.addr_y);
            if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
                tfm_core_panic();
            }
#else /* TFM_FIH_PROFILE_ON */
            if (tfm_hal_mpu_update_partition_boundary(p_asset->mem.addr_x,
                                                      p_asset->mem.addr_y)
                != TFM_HAL_SUCCESS) {
                tfm_core_panic();
            }
#endif /* TFM_FIH_PROFILE_ON */
        }
#endif /* TFM_LVL == 3 */
#endif /* TFM_LVL != 1 */

        tfm_core_thrd_switch_context(p_actx, pth_curr, pth_next);
    }

    /*
     * Handle pending mailbox message from NS in multi-core topology.
     * Empty operation on single Armv8-M platform.
     */
    tfm_rpc_client_call_handler();
}

void update_caller_outvec_len(struct tfm_msg_body_t *msg)
{
    uint32_t i;

    /*
     * FixMe: abstract this part into dedicated functions to avoid
     * accessing the thread context in the PSA layer
     */
    /* If it is a NS request via RPC, the owner of this message is not set */
    if (!is_tfm_rpc_msg(msg)) {
        TFM_CORE_ASSERT(msg->ack_evnt.owner->state == THRD_STATE_BLOCK);
    }

    for (i = 0; i < PSA_MAX_IOVEC; i++) {
        if (msg->msg.out_size[i] == 0) {
            continue;
        }

        TFM_CORE_ASSERT(msg->caller_outvec[i].base == msg->outvec[i].base);

        msg->caller_outvec[i].len = msg->outvec[i].len;
    }
}

void notify_with_signal(int32_t partition_id, psa_signal_t signal)
{
    struct partition_t *partition = NULL;

    /*
     * The value of partition_id must be greater than zero as the target of
     * notification must be a Secure Partition; providing a Non-secure
     * Partition ID is a fatal error.
     */
    if (!TFM_CLIENT_ID_IS_S(partition_id)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if partition_id does not correspond to a Secure
     * Partition.
     */
    partition = tfm_spm_get_partition_by_id(partition_id);
    if (!partition) {
        tfm_core_panic();
    }

    partition->signals_asserted |= signal;

    if (partition->signals_waiting & signal) {
        tfm_event_wake(
            &partition->event,
            partition->signals_asserted & partition->signals_waiting);
        partition->signals_waiting &= ~signal;
    }
}

/**
 * \brief Set a signal to the partition for a Second-Level Interrupt Handling
 *        mode IRQ.
 *
 * \param[in] partition_id      The ID of the partition which handles this IRQ
 * \param[in] signal            The signal associated with this IRQ
 * \param[in] irq_line          The number of the IRQ line
 *
 * \retval void                 Success.
 * \retval "Does not return"    Partition ID is invalid
 */
void tfm_set_irq_signal(uint32_t partition_id, psa_signal_t signal,
                        uint32_t irq_line)
{
    __disable_irq();

    tfm_spm_hal_disable_irq(irq_line);
    notify_with_signal(partition_id, signal);

    __enable_irq();
}

int32_t get_irq_line_for_signal(int32_t partition_id, psa_signal_t signal)
{
    size_t i;

    if (!IS_ONLY_ONE_BIT_IN_UINT32(signal)) {
        return -1;
    }

    for (i = 0; i < tfm_core_irq_signals_count; ++i) {
        if (tfm_core_irq_signals[i].partition_id == partition_id &&
            tfm_core_irq_signals[i].signal_value == signal) {
            return tfm_core_irq_signals[i].irq_line;
        }
    }

    return SPM_ERROR_GENERIC;
}

#if !defined(__ARM_ARCH_8_1M_MAIN__)
void tfm_spm_validate_caller(struct partition_t *p_cur_sp, uint32_t *p_ctx,
                             uint32_t exc_return, bool ns_caller)
{
    uintptr_t stacked_ctx_pos;

    if (ns_caller) {
        /*
         * The background IRQ can't be supported, since if an SP is executing,
         * the preempted context of the SP can be different from the one that
         * preempts the veneer.
         */
        if (p_cur_sp->p_static->pid != TFM_SP_NON_SECURE_ID) {
            tfm_core_panic();
        }

        /*
         * It is a non-secure caller; check if the veneer stack contains
         * multiple contexts.
         */
        stacked_ctx_pos = (uintptr_t)p_ctx +
                          sizeof(struct tfm_state_context_t) +
                          TFM_STACK_SEALED_SIZE;

        if (is_stack_alloc_fp_space(exc_return)) {
#if defined (__FPU_USED) && (__FPU_USED == 1U)
            if (FPU->FPCCR & FPU_FPCCR_TS_Msk) {
                stacked_ctx_pos += TFM_ADDTIONAL_FP_CONTEXT_WORDS *
                                   sizeof(uint32_t);
            }
#endif
            stacked_ctx_pos += TFM_BASIC_FP_CONTEXT_WORDS * sizeof(uint32_t);
        }

        if (stacked_ctx_pos != p_cur_sp->sp_thread.stk_top) {
            tfm_core_panic();
        }
    } else if (p_cur_sp->p_static->pid <= 0) {
        tfm_core_panic();
    }
}
#endif

void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
{
    uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
    uint32_t running_partition_flags = 0;
    const struct partition_t *partition = NULL;

    /* Check permissions on request type basis */

    switch (svc_ctx->r0) {
    case TFM_SPM_REQUEST_RESET_VOTE:
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }
        running_partition_flags = partition->p_static->flags;

        /* Currently only PSA Root of Trust services are allowed to make a
         * Reset vote request
         */
        if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
            *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
        }

        /* FixMe: this is a placeholder for checks to be performed before
         * allowing execution of reset
         */
        *res_ptr = (uint32_t)TFM_SUCCESS;

        break;
    default:
        *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
    }
}