/*
 * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <inttypes.h>
#include <stdbool.h>
#include "bitops.h"
#include "psa/client.h"
#include "psa/service.h"
#include "tfm_thread.h"
#include "tfm_wait.h"
#include "internal_errors.h"
#include "tfm_spm_hal.h"
#include "tfm_irq_list.h"
#include "tfm_api.h"
#include "tfm_secure_api.h"
#include "tfm_memory_utils.h"
#include "tfm_hal_defs.h"
#include "tfm_hal_isolation.h"
#include "spm_ipc.h"
#include "tfm_peripherals_def.h"
#include "tfm_core_utils.h"
#include "tfm_rpc.h"
#include "tfm_core_trustzone.h"
#include "lists.h"
#include "tfm_pools.h"
#include "region.h"
#include "region_defs.h"
#include "spm_partition_defs.h"
#include "psa_manifest/pid.h"
#include "tfm/tfm_spm_services.h"

#include "secure_fw/partitions/tfm_service_list.inc"
#include "tfm_spm_db_ipc.inc"

/* Pools */
TFM_POOL_DECLARE(conn_handle_pool, sizeof(struct tfm_conn_handle_t),
                 TFM_CONN_HANDLE_MAX_NUM);

void tfm_set_irq_signal(uint32_t partition_id, psa_signal_t signal,
                        uint32_t irq_line);

#include "tfm_secure_irq_handlers_ipc.inc"

/*********************** Connection handle conversion APIs *******************/

#define CONVERSION_FACTOR_BITOFFSET    3
#define CONVERSION_FACTOR_VALUE        (1 << CONVERSION_FACTOR_BITOFFSET)
/* Set 32 as the maximum */
#define CONVERSION_FACTOR_VALUE_MAX    0x20

#if CONVERSION_FACTOR_VALUE > CONVERSION_FACTOR_VALUE_MAX
#error "CONVERSION FACTOR OUT OF RANGE"
#endif

static uint32_t loop_index;

/*
 * A handle instance psa_handle_t allocated inside SPM is actually a memory
 * address within the handle pool. Returning this handle to the client
 * directly would expose secure memory addresses. Therefore the handle is
 * converted into another value that does not represent the memory address,
 * so that secure memory is not exposed directly to clients.
 *
 * This function converts the handle instance into another value by scaling
 * its offset in the pool; the converted value is called a user handle.
 *
 * The formula:
 *  user_handle = (handle_instance - POOL_START) * CONVERSION_FACTOR_VALUE +
 *                CLIENT_HANDLE_VALUE_MIN + loop_index
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 *
 * note:
 *  loop_index is used to guarantee that the same handle instance is converted
 *  into different user handles within a short time.
 */
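/*
 * Worked example (hypothetical values, added for clarity only): with
 * CONVERSION_FACTOR_BITOFFSET == 3, a handle instance located 0x40 bytes
 * after the start of conn_handle_pool and loop_index == 5 converts to
 *  user_handle = (0x40 << 3) + CLIENT_HANDLE_VALUE_MIN + 5
 *              = CLIENT_HANDLE_VALUE_MIN + 0x205.
 */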
psa_handle_t tfm_spm_to_user_handle(struct tfm_conn_handle_t *handle_instance)
{
    psa_handle_t user_handle;

    loop_index = (loop_index + 1) % CONVERSION_FACTOR_VALUE;
    user_handle = (psa_handle_t)((((uintptr_t)handle_instance -
                  (uintptr_t)conn_handle_pool) << CONVERSION_FACTOR_BITOFFSET) +
                  CLIENT_HANDLE_VALUE_MIN + loop_index);

    return user_handle;
}

/*
 * This function converts a user handle into the corresponding handle
 * instance. The converted value is validated before it is returned; an
 * invalid handle instance is returned as NULL.
 *
 * The formula:
 *  handle_instance = ((user_handle - CLIENT_HANDLE_VALUE_MIN) /
 *                    CONVERSION_FACTOR_VALUE) + POOL_START
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 */
struct tfm_conn_handle_t *tfm_spm_to_handle_instance(psa_handle_t user_handle)
{
    struct tfm_conn_handle_t *handle_instance;

    if (user_handle == PSA_NULL_HANDLE) {
        return NULL;
    }

    handle_instance = (struct tfm_conn_handle_t *)((((uintptr_t)user_handle -
                      CLIENT_HANDLE_VALUE_MIN) >> CONVERSION_FACTOR_BITOFFSET) +
                      (uintptr_t)conn_handle_pool);

    return handle_instance;
}

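/*
 * Note (informal observation): since loop_index is always smaller than
 * CONVERSION_FACTOR_VALUE, the right shift in the reverse conversion above
 * discards it entirely, so tfm_spm_to_handle_instance() applied to the value
 * produced by tfm_spm_to_user_handle() recovers the original pool address.
 */
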
/* Service handle management functions */
struct tfm_conn_handle_t *tfm_spm_create_conn_handle(
                                        struct tfm_spm_service_t *service,
                                        int32_t client_id)
{
    struct tfm_conn_handle_t *p_handle;

    TFM_CORE_ASSERT(service);

    /* Get a buffer for the handle list structure from the handle pool */
    p_handle = (struct tfm_conn_handle_t *)tfm_pool_alloc(conn_handle_pool);
    if (!p_handle) {
        return NULL;
    }

    p_handle->service = service;
    p_handle->status = TFM_HANDLE_STATUS_IDLE;
    p_handle->client_id = client_id;

    /* Add the handle node to the list for subsequent PSA functions */
    BI_LIST_INSERT_BEFORE(&service->handle_list, &p_handle->list);

    return p_handle;
}

int32_t tfm_spm_validate_conn_handle(
                                    const struct tfm_conn_handle_t *conn_handle,
                                    int32_t client_id)
{
    /* Check that the handle address is valid */
    if (is_valid_chunk_data_in_pool(conn_handle_pool,
                                    (uint8_t *)conn_handle) != true) {
        return SPM_ERROR_GENERIC;
    }

    /* Check that the handle belongs to this caller */
    if (conn_handle->client_id != client_id) {
        return SPM_ERROR_GENERIC;
    }

    return SPM_SUCCESS;
}

int32_t tfm_spm_free_conn_handle(struct tfm_spm_service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(conn_handle != NULL);

    /* Clear the magic number as the handle is no longer in use */
    conn_handle->internal_msg.magic = 0;

    /* Remove the node from the handle list */
    BI_LIST_REMOVE_NODE(&conn_handle->list);

    /* Return the handle buffer to the pool */
    tfm_pool_free(conn_handle_pool, conn_handle);
    return SPM_SUCCESS;
}

int32_t tfm_spm_set_rhandle(struct tfm_spm_service_t *service,
                            struct tfm_conn_handle_t *conn_handle,
                            void *rhandle)
{
    TFM_CORE_ASSERT(service);
    /* Setting rhandle is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    conn_handle->rhandle = rhandle;
    return SPM_SUCCESS;
}

/**
 * \brief Get the reverse handle value from a connection handle.
 *
 * \param[in] service           Target service context pointer
 * \param[in] conn_handle       Connection handle created by
 *                              tfm_spm_create_conn_handle()
 *
 * \retval void *               Success
 * \retval "Does not return"    Panics when:
 *                              the service pointer is NULL,
 *                              the handle is \ref PSA_NULL_HANDLE,
 *                              or the handle node is not found
 */
static void *tfm_spm_get_rhandle(struct tfm_spm_service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    /* Getting rhandle is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    return conn_handle->rhandle;
}

/* Partition management functions */

struct tfm_msg_body_t *tfm_spm_get_msg_by_signal(struct partition_t *partition,
                                                 psa_signal_t signal)
{
    struct bi_list_node_t *node, *head;
    struct tfm_msg_body_t *tmp_msg, *msg = NULL;

    TFM_CORE_ASSERT(partition);

    head = &partition->msg_list;

    if (BI_LIST_IS_EMPTY(head)) {
        return NULL;
    }

    /*
     * There may be multiple messages for this RoT Service signal, so do not
     * clear the partition's signal mask until no message remains. The search
     * may be optimized.
     */
    BI_LIST_FOR_EACH(node, head) {
        tmp_msg = TFM_GET_CONTAINER_PTR(node, struct tfm_msg_body_t, msg_node);
        if (tmp_msg->service->service_db->signal == signal && msg) {
            return msg;
        } else if (tmp_msg->service->service_db->signal == signal) {
            msg = tmp_msg;
            BI_LIST_REMOVE_NODE(node);
        }
    }

    partition->signals_asserted &= ~signal;

    return msg;
}

/**
 * \brief Returns the index of the partition with the given partition ID.
 *
 * \param[in] partition_id     Partition ID
 *
 * \return the partition index if partition_id is valid,
 *         \ref SPM_INVALID_PARTITION_IDX otherwise
 */
static uint32_t get_partition_idx(uint32_t partition_id)
{
    uint32_t i;

    if (partition_id == INVALID_PARTITION_ID) {
        return SPM_INVALID_PARTITION_IDX;
    }

    for (i = 0; i < g_spm_partition_db.partition_count; ++i) {
        if (g_spm_partition_db.partitions[i].p_static->pid == partition_id) {
            return i;
        }
    }
    return SPM_INVALID_PARTITION_IDX;
}

/**
 * \brief Get the flags associated with a partition
 *
 * \param[in] partition_idx     Partition index
 *
 * \return Flags associated with the partition
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_flags(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].p_static->flags;
}

#if TFM_LVL != 1
/**
 * \brief Change the privilege mode for partition thread mode.
 *
 * \param[in] privileged        Privileged mode,
 *                              \ref TFM_PARTITION_PRIVILEGED_MODE
 *                              and \ref TFM_PARTITION_UNPRIVILEGED_MODE
 *
 * \note Barrier instructions are not called by this function, and if
 *       it is called in thread mode, it might be necessary to call
 *       them after this function returns.
 */
static void tfm_spm_partition_change_privilege(uint32_t privileged)
{
    CONTROL_Type ctrl;

    ctrl.w = __get_CONTROL();

    if (privileged == TFM_PARTITION_PRIVILEGED_MODE) {
        ctrl.b.nPRIV = 0;
    } else {
        ctrl.b.nPRIV = 1;
    }

    __set_CONTROL(ctrl.w);
}
#endif /* if(TFM_LVL != 1) */

uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
{
    if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
        return TFM_PARTITION_PRIVILEGED_MODE;
    } else {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
}

bool tfm_is_partition_privileged(uint32_t partition_idx)
{
    uint32_t flags = tfm_spm_partition_get_flags(partition_idx);

    return tfm_spm_partition_get_privileged_mode(flags) ==
           TFM_PARTITION_PRIVILEGED_MODE;
}

struct tfm_spm_service_t *tfm_spm_get_service_by_sid(uint32_t sid)
{
    uint32_t i, num;

    num = sizeof(service) / sizeof(struct tfm_spm_service_t);
    for (i = 0; i < num; i++) {
        if (service[i].service_db->sid == sid) {
            return &service[i];
        }
    }

    return NULL;
}

/**
 * \brief Get the partition context by partition ID.
 *
 * \param[in] partition_id     Partition identity
 *
 * \retval NULL                Failed
 * \retval "Not NULL"          Target partition context pointer,
 *                             \ref partition_t structures
 */
static struct partition_t *tfm_spm_get_partition_by_id(int32_t partition_id)
{
    uint32_t idx = get_partition_idx(partition_id);

    if (idx != SPM_INVALID_PARTITION_IDX) {
        return &(g_spm_partition_db.partitions[idx]);
    }
    return NULL;
}

struct partition_t *tfm_spm_get_running_partition(void)
{
    struct tfm_core_thread_t *pth = tfm_core_thrd_get_curr();
    struct partition_t *partition;

    partition = TFM_GET_CONTAINER_PTR(pth, struct partition_t, sp_thread);

    return partition;
}

int32_t tfm_spm_check_client_version(struct tfm_spm_service_t *service,
                                     uint32_t version)
{
    TFM_CORE_ASSERT(service);

    switch (service->service_db->version_policy) {
    case TFM_VERSION_POLICY_RELAXED:
        if (version > service->service_db->version) {
            return SPM_ERROR_VERSION;
        }
        break;
    case TFM_VERSION_POLICY_STRICT:
        if (version != service->service_db->version) {
            return SPM_ERROR_VERSION;
        }
        break;
    default:
        return SPM_ERROR_VERSION;
    }
    return SPM_SUCCESS;
}

int32_t tfm_spm_check_authorization(uint32_t sid,
                                    struct tfm_spm_service_t *service,
                                    bool ns_caller)
{
    struct partition_t *partition = NULL;
    int32_t i;

    TFM_CORE_ASSERT(service);

    if (ns_caller) {
        if (!service->service_db->non_secure_client) {
            return SPM_ERROR_GENERIC;
        }
    } else {
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }

        for (i = 0; i < partition->p_static->ndeps; i++) {
            if (partition->p_static->deps[i] == sid) {
                break;
            }
        }

        if (i == partition->p_static->ndeps) {
            return SPM_ERROR_GENERIC;
        }
    }
    return SPM_SUCCESS;
}

/* Message functions */

struct tfm_msg_body_t *tfm_spm_get_msg_from_handle(psa_handle_t msg_handle)
{
    /*
     * The message handle passed by the caller is considered invalid in the
     * following cases:
     *   1. It is not a valid message handle (the address of the message is
     *      not the address of a possible handle from the pool).
     *   2. The handle does not belong to the caller partition (the handle is
     *      either unused, or owned by another partition).
     * Check the conditions above.
     */
    struct tfm_msg_body_t *p_msg;
    uint32_t partition_id;
    struct tfm_conn_handle_t *p_conn_handle =
                                     tfm_spm_to_handle_instance(msg_handle);

    if (is_valid_chunk_data_in_pool(
        conn_handle_pool, (uint8_t *)p_conn_handle) != 1) {
        return NULL;
    }

    p_msg = &p_conn_handle->internal_msg;

    /*
     * Check that the magic number is correct. This proves that the message
     * structure contains an active message.
     */
    if (p_msg->magic != TFM_MSG_MAGIC) {
        return NULL;
    }

    /* Check that the running partition owns the message */
    partition_id = tfm_spm_partition_get_running_partition_id();
    if (partition_id != p_msg->service->partition->p_static->pid) {
        return NULL;
    }

    return p_msg;
}

struct tfm_msg_body_t *
    tfm_spm_get_msg_buffer_from_conn_handle(struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(conn_handle != NULL);

    return &(conn_handle->internal_msg);
}

void tfm_spm_fill_msg(struct tfm_msg_body_t *msg,
                      struct tfm_spm_service_t *service,
                      psa_handle_t handle,
                      int32_t type, int32_t client_id,
                      psa_invec *invec, size_t in_len,
                      psa_outvec *outvec, size_t out_len,
                      psa_outvec *caller_outvec)
{
    uint32_t i;
    struct tfm_conn_handle_t *conn_handle;

    TFM_CORE_ASSERT(msg);
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(!(invec == NULL && in_len != 0));
    TFM_CORE_ASSERT(!(outvec == NULL && out_len != 0));
    TFM_CORE_ASSERT(in_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(out_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(in_len + out_len <= PSA_MAX_IOVEC);

    /* Clear the message buffer before using it */
    spm_memset(msg, 0, sizeof(struct tfm_msg_body_t));

    tfm_event_init(&msg->ack_evnt);
    msg->magic = TFM_MSG_MAGIC;
    msg->service = service;
    msg->caller_outvec = caller_outvec;
    msg->msg.client_id = client_id;

    /* Copy contents */
    msg->msg.type = type;

    for (i = 0; i < in_len; i++) {
        msg->msg.in_size[i] = invec[i].len;
        msg->invec[i].base = invec[i].base;
    }

    for (i = 0; i < out_len; i++) {
        msg->msg.out_size[i] = outvec[i].len;
        msg->outvec[i].base = outvec[i].base;
        /* The out length records the number of bytes written, so reset it to
         * 0 here.
         */
        msg->outvec[i].len = 0;
    }

    /* Use the user connection handle as the message handle */
    msg->msg.handle = handle;

    conn_handle = tfm_spm_to_handle_instance(handle);
    /* For a connected handle, set rhandle on every message */
    if (conn_handle) {
        msg->msg.rhandle = tfm_spm_get_rhandle(service, conn_handle);
    }

    /* Set the private data of the NSPE client in a multi-core topology */
    if (TFM_CLIENT_ID_IS_NS(client_id)) {
        tfm_rpc_set_caller_data(msg, client_id);
    }
}

int32_t tfm_spm_send_event(struct tfm_spm_service_t *service,
                           struct tfm_msg_body_t *msg)
{
    struct partition_t *partition = service->partition;

    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(msg);

    /* Add the message to the tail of the partition's message list */
    BI_LIST_INSERT_BEFORE(&partition->msg_list, &msg->msg_node);

    /* The message is queued. Update the signals */
    partition->signals_asserted |= service->service_db->signal;

    tfm_event_wake(&partition->event,
                   (partition->signals_asserted & partition->signals_waiting));

    /*
     * If it is an NS request via RPC, it is unnecessary to block the current
     * thread.
     */
    if (!is_tfm_rpc_msg(msg)) {
        tfm_event_wait(&msg->ack_evnt);
    }

    return SPM_SUCCESS;
}

uint32_t tfm_spm_partition_get_running_partition_id(void)
{
    struct partition_t *partition;

    partition = tfm_spm_get_running_partition();
    if (partition && partition->p_static) {
        return partition->p_static->pid;
    } else {
        return INVALID_PARTITION_ID;
    }
}

int32_t tfm_memory_check(const void *buffer, size_t len, bool ns_caller,
                         enum tfm_memory_access_e access,
                         uint32_t privileged)
{
    enum tfm_hal_status_t err;
    uint32_t attr = 0;

    /* If len is zero, this indicates an empty buffer and base is ignored */
    if (len == 0) {
        return SPM_SUCCESS;
    }

    if (!buffer) {
        return SPM_ERROR_BAD_PARAMETERS;
    }

    if ((uintptr_t)buffer > (UINTPTR_MAX - len)) {
        return SPM_ERROR_MEMORY_CHECK;
    }

    if (access == TFM_MEMORY_ACCESS_RW) {
        attr |= (TFM_HAL_ACCESS_READABLE | TFM_HAL_ACCESS_WRITABLE);
    } else {
        attr |= TFM_HAL_ACCESS_READABLE;
    }

    if (privileged == TFM_PARTITION_UNPRIVILEGED_MODE) {
        attr |= TFM_HAL_ACCESS_UNPRIVILEGED;
    } else {
        attr &= ~TFM_HAL_ACCESS_UNPRIVILEGED;
    }

    if (ns_caller) {
        attr |= TFM_HAL_ACCESS_NS;
    }

    err = tfm_hal_memory_has_access((uintptr_t)buffer, len, attr);

    if (err == TFM_HAL_SUCCESS) {
        return SPM_SUCCESS;
    }

    return SPM_ERROR_MEMORY_CHECK;
}

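/*
 * Illustrative usage of tfm_memory_check() above (a hypothetical caller-side
 * sketch, not code from this file): before writing to a client-supplied
 * output vector, a caller could do
 *
 *  if (tfm_memory_check(outvec->base, outvec->len, ns_caller,
 *                       TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
 *      tfm_core_panic();
 *  }
 *
 * so that the buffer is checked against the active isolation boundary before
 * it is dereferenced.
 */
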
uint32_t tfm_spm_init(void)
{
    uint32_t i, j, num;
    struct partition_t *partition;
    struct tfm_core_thread_t *pth, *p_ns_entry_thread = NULL;
    const struct platform_data_t **platform_data_p;

    tfm_pool_init(conn_handle_pool,
                  POOL_BUFFER_SIZE(conn_handle_pool),
                  sizeof(struct tfm_conn_handle_t),
                  TFM_CONN_HANDLE_MAX_NUM);

    /* Init partitions first as they are used when initializing services */
    for (i = 0; i < g_spm_partition_db.partition_count; i++) {

        partition = &g_spm_partition_db.partitions[i];

        /* Skip NULL checking on statically reserved arrays. */
        partition->p_static = &static_data_list[i];
        partition->memory_data = &memory_data_list[i];

        if (!(partition->p_static->flags & SPM_PART_FLAG_IPC)) {
            tfm_core_panic();
        }

        /* Check if the PSA framework version matches. */
        if (partition->p_static->psa_ff_ver >
            PSA_FRAMEWORK_VERSION) {
            ERROR_MSG("Warning: Partition requires higher framework version!");
            continue;
        }

        platform_data_p =
            (const struct platform_data_t **)partition->p_static->platform_data;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(i,
                    *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    tfm_core_panic();
                }
                ++platform_data_p;
            }
        }

        /* Add the PSA_DOORBELL signal to the allowed signals */
        partition->signals_allowed |= PSA_DOORBELL;

        /* TODO: This can be optimized by generating the assigned signals
         *       at code generation time.
         */
        for (j = 0; j < tfm_core_irq_signals_count; ++j) {
            if (tfm_core_irq_signals[j].partition_id ==
                partition->p_static->pid) {
                partition->signals_allowed |=
                    tfm_core_irq_signals[j].signal_value;
                if (partition->p_static->psa_ff_ver == 0x0100) {
                    tfm_spm_hal_enable_irq(tfm_core_irq_signals[j].irq_line);
                } else if (partition->p_static->psa_ff_ver == 0x0101) {
                    tfm_spm_hal_disable_irq(tfm_core_irq_signals[j].irq_line);
                }
            }
        }

        tfm_event_init(&partition->event);
        BI_LIST_INIT_NODE(&partition->msg_list);

        pth = &partition->sp_thread;
        if (!pth) {
            tfm_core_panic();
        }

        tfm_core_thrd_init(pth,
                           (tfm_core_thrd_entry_t)partition->p_static->entry,
                           NULL,
                           (uintptr_t)partition->memory_data->stack_top,
                           (uintptr_t)partition->memory_data->stack_bottom);

        pth->prior = partition->p_static->priority;

        if (partition->p_static->pid == TFM_SP_NON_SECURE_ID) {
            p_ns_entry_thread = pth;
            pth->param = (void *)tfm_spm_hal_get_ns_entry_point();
        }

        /* Kick off */
        if (tfm_core_thrd_start(pth) != THRD_SUCCESS) {
            tfm_core_panic();
        }
    }

    /* Initialize services */
    num = sizeof(service) / sizeof(struct tfm_spm_service_t);
    for (i = 0; i < num; i++) {
        service[i].service_db = &service_db[i];
        partition =
            tfm_spm_get_partition_by_id(service[i].service_db->partition_id);
        if (!partition) {
            tfm_core_panic();
        }
        service[i].partition = partition;
        partition->signals_allowed |= service[i].service_db->signal;

        BI_LIST_INIT_NODE(&service[i].handle_list);
    }

    /*
     * All threads are initialized, start the scheduler.
     *
     * NOTE:
     * It is worth giving the thread object to the scheduler if the background
     * context belongs to one of the threads. Here the background thread is the
     * initialization thread that calls the SPM SVC, which re-uses the
     * non-secure entry thread's stack. After SPM initialization is done, this
     * stack is cleaned up and the background context is never going to return.
     * Tell the scheduler that the current thread is the non-secure entry
     * thread.
     */
    tfm_core_thrd_start_scheduler(p_ns_entry_thread);

    return p_ns_entry_thread->arch_ctx.lr;
}

void tfm_pendsv_do_schedule(struct tfm_arch_ctx_t *p_actx)
{
#if TFM_LVL != 1
    struct partition_t *p_next_partition;
    uint32_t is_privileged;
#endif
    struct tfm_core_thread_t *pth_next = tfm_core_thrd_get_next();
    struct tfm_core_thread_t *pth_curr = tfm_core_thrd_get_curr();

    if (pth_next != NULL && pth_curr != pth_next) {
#if TFM_LVL != 1
        p_next_partition = TFM_GET_CONTAINER_PTR(pth_next,
                                                 struct partition_t,
                                                 sp_thread);

        if (p_next_partition->p_static->flags & SPM_PART_FLAG_PSA_ROT) {
            is_privileged = TFM_PARTITION_PRIVILEGED_MODE;
        } else {
            is_privileged = TFM_PARTITION_UNPRIVILEGED_MODE;
        }

        tfm_spm_partition_change_privilege(is_privileged);
#if TFM_LVL == 3
        /*
         * FIXME: To implement isolation among partitions in isolation level 3,
         * each partition needs to run in unprivileged mode. Currently some
         * PRoTs cannot work in unprivileged mode, so make them privileged for
         * now.
         */
        if (is_privileged == TFM_PARTITION_UNPRIVILEGED_MODE) {
            /* FIXME: only MPU-based implementations are supported currently */
            if (tfm_hal_mpu_update_partition_boundary(
                                      p_next_partition->memory_data->data_start,
                                      p_next_partition->memory_data->data_limit)
                != TFM_HAL_SUCCESS) {
                tfm_core_panic();
            }
        }
#endif /* TFM_LVL == 3 */
#endif /* TFM_LVL != 1 */

        tfm_core_thrd_switch_context(p_actx, pth_curr, pth_next);
    }

    /*
     * Handle pending mailbox messages from the NS side in a multi-core
     * topology. This is an empty operation on a single Armv8-M platform.
     */
    tfm_rpc_client_call_handler();
}

void update_caller_outvec_len(struct tfm_msg_body_t *msg)
{
    uint32_t i;

    /*
     * FixMe: abstract this part into dedicated functions to avoid
     * accessing the thread context in the PSA layer
     */
    /* If it is an NS request via RPC, the owner of this message is not set */
    if (!is_tfm_rpc_msg(msg)) {
        TFM_CORE_ASSERT(msg->ack_evnt.owner->state == THRD_STATE_BLOCK);
    }

    for (i = 0; i < PSA_MAX_IOVEC; i++) {
        if (msg->msg.out_size[i] == 0) {
            continue;
        }

        TFM_CORE_ASSERT(msg->caller_outvec[i].base == msg->outvec[i].base);

        msg->caller_outvec[i].len = msg->outvec[i].len;
    }
}

void notify_with_signal(int32_t partition_id, psa_signal_t signal)
{
    struct partition_t *partition = NULL;

    /*
     * The value of partition_id must be greater than zero as the target of
     * the notification must be a Secure Partition; providing a Non-secure
     * Partition ID is a fatal error.
     */
    if (!TFM_CLIENT_ID_IS_S(partition_id)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if partition_id does not correspond to a Secure
     * Partition.
     */
    partition = tfm_spm_get_partition_by_id(partition_id);
    if (!partition) {
        tfm_core_panic();
    }

    partition->signals_asserted |= signal;

    /*
     * The target partition may be blocked waiting for signals after calling
     * psa_wait(). Set the return value to the available signals before
     * waking it up with tfm_event_wake().
     */
    tfm_event_wake(&partition->event,
                   partition->signals_asserted & partition->signals_waiting);
}

/**
 * \brief Sets the signal for a partition for a Second-Level Interrupt
 *        Handling mode IRQ
 *
 * \param[in] partition_id      The ID of the partition which handles this IRQ
 * \param[in] signal            The signal associated with this IRQ
 * \param[in] irq_line          The number of the IRQ line
 *
 * \retval void                 Success.
 * \retval "Does not return"    Partition ID is invalid
 */
void tfm_set_irq_signal(uint32_t partition_id, psa_signal_t signal,
                        uint32_t irq_line)
{
    __disable_irq();

    tfm_spm_hal_disable_irq(irq_line);
    notify_with_signal(partition_id, signal);

    __enable_irq();
}

int32_t get_irq_line_for_signal(int32_t partition_id, psa_signal_t signal)
{
    size_t i;

    if (!IS_ONLY_ONE_BIT_IN_UINT32(signal)) {
        return -1;
    }

    for (i = 0; i < tfm_core_irq_signals_count; ++i) {
        if (tfm_core_irq_signals[i].partition_id == partition_id &&
            tfm_core_irq_signals[i].signal_value == signal) {
            return tfm_core_irq_signals[i].irq_line;
        }
    }

    return SPM_ERROR_GENERIC;
}

#if !defined(__ARM_ARCH_8_1M_MAIN__)
void tfm_spm_validate_caller(struct partition_t *p_cur_sp, uint32_t *p_ctx,
                             uint32_t exc_return, bool ns_caller)
{
    uintptr_t stacked_ctx_pos;

    if (ns_caller) {
        /*
         * A background IRQ can't be supported, since if an SP is executing,
         * the preempted context of the SP can be different from the one that
         * preempts the veneer.
         */
        if (p_cur_sp->p_static->pid != TFM_SP_NON_SECURE_ID) {
            tfm_core_panic();
        }

        /*
         * It is a non-secure caller; check whether the veneer stack contains
         * multiple contexts.
         */
        stacked_ctx_pos = (uintptr_t)p_ctx +
                          sizeof(struct tfm_state_context_t) +
                          TFM_STACK_SEALED_SIZE;

        if (is_stack_alloc_fp_space(exc_return)) {
#if defined (__FPU_USED) && (__FPU_USED == 1U)
            if (FPU->FPCCR & FPU_FPCCR_TS_Msk) {
                stacked_ctx_pos += TFM_ADDTIONAL_FP_CONTEXT_WORDS *
                                   sizeof(uint32_t);
            }
#endif
            stacked_ctx_pos += TFM_BASIC_FP_CONTEXT_WORDS * sizeof(uint32_t);
        }

        if (stacked_ctx_pos != p_cur_sp->sp_thread.stk_top) {
            tfm_core_panic();
        }
    } else if (p_cur_sp->p_static->pid <= 0) {
        tfm_core_panic();
    }
}
#endif

942void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
943{
944 uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
945 uint32_t running_partition_flags = 0;
Mingyang Sunae70d8d2020-06-30 15:56:05 +0800946 const struct partition_t *partition = NULL;
Summer Qin830c5542020-02-14 13:44:20 +0800947
948 /* Check permissions on request type basis */
949
950 switch (svc_ctx->r0) {
951 case TFM_SPM_REQUEST_RESET_VOTE:
952 partition = tfm_spm_get_running_partition();
953 if (!partition) {
954 tfm_core_panic();
955 }
Mingyang Sun56c59692020-07-20 17:02:19 +0800956 running_partition_flags = partition->p_static->flags;
Summer Qin830c5542020-02-14 13:44:20 +0800957
958 /* Currently only PSA Root of Trust services are allowed to make Reset
959 * vote request
960 */
961 if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
962 *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
963 }
964
965 /* FixMe: this is a placeholder for checks to be performed before
966 * allowing execution of reset
967 */
968 *res_ptr = (uint32_t)TFM_SUCCESS;
969
970 break;
971 default:
972 *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
973 }
974}