Edison Ai764d41f2018-09-21 15:56:36 +08001/*
Ken Liu5248af22019-12-29 12:47:13 +08002 * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
Edison Ai764d41f2018-09-21 15:56:36 +08003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 *
6 */
Mingyang Sunda01a972019-07-12 17:32:59 +08007
Edison Ai764d41f2018-09-21 15:56:36 +08008#include <inttypes.h>
9#include <stdbool.h>
Jamie Foxcc31d402019-01-28 17:13:52 +000010#include "psa/client.h"
11#include "psa/service.h"
Mingyang Sund44522a2020-01-16 16:48:37 +080012#include "psa/lifecycle.h"
13#include "tfm_thread.h"
Edison Ai764d41f2018-09-21 15:56:36 +080014#include "tfm_wait.h"
Mingyang Sund44522a2020-01-16 16:48:37 +080015#include "tfm_utils.h"
16#include "tfm_internal_defines.h"
Edison Ai764d41f2018-09-21 15:56:36 +080017#include "tfm_message_queue.h"
Mingyang Sund44522a2020-01-16 16:48:37 +080018#include "tfm_spm_hal.h"
19#include "tfm_irq_list.h"
20#include "tfm_api.h"
21#include "tfm_secure_api.h"
22#include "tfm_memory_utils.h"
23#include "spm_api.h"
24#include "tfm_peripherals_def.h"
25#include "spm_db.h"
26#include "tfm_core_utils.h"
27#include "spm_psa_client_call.h"
28#include "tfm_rpc.h"
29#include "tfm_internal.h"
30#include "tfm_core_trustzone.h"
31#include "tfm_core_mem_check.h"
Edison Ai764d41f2018-09-21 15:56:36 +080032#include "tfm_list.h"
33#include "tfm_pools.h"
Summer Qin2bfd2a02018-09-26 17:10:41 +080034#include "region_defs.h"
Summer Qin830c5542020-02-14 13:44:20 +080035#include "tfm_spm_services_api.h"
Edison Ai764d41f2018-09-21 15:56:36 +080036
Summer Qind99509f2019-08-02 17:36:58 +080037#include "secure_fw/services/tfm_service_list.inc"
38
39/* Extern service variable */
40extern struct tfm_spm_service_t service[];
Summer Qine578c5b2019-08-16 16:42:16 +080041extern const struct tfm_spm_service_db_t service_db[];
Summer Qind99509f2019-08-02 17:36:58 +080042
Edison Ai764d41f2018-09-21 15:56:36 +080043/* Extern SPM variable */
44extern struct spm_partition_db_t g_spm_partition_db;
45
46/* Pools */
47TFM_POOL_DECLARE(conn_handle_pool, sizeof(struct tfm_conn_handle_t),
48 TFM_CONN_HANDLE_MAX_NUM);
Edison Ai764d41f2018-09-21 15:56:36 +080049
Mingyang Sund44522a2020-01-16 16:48:37 +080050void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal,
TTornblomfaf74f52020-03-04 17:56:27 +010051 IRQn_Type irq_line);
Mingyang Sund44522a2020-01-16 16:48:37 +080052
53#include "tfm_secure_irq_handlers_ipc.inc"
Edison Ai764d41f2018-09-21 15:56:36 +080054
55/* Service handle management functions */
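/**
 * \brief Create a connection handle for the given RoT service.
 *
 * \param[in] service           Target service context pointer
 * \param[in] client_id         Identity of the client requesting the
 *                              connection
 *
 * \retval PSA_NULL_HANDLE      Allocation from the handle pool failed
 * \retval "Not PSA_NULL_HANDLE" A valid connection handle, \ref psa_handle_t
 */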
Summer Qin1ce712a2019-10-14 18:04:05 +080056psa_handle_t tfm_spm_create_conn_handle(struct tfm_spm_service_t *service,
57 int32_t client_id)
Edison Ai764d41f2018-09-21 15:56:36 +080058{
Edison Ai9cc26242019-08-06 11:28:04 +080059 struct tfm_conn_handle_t *p_handle;
Edison Ai764d41f2018-09-21 15:56:36 +080060
Ken Liuf250b8b2019-12-27 16:31:24 +080061 TFM_CORE_ASSERT(service);
Edison Ai764d41f2018-09-21 15:56:36 +080062
63 /* Get buffer for handle list structure from handle pool */
Edison Ai9cc26242019-08-06 11:28:04 +080064 p_handle = (struct tfm_conn_handle_t *)tfm_pool_alloc(conn_handle_pool);
65 if (!p_handle) {
Edison Ai764d41f2018-09-21 15:56:36 +080066 return PSA_NULL_HANDLE;
67 }
68
Edison Ai9cc26242019-08-06 11:28:04 +080069 p_handle->service = service;
Shawn Shancc39fcb2019-11-13 15:38:16 +080070 p_handle->status = TFM_HANDLE_STATUS_IDLE;
Summer Qin1ce712a2019-10-14 18:04:05 +080071 p_handle->client_id = client_id;
Edison Ai764d41f2018-09-21 15:56:36 +080072
 73    /* Add the handle node to the list for use by subsequent PSA functions */
Edison Ai9cc26242019-08-06 11:28:04 +080074 tfm_list_add_tail(&service->handle_list, &p_handle->list);
Edison Ai764d41f2018-09-21 15:56:36 +080075
Edison Ai9cc26242019-08-06 11:28:04 +080076 return (psa_handle_t)p_handle;
Edison Ai764d41f2018-09-21 15:56:36 +080077}
78
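/**
 * \brief Validate that a connection handle refers to a live pool entry that
 *        is owned by the given client.
 *
 * \param[in] conn_handle       Connection handle to be validated
 * \param[in] client_id         Expected owner of the connection handle
 *
 * \retval IPC_SUCCESS          The handle is valid and owned by client_id
 * \retval IPC_ERROR_GENERIC    The handle is not a valid pool chunk or the
 *                              owner does not match
 */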
Summer Qin1ce712a2019-10-14 18:04:05 +080079int32_t tfm_spm_validate_conn_handle(psa_handle_t conn_handle,
80 int32_t client_id)
81{
 82    /* Check that the handle address is valid */
83 if (is_valid_chunk_data_in_pool(conn_handle_pool,
84 (uint8_t *)conn_handle) != true) {
85 return IPC_ERROR_GENERIC;
86 }
87
 88    /* Check that the caller owns the handle */
89 if (((struct tfm_conn_handle_t *)conn_handle)->client_id != client_id) {
90 return IPC_ERROR_GENERIC;
91 }
92
93 return IPC_SUCCESS;
94}
95
Edison Ai764d41f2018-09-21 15:56:36 +080096static struct tfm_conn_handle_t *
Mingyang Sun5e13aa72019-07-10 10:30:16 +080097 tfm_spm_find_conn_handle_node(struct tfm_spm_service_t *service,
98 psa_handle_t conn_handle)
Edison Ai764d41f2018-09-21 15:56:36 +080099{
Ken Liuf250b8b2019-12-27 16:31:24 +0800100 TFM_CORE_ASSERT(service);
Edison Ai764d41f2018-09-21 15:56:36 +0800101
Edison Ai9cc26242019-08-06 11:28:04 +0800102 return (struct tfm_conn_handle_t *)conn_handle;
Edison Ai764d41f2018-09-21 15:56:36 +0800103}
104
Mingyang Sund44522a2020-01-16 16:48:37 +0800105/**
 106 * \brief Free a connection handle that is no longer used.
107 *
108 * \param[in] service Target service context pointer
109 * \param[in] conn_handle Connection handle created by
110 * tfm_spm_create_conn_handle(), \ref psa_handle_t
111 *
112 * \retval IPC_SUCCESS Success
113 * \retval IPC_ERROR_BAD_PARAMETERS Bad parameters input
 114 * \retval "Does not return"      Panics if the handle node cannot be found
115 */
116static int32_t tfm_spm_free_conn_handle(struct tfm_spm_service_t *service,
117 psa_handle_t conn_handle)
Edison Ai764d41f2018-09-21 15:56:36 +0800118{
Edison Ai9cc26242019-08-06 11:28:04 +0800119 struct tfm_conn_handle_t *p_handle;
Edison Ai764d41f2018-09-21 15:56:36 +0800120
Ken Liuf250b8b2019-12-27 16:31:24 +0800121 TFM_CORE_ASSERT(service);
Edison Ai764d41f2018-09-21 15:56:36 +0800122
123 /* There are many handles for each RoT Service */
Edison Ai9cc26242019-08-06 11:28:04 +0800124 p_handle = tfm_spm_find_conn_handle_node(service, conn_handle);
125 if (!p_handle) {
Edison Ai9059ea02019-11-28 13:46:14 +0800126 tfm_core_panic();
Edison Ai764d41f2018-09-21 15:56:36 +0800127 }
128
Mate Toth-Pala4b5d242019-09-23 09:14:47 +0200129    /* Clear magic as the handle is not used anymore */
130 p_handle->internal_msg.magic = 0;
131
Edison Ai764d41f2018-09-21 15:56:36 +0800132 /* Remove node from handle list */
Edison Ai9cc26242019-08-06 11:28:04 +0800133 tfm_list_del_node(&p_handle->list);
Edison Ai764d41f2018-09-21 15:56:36 +0800134
 135    /* Return the handle buffer to the pool */
Edison Ai9cc26242019-08-06 11:28:04 +0800136 tfm_pool_free(p_handle);
Edison Ai764d41f2018-09-21 15:56:36 +0800137 return IPC_SUCCESS;
138}
139
Mingyang Sund44522a2020-01-16 16:48:37 +0800140/**
141 * \brief Set reverse handle value for connection.
142 *
143 * \param[in] service Target service context pointer
144 * \param[in] conn_handle Connection handle created by
145 * tfm_spm_create_conn_handle(), \ref psa_handle_t
 146 * \param[in] rhandle              rhandle value to be saved
147 *
148 * \retval IPC_SUCCESS Success
149 * \retval IPC_ERROR_BAD_PARAMETERS Bad parameters input
 150 * \retval "Does not return"      Panics if the handle node cannot be found
151 */
152static int32_t tfm_spm_set_rhandle(struct tfm_spm_service_t *service,
153 psa_handle_t conn_handle,
154 void *rhandle)
Edison Ai764d41f2018-09-21 15:56:36 +0800155{
Edison Ai9cc26242019-08-06 11:28:04 +0800156 struct tfm_conn_handle_t *p_handle;
Edison Ai764d41f2018-09-21 15:56:36 +0800157
Ken Liuf250b8b2019-12-27 16:31:24 +0800158 TFM_CORE_ASSERT(service);
Edison Ai764d41f2018-09-21 15:56:36 +0800159    /* Setting the reverse handle value is only allowed for a connected handle */
Ken Liuf250b8b2019-12-27 16:31:24 +0800160 TFM_CORE_ASSERT(conn_handle != PSA_NULL_HANDLE);
Edison Ai764d41f2018-09-21 15:56:36 +0800161
162 /* There are many handles for each RoT Service */
Edison Ai9cc26242019-08-06 11:28:04 +0800163 p_handle = tfm_spm_find_conn_handle_node(service, conn_handle);
164 if (!p_handle) {
Edison Ai9059ea02019-11-28 13:46:14 +0800165 tfm_core_panic();
Edison Ai764d41f2018-09-21 15:56:36 +0800166 }
167
Edison Ai9cc26242019-08-06 11:28:04 +0800168 p_handle->rhandle = rhandle;
Edison Ai764d41f2018-09-21 15:56:36 +0800169 return IPC_SUCCESS;
170}
171
Mingyang Sund44522a2020-01-16 16:48:37 +0800172/**
 173 * \brief Get the reverse handle value from a connection handle.
174 *
175 * \param[in] service Target service context pointer
176 * \param[in] conn_handle Connection handle created by
177 * tfm_spm_create_conn_handle(), \ref psa_handle_t
178 *
179 * \retval void * Success
 180 * \retval "Does not return"      Panics if:
 181 *                                - the service pointer is NULL
 182 *                                - the handle is \ref PSA_NULL_HANDLE
 183 *                                - the handle node cannot be found
184 */
185static void *tfm_spm_get_rhandle(struct tfm_spm_service_t *service,
186 psa_handle_t conn_handle)
Edison Ai764d41f2018-09-21 15:56:36 +0800187{
Edison Ai9cc26242019-08-06 11:28:04 +0800188 struct tfm_conn_handle_t *p_handle;
Edison Ai764d41f2018-09-21 15:56:36 +0800189
Ken Liuf250b8b2019-12-27 16:31:24 +0800190 TFM_CORE_ASSERT(service);
Edison Ai764d41f2018-09-21 15:56:36 +0800191    /* Getting the reverse handle value is only allowed for a connected handle */
Ken Liuf250b8b2019-12-27 16:31:24 +0800192 TFM_CORE_ASSERT(conn_handle != PSA_NULL_HANDLE);
Edison Ai764d41f2018-09-21 15:56:36 +0800193
194 /* There are many handles for each RoT Service */
Edison Ai9cc26242019-08-06 11:28:04 +0800195 p_handle = tfm_spm_find_conn_handle_node(service, conn_handle);
196 if (!p_handle) {
Edison Ai9059ea02019-11-28 13:46:14 +0800197 tfm_core_panic();
Edison Ai764d41f2018-09-21 15:56:36 +0800198 }
199
Edison Ai9cc26242019-08-06 11:28:04 +0800200 return p_handle->rhandle;
Edison Ai764d41f2018-09-21 15:56:36 +0800201}
202
203/* Partition management functions */
Mingyang Sund44522a2020-01-16 16:48:37 +0800204
205/**
206 * \brief Get the service context by signal.
207 *
208 * \param[in] partition Partition context pointer
209 * \ref spm_partition_desc_t structures
210 * \param[in] signal Signal associated with inputs to the Secure
211 * Partition, \ref psa_signal_t
212 *
213 * \retval NULL Failed
214 * \retval "Not NULL" Target service context pointer,
215 * \ref tfm_spm_service_t structures
216 */
217static struct tfm_spm_service_t *
Mingyang Sunf3d29892019-07-10 17:50:23 +0800218 tfm_spm_get_service_by_signal(struct spm_partition_desc_t *partition,
219 psa_signal_t signal)
Edison Ai764d41f2018-09-21 15:56:36 +0800220{
221 struct tfm_list_node_t *node, *head;
222 struct tfm_spm_service_t *service;
223
Ken Liuf250b8b2019-12-27 16:31:24 +0800224 TFM_CORE_ASSERT(partition);
Edison Ai764d41f2018-09-21 15:56:36 +0800225
Mingyang Sun5e13aa72019-07-10 10:30:16 +0800226 if (tfm_list_is_empty(&partition->runtime_data.service_list)) {
Edison Ai9059ea02019-11-28 13:46:14 +0800227 tfm_core_panic();
Edison Ai764d41f2018-09-21 15:56:36 +0800228 }
229
Mingyang Sun5e13aa72019-07-10 10:30:16 +0800230 head = &partition->runtime_data.service_list;
Edison Ai764d41f2018-09-21 15:56:36 +0800231 TFM_LIST_FOR_EACH(node, head) {
232 service = TFM_GET_CONTAINER_PTR(node, struct tfm_spm_service_t, list);
Summer Qine578c5b2019-08-16 16:42:16 +0800233 if (service->service_db->signal == signal) {
Edison Ai764d41f2018-09-21 15:56:36 +0800234 return service;
235 }
236 }
237 return NULL;
238}
239
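/**
 * \brief Get the service context by the Service ID (SID).
 *
 * Only partitions with the IPC flag are searched.
 *
 * \param[in] sid               RoT Service identity
 *
 * \retval NULL                 Failed, no service with this SID was found
 * \retval "Not NULL"           Target service context pointer,
 *                              \ref tfm_spm_service_t structures
 */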
240struct tfm_spm_service_t *tfm_spm_get_service_by_sid(uint32_t sid)
241{
242 uint32_t i;
243 struct tfm_list_node_t *node, *head;
244 struct tfm_spm_service_t *service;
Mingyang Sun5e13aa72019-07-10 10:30:16 +0800245 struct spm_partition_desc_t *partition;
Edison Ai764d41f2018-09-21 15:56:36 +0800246
Mate Toth-Pal3ad2e3e2019-07-11 21:43:37 +0200247 for (i = 0; i < g_spm_partition_db.partition_count; i++) {
Mingyang Sun5e13aa72019-07-10 10:30:16 +0800248 partition = &g_spm_partition_db.partitions[i];
Edison Ai764d41f2018-09-21 15:56:36 +0800249 /* Skip partition without IPC flag */
Mingyang Sunf3d29892019-07-10 17:50:23 +0800250 if ((tfm_spm_partition_get_flags(i) & SPM_PART_FLAG_IPC) == 0) {
Edison Ai764d41f2018-09-21 15:56:36 +0800251 continue;
252 }
253
Mingyang Sun5e13aa72019-07-10 10:30:16 +0800254 if (tfm_list_is_empty(&partition->runtime_data.service_list)) {
Edison Ai764d41f2018-09-21 15:56:36 +0800255 continue;
256 }
257
Mingyang Sun5e13aa72019-07-10 10:30:16 +0800258 head = &partition->runtime_data.service_list;
Edison Ai764d41f2018-09-21 15:56:36 +0800259 TFM_LIST_FOR_EACH(node, head) {
260 service = TFM_GET_CONTAINER_PTR(node, struct tfm_spm_service_t,
261 list);
Summer Qine578c5b2019-08-16 16:42:16 +0800262 if (service->service_db->sid == sid) {
Edison Ai764d41f2018-09-21 15:56:36 +0800263 return service;
264 }
265 }
266 }
267 return NULL;
268}
269
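/**
 * \brief Get the service context recorded in a connection handle.
 *
 * \note The handle itself is not validated here; callers are expected to
 *       validate it beforehand.
 *
 * \param[in] conn_handle       Connection handle created by
 *                              tfm_spm_create_conn_handle()
 *
 * \return The service context pointer stored in the handle
 */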
270struct tfm_spm_service_t *
271 tfm_spm_get_service_by_handle(psa_handle_t conn_handle)
272{
Edison Ai9cc26242019-08-06 11:28:04 +0800273 return ((struct tfm_conn_handle_t *)conn_handle)->service;
Edison Ai764d41f2018-09-21 15:56:36 +0800274}
275
Mingyang Sund44522a2020-01-16 16:48:37 +0800276/**
277 * \brief Get the partition context by partition ID.
278 *
279 * \param[in] partition_id Partition identity
280 *
281 * \retval NULL Failed
282 * \retval "Not NULL" Target partition context pointer,
283 * \ref spm_partition_desc_t structures
284 */
285static struct spm_partition_desc_t *
286 tfm_spm_get_partition_by_id(int32_t partition_id)
Edison Ai764d41f2018-09-21 15:56:36 +0800287{
Mingyang Sun5e13aa72019-07-10 10:30:16 +0800288 uint32_t idx = get_partition_idx(partition_id);
Edison Ai764d41f2018-09-21 15:56:36 +0800289
Mingyang Sun5e13aa72019-07-10 10:30:16 +0800290 if (idx != SPM_INVALID_PARTITION_IDX) {
291 return &(g_spm_partition_db.partitions[idx]);
Edison Ai764d41f2018-09-21 15:56:36 +0800292 }
293 return NULL;
294}
295
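/**
 * \brief Get the partition context of the currently running partition.
 *
 * \retval NULL                 Failed
 * \retval "Not NULL"           Pointer to the running partition,
 *                              \ref spm_partition_desc_t structures
 */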
Mingyang Sun5e13aa72019-07-10 10:30:16 +0800296struct spm_partition_desc_t *tfm_spm_get_running_partition(void)
Edison Ai764d41f2018-09-21 15:56:36 +0800297{
298 uint32_t spid;
299
Mingyang Sunf3d29892019-07-10 17:50:23 +0800300 spid = tfm_spm_partition_get_running_partition_id();
Edison Ai764d41f2018-09-21 15:56:36 +0800301
302 return tfm_spm_get_partition_by_id(spid);
303}
304
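/**
 * \brief Check whether the client-requested version is accepted by the
 *        version policy of the service.
 *
 * \param[in] service           Target service context pointer
 * \param[in] version           Version requested by the client
 *
 * \retval IPC_SUCCESS          The requested version is accepted
 * \retval IPC_ERROR_VERSION    The requested version is rejected
 *
 * Illustrative usage only (not code from this file): a psa_connect() handler
 * would typically refuse the connection when the version check fails, e.g.
 *
 *     if (tfm_spm_check_client_version(service, version) != IPC_SUCCESS) {
 *         return PSA_ERROR_CONNECTION_REFUSED;
 *     }
 */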
305int32_t tfm_spm_check_client_version(struct tfm_spm_service_t *service,
Jaykumar Pitambarbhai Patel3a986022019-10-08 17:37:15 +0530306 uint32_t version)
Edison Ai764d41f2018-09-21 15:56:36 +0800307{
Ken Liuf250b8b2019-12-27 16:31:24 +0800308 TFM_CORE_ASSERT(service);
Edison Ai764d41f2018-09-21 15:56:36 +0800309
Jaykumar Pitambarbhai Patel3a986022019-10-08 17:37:15 +0530310 switch (service->service_db->version_policy) {
Edison Ai764d41f2018-09-21 15:56:36 +0800311 case TFM_VERSION_POLICY_RELAXED:
Jaykumar Pitambarbhai Patel3a986022019-10-08 17:37:15 +0530312 if (version > service->service_db->version) {
Edison Ai764d41f2018-09-21 15:56:36 +0800313 return IPC_ERROR_VERSION;
314 }
315 break;
316 case TFM_VERSION_POLICY_STRICT:
Jaykumar Pitambarbhai Patel3a986022019-10-08 17:37:15 +0530317 if (version != service->service_db->version) {
Edison Ai764d41f2018-09-21 15:56:36 +0800318 return IPC_ERROR_VERSION;
319 }
320 break;
321 default:
322 return IPC_ERROR_VERSION;
323 }
324 return IPC_SUCCESS;
325}
326
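/**
 * \brief Check whether the current caller is authorized to access a service.
 *
 * A non-secure caller is only accepted if the service allows non-secure
 * clients. A secure caller is only accepted if the SID is listed in the
 * dependencies of the running partition.
 *
 * \param[in] sid               RoT Service identity
 * \param[in] service           Target service context pointer
 * \param[in] ns_caller         Whether the caller is non-secure
 *
 * \retval IPC_SUCCESS          Access is allowed
 * \retval IPC_ERROR_GENERIC    Access is denied
 * \retval "Does not return"    Panics if the running partition cannot be
 *                              found
 */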
Edison Aie728fbf2019-11-13 09:37:12 +0800327int32_t tfm_spm_check_authorization(uint32_t sid,
328 struct tfm_spm_service_t *service,
Summer Qin618e8c32019-12-09 10:47:20 +0800329 bool ns_caller)
Edison Aie728fbf2019-11-13 09:37:12 +0800330{
331 struct spm_partition_desc_t *partition = NULL;
332 int32_t i;
333
Ken Liuf250b8b2019-12-27 16:31:24 +0800334 TFM_CORE_ASSERT(service);
Edison Aie728fbf2019-11-13 09:37:12 +0800335
336 if (ns_caller) {
337 if (!service->service_db->non_secure_client) {
338 return IPC_ERROR_GENERIC;
339 }
340 } else {
341 partition = tfm_spm_get_running_partition();
342 if (!partition) {
Edison Ai9059ea02019-11-28 13:46:14 +0800343 tfm_core_panic();
Edison Aie728fbf2019-11-13 09:37:12 +0800344 }
345
346 for (i = 0; i < partition->static_data->dependencies_num; i++) {
347 if (partition->static_data->p_dependencies[i] == sid) {
348 break;
349 }
350 }
351
352 if (i == partition->static_data->dependencies_num) {
353 return IPC_ERROR_GENERIC;
354 }
355 }
356 return IPC_SUCCESS;
357}
358
Edison Ai764d41f2018-09-21 15:56:36 +0800359/* Message functions */
Mingyang Sund44522a2020-01-16 16:48:37 +0800360
361/**
362 * \brief Get message context by message handle.
363 *
364 * \param[in] msg_handle Message handle which is a reference generated
365 * by the SPM to a specific message.
366 *
367 * \return The message body context pointer
368 * \ref tfm_msg_body_t structures
369 */
370static struct tfm_msg_body_t *
371 tfm_spm_get_msg_from_handle(psa_handle_t msg_handle)
Edison Ai764d41f2018-09-21 15:56:36 +0800372{
373 /*
Mate Toth-Pala4b5d242019-09-23 09:14:47 +0200374     * The message handle passed by the caller is considered invalid in the
 375     * following cases:
 376     * 1. It is not a valid message handle. (The address of the message is not
 377     * the address of a possible handle from the pool.)
 378     * 2. The handle does not belong to the caller partition. (The handle is
 379     * either unused, or owned by another partition.)
380 * Check the conditions above
Edison Ai764d41f2018-09-21 15:56:36 +0800381 */
Mate Toth-Pala4b5d242019-09-23 09:14:47 +0200382 struct tfm_conn_handle_t *connection_handle_address;
Edison Ai764d41f2018-09-21 15:56:36 +0800383 struct tfm_msg_body_t *msg;
384 uint32_t partition_id;
385
386 msg = (struct tfm_msg_body_t *)msg_handle;
Mate Toth-Pala4b5d242019-09-23 09:14:47 +0200387
388 connection_handle_address =
389 TFM_GET_CONTAINER_PTR(msg, struct tfm_conn_handle_t, internal_msg);
390
391 if (is_valid_chunk_data_in_pool(
392 conn_handle_pool, (uint8_t *)connection_handle_address) != 1) {
Edison Ai764d41f2018-09-21 15:56:36 +0800393 return NULL;
394 }
395
396 /*
Mate Toth-Pala4b5d242019-09-23 09:14:47 +0200397 * Check that the magic number is correct. This proves that the message
398 * structure contains an active message.
Edison Ai764d41f2018-09-21 15:56:36 +0800399 */
400 if (msg->magic != TFM_MSG_MAGIC) {
401 return NULL;
402 }
403
Mate Toth-Pala4b5d242019-09-23 09:14:47 +0200404 /* Check that the running partition owns the message */
Mingyang Sunf3d29892019-07-10 17:50:23 +0800405 partition_id = tfm_spm_partition_get_running_partition_id();
Summer Qin423dbef2019-08-22 15:59:35 +0800406 if (partition_id != msg->service->partition->static_data->partition_id) {
Edison Ai764d41f2018-09-21 15:56:36 +0800407 return NULL;
408 }
409
Mate Toth-Pala4b5d242019-09-23 09:14:47 +0200410 /*
411 * FixMe: For condition 1 it should be checked whether the message belongs
412 * to the service. Skipping this check isn't a security risk as even if the
413 * message belongs to another service, the handle belongs to the calling
414 * partition.
415 */
416
Edison Ai764d41f2018-09-21 15:56:36 +0800417 return msg;
418}
419
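/**
 * \brief Get the message buffer embedded in a connection handle.
 *
 * \param[in] conn_handle       Connection handle, must not be
 *                              \ref PSA_NULL_HANDLE
 *
 * \return Pointer to the internal message body of the handle,
 *         \ref tfm_msg_body_t structures
 */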
Edison Ai97115822019-08-01 14:22:19 +0800420struct tfm_msg_body_t *
421 tfm_spm_get_msg_buffer_from_conn_handle(psa_handle_t conn_handle)
Edison Ai764d41f2018-09-21 15:56:36 +0800422{
Ken Liuf250b8b2019-12-27 16:31:24 +0800423 TFM_CORE_ASSERT(conn_handle != PSA_NULL_HANDLE);
Edison Ai97115822019-08-01 14:22:19 +0800424
425 return &(((struct tfm_conn_handle_t *)conn_handle)->internal_msg);
426}
427
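/**
 * \brief Fill a message body before it is delivered to a service.
 *
 * The message buffer is cleared, the acknowledge event is initialized, the
 * input/output vectors are copied, and for NSPE clients in a multi-core
 * topology the RPC caller data is recorded.
 *
 * \param[in] msg               Message buffer to fill, \ref tfm_msg_body_t
 * \param[in] service           Target service context pointer
 * \param[in] handle            Connection handle; the stored rhandle is only
 *                              propagated for a handle other than
 *                              \ref PSA_NULL_HANDLE
 * \param[in] type              Message type
 * \param[in] client_id         Identity of the calling client
 * \param[in] invec             Array of input \ref psa_invec structures
 * \param[in] in_len            Number of input vectors
 * \param[in] outvec            Array of output \ref psa_outvec structures
 * \param[in] out_len           Number of output vectors
 * \param[in] caller_outvec     Caller-owned outvec array, updated on reply
 */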
428void tfm_spm_fill_msg(struct tfm_msg_body_t *msg,
429 struct tfm_spm_service_t *service,
430 psa_handle_t handle,
Summer Qin1ce712a2019-10-14 18:04:05 +0800431 int32_t type, int32_t client_id,
Edison Ai97115822019-08-01 14:22:19 +0800432 psa_invec *invec, size_t in_len,
433 psa_outvec *outvec, size_t out_len,
434 psa_outvec *caller_outvec)
435{
Edison Ai764d41f2018-09-21 15:56:36 +0800436 uint32_t i;
437
Ken Liuf250b8b2019-12-27 16:31:24 +0800438 TFM_CORE_ASSERT(msg);
439 TFM_CORE_ASSERT(service);
440 TFM_CORE_ASSERT(!(invec == NULL && in_len != 0));
441 TFM_CORE_ASSERT(!(outvec == NULL && out_len != 0));
442 TFM_CORE_ASSERT(in_len <= PSA_MAX_IOVEC);
443 TFM_CORE_ASSERT(out_len <= PSA_MAX_IOVEC);
444 TFM_CORE_ASSERT(in_len + out_len <= PSA_MAX_IOVEC);
Edison Ai764d41f2018-09-21 15:56:36 +0800445
Edison Ai764d41f2018-09-21 15:56:36 +0800446 /* Clear message buffer before using it */
Mingyang Sun94b1b412019-09-20 15:11:14 +0800447 tfm_core_util_memset(msg, 0, sizeof(struct tfm_msg_body_t));
Edison Ai764d41f2018-09-21 15:56:36 +0800448
Ken Liu35f89392019-03-14 14:51:05 +0800449 tfm_event_init(&msg->ack_evnt);
Edison Ai764d41f2018-09-21 15:56:36 +0800450 msg->magic = TFM_MSG_MAGIC;
451 msg->service = service;
452 msg->handle = handle;
453 msg->caller_outvec = caller_outvec;
Summer Qin1ce712a2019-10-14 18:04:05 +0800454 msg->msg.client_id = client_id;
Edison Ai764d41f2018-09-21 15:56:36 +0800455
456 /* Copy contents */
457 msg->msg.type = type;
458
459 for (i = 0; i < in_len; i++) {
460 msg->msg.in_size[i] = invec[i].len;
461 msg->invec[i].base = invec[i].base;
462 }
463
464 for (i = 0; i < out_len; i++) {
465 msg->msg.out_size[i] = outvec[i].len;
466 msg->outvec[i].base = outvec[i].base;
 467        /* Out len is used to record the number of bytes written, so reset it to 0 here */
468 msg->outvec[i].len = 0;
469 }
470
471 /* Use message address as handle */
472 msg->msg.handle = (psa_handle_t)msg;
473
474 /* For connected handle, set rhandle to every message */
475 if (handle != PSA_NULL_HANDLE) {
476 msg->msg.rhandle = tfm_spm_get_rhandle(service, handle);
477 }
David Hu46603dd2019-12-11 18:05:16 +0800478
479 /* Set the private data of NSPE client caller in multi-core topology */
480 if (TFM_CLIENT_ID_IS_NS(client_id)) {
481 tfm_rpc_set_caller_data(msg, client_id);
482 }
Edison Ai764d41f2018-09-21 15:56:36 +0800483}
484
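/**
 * \brief Deliver a message to a service and assert its signal.
 *
 * The message is enqueued on the service message queue and the service
 * signal is set for the owning partition. Unless the message is an RPC
 * message from the non-secure side, the calling thread then blocks until
 * the service replies.
 *
 * \param[in] service           Target service context pointer
 * \param[in] msg               Message body, \ref tfm_msg_body_t
 *
 * \retval IPC_SUCCESS          Success
 * \retval IPC_ERROR_GENERIC    Failed to enqueue the message
 */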
485int32_t tfm_spm_send_event(struct tfm_spm_service_t *service,
486 struct tfm_msg_body_t *msg)
487{
Mingyang Sun5e13aa72019-07-10 10:30:16 +0800488 struct spm_partition_runtime_data_t *p_runtime_data =
489 &service->partition->runtime_data;
490
Ken Liuf250b8b2019-12-27 16:31:24 +0800491 TFM_CORE_ASSERT(service);
492 TFM_CORE_ASSERT(msg);
Edison Ai764d41f2018-09-21 15:56:36 +0800493
494 /* Enqueue message to service message queue */
495 if (tfm_msg_enqueue(&service->msg_queue, msg) != IPC_SUCCESS) {
496 return IPC_ERROR_GENERIC;
497 }
498
 499    /* Message enqueued. Update the signals */
Summer Qine578c5b2019-08-16 16:42:16 +0800500 p_runtime_data->signals |= service->service_db->signal;
Edison Ai764d41f2018-09-21 15:56:36 +0800501
Mingyang Sun5e13aa72019-07-10 10:30:16 +0800502 tfm_event_wake(&p_runtime_data->signal_evnt, (p_runtime_data->signals &
503 p_runtime_data->signal_mask));
Edison Ai764d41f2018-09-21 15:56:36 +0800504
David Hufb38d562019-09-23 15:58:34 +0800505 /*
506 * If it is a NS request via RPC, it is unnecessary to block current
507 * thread.
508 */
509 if (!is_tfm_rpc_msg(msg)) {
510 tfm_event_wait(&msg->ack_evnt);
511 }
Edison Ai764d41f2018-09-21 15:56:36 +0800512
513 return IPC_SUCCESS;
514}
515
Mingyang Sund44522a2020-01-16 16:48:37 +0800516/**
517 * \brief Get bottom of stack region for a partition
518 *
519 * \param[in] partition_idx Partition index
520 *
521 * \return Stack region bottom value
522 *
523 * \note This function doesn't check if partition_idx is valid.
524 */
525static uint32_t tfm_spm_partition_get_stack_bottom(uint32_t partition_idx)
Edison Ai7aff9e82019-07-11 14:56:46 +0800526{
527 return g_spm_partition_db.partitions[partition_idx].
Summer Qin423dbef2019-08-22 15:59:35 +0800528 memory_data->stack_bottom;
Edison Ai7aff9e82019-07-11 14:56:46 +0800529}
530
Mingyang Sund44522a2020-01-16 16:48:37 +0800531/**
532 * \brief Get top of stack region for a partition
533 *
534 * \param[in] partition_idx Partition index
535 *
536 * \return Stack region top value
537 *
538 * \note This function doesn't check if partition_idx is valid.
539 */
540static uint32_t tfm_spm_partition_get_stack_top(uint32_t partition_idx)
Edison Ai7aff9e82019-07-11 14:56:46 +0800541{
Summer Qin423dbef2019-08-22 15:59:35 +0800542 return g_spm_partition_db.partitions[partition_idx].memory_data->stack_top;
Edison Ai7aff9e82019-07-11 14:56:46 +0800543}
544
Mingyang Sunf3d29892019-07-10 17:50:23 +0800545uint32_t tfm_spm_partition_get_running_partition_id(void)
Edison Ai764d41f2018-09-21 15:56:36 +0800546{
Summer Qin66f1e032020-01-06 15:40:03 +0800547 struct tfm_core_thread_t *pth = tfm_core_thrd_get_curr_thread();
Edison Ai764d41f2018-09-21 15:56:36 +0800548 struct spm_partition_desc_t *partition;
Summer Qinb5da9cc2019-08-26 15:19:45 +0800549 struct spm_partition_runtime_data_t *r_data;
Edison Ai764d41f2018-09-21 15:56:36 +0800550
Summer Qinb5da9cc2019-08-26 15:19:45 +0800551 r_data = TFM_GET_CONTAINER_PTR(pth, struct spm_partition_runtime_data_t,
552 sp_thrd);
553 partition = TFM_GET_CONTAINER_PTR(r_data, struct spm_partition_desc_t,
554 runtime_data);
Summer Qin423dbef2019-08-22 15:59:35 +0800555 return partition->static_data->partition_id;
Edison Ai764d41f2018-09-21 15:56:36 +0800556}
557
Summer Qin66f1e032020-01-06 15:40:03 +0800558static struct tfm_core_thread_t *
Mingyang Sunf3d29892019-07-10 17:50:23 +0800559 tfm_spm_partition_get_thread_info(uint32_t partition_idx)
Edison Ai764d41f2018-09-21 15:56:36 +0800560{
Summer Qinb5da9cc2019-08-26 15:19:45 +0800561 return &g_spm_partition_db.partitions[partition_idx].runtime_data.sp_thrd;
Edison Ai764d41f2018-09-21 15:56:36 +0800562}
563
Summer Qin66f1e032020-01-06 15:40:03 +0800564static tfm_core_thrd_entry_t
Mingyang Sunf3d29892019-07-10 17:50:23 +0800565 tfm_spm_partition_get_init_func(uint32_t partition_idx)
Edison Ai764d41f2018-09-21 15:56:36 +0800566{
Summer Qin66f1e032020-01-06 15:40:03 +0800567 return (tfm_core_thrd_entry_t)(g_spm_partition_db.partitions[partition_idx].
Summer Qin423dbef2019-08-22 15:59:35 +0800568 static_data->partition_init);
Edison Ai764d41f2018-09-21 15:56:36 +0800569}
570
Mingyang Sunf3d29892019-07-10 17:50:23 +0800571static uint32_t tfm_spm_partition_get_priority(uint32_t partition_idx)
Edison Ai764d41f2018-09-21 15:56:36 +0800572{
Summer Qin423dbef2019-08-22 15:59:35 +0800573 return g_spm_partition_db.partitions[partition_idx].static_data->
Edison Ai764d41f2018-09-21 15:56:36 +0800574 partition_priority;
575}
576
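/**
 * \brief Check whether a memory range is accessible with the requested
 *        access type.
 *
 * \param[in] buffer            Base address of the memory range
 * \param[in] len               Length of the memory range in bytes
 * \param[in] ns_caller         Whether the range is checked on behalf of a
 *                              non-secure caller
 * \param[in] access            Required access, \ref tfm_memory_access_e
 * \param[in] privileged        Privilege mode used for the check
 *
 * \retval IPC_SUCCESS              The range is accessible (or len is zero)
 * \retval IPC_ERROR_BAD_PARAMETERS buffer is NULL while len is not zero
 * \retval IPC_ERROR_MEMORY_CHECK   The range wraps around the address space
 *                                  or the access check failed
 */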
Summer Qin43c185d2019-10-10 15:44:42 +0800577int32_t tfm_memory_check(const void *buffer, size_t len, bool ns_caller,
Summer Qineb537e52019-03-29 09:57:10 +0800578 enum tfm_memory_access_e access,
579 uint32_t privileged)
Summer Qin2bfd2a02018-09-26 17:10:41 +0800580{
Hugues de Valon99578562019-06-18 16:08:51 +0100581 enum tfm_status_e err;
Summer Qin2bfd2a02018-09-26 17:10:41 +0800582
583 /* If len is zero, this indicates an empty buffer and base is ignored */
584 if (len == 0) {
585 return IPC_SUCCESS;
586 }
587
588 if (!buffer) {
589 return IPC_ERROR_BAD_PARAMETERS;
590 }
591
592 if ((uintptr_t)buffer > (UINTPTR_MAX - len)) {
593 return IPC_ERROR_MEMORY_CHECK;
594 }
595
Summer Qin424d4db2019-03-25 14:09:51 +0800596 if (access == TFM_MEMORY_ACCESS_RW) {
Summer Qineb537e52019-03-29 09:57:10 +0800597 err = tfm_core_has_write_access_to_region(buffer, len, ns_caller,
598 privileged);
Summer Qin2bfd2a02018-09-26 17:10:41 +0800599 } else {
Summer Qineb537e52019-03-29 09:57:10 +0800600 err = tfm_core_has_read_access_to_region(buffer, len, ns_caller,
601 privileged);
Summer Qin424d4db2019-03-25 14:09:51 +0800602 }
Summer Qin0fc3f592019-04-11 16:00:10 +0800603 if (err == TFM_SUCCESS) {
Summer Qin424d4db2019-03-25 14:09:51 +0800604 return IPC_SUCCESS;
Summer Qin2bfd2a02018-09-26 17:10:41 +0800605 }
606
607 return IPC_ERROR_MEMORY_CHECK;
608}
609
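/**
 * \brief Initialize the SPM IPC runtime.
 *
 * Initializes the connection handle pool, applies the default isolation
 * configuration for each partition, creates and starts the partition
 * threads, binds every service to its owning partition and finally hands
 * control to the thread scheduler. Panics on fatal configuration errors.
 *
 * \return The LR value from the architecture context of the non-secure
 *         entry thread.
 */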
Ken Liuce2692d2020-02-11 12:39:36 +0800610uint32_t tfm_spm_init(void)
Edison Ai764d41f2018-09-21 15:56:36 +0800611{
Shawn Shan9b0e0c72019-10-22 13:43:07 +0800612 uint32_t i, j, num;
Mingyang Sun5e13aa72019-07-10 10:30:16 +0800613 struct spm_partition_desc_t *partition;
Summer Qin66f1e032020-01-06 15:40:03 +0800614 struct tfm_core_thread_t *pth, *p_ns_entry_thread = NULL;
Mate Toth-Pal8ac98a72019-11-21 17:30:10 +0100615 const struct tfm_spm_partition_platform_data_t **platform_data_p;
Edison Ai764d41f2018-09-21 15:56:36 +0800616
617 tfm_pool_init(conn_handle_pool,
618 POOL_BUFFER_SIZE(conn_handle_pool),
619 sizeof(struct tfm_conn_handle_t),
620 TFM_CONN_HANDLE_MAX_NUM);
Edison Ai764d41f2018-09-21 15:56:36 +0800621
 622    /* Initialize partitions first, as they are used when initializing the services */
Mate Toth-Pal3ad2e3e2019-07-11 21:43:37 +0200623 for (i = 0; i < g_spm_partition_db.partition_count; i++) {
Mingyang Sun5e13aa72019-07-10 10:30:16 +0800624 partition = &g_spm_partition_db.partitions[i];
Edison Aif0501702019-10-11 14:36:42 +0800625
626 /* Check if the PSA framework version matches. */
627 if (partition->static_data->psa_framework_version !=
628 PSA_FRAMEWORK_VERSION) {
 629            ERROR_MSG("Warning: PSA Framework version does not match!");
630 continue;
631 }
632
Mate Toth-Pal8ac98a72019-11-21 17:30:10 +0100633 platform_data_p = partition->platform_data_list;
634 if (platform_data_p != NULL) {
635 while ((*platform_data_p) != NULL) {
Edison Ai6be3df12020-02-14 22:14:33 +0800636 if (tfm_spm_hal_configure_default_isolation(i,
637 *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
638 tfm_core_panic();
639 }
Mate Toth-Pal8ac98a72019-11-21 17:30:10 +0100640 ++platform_data_p;
641 }
642 }
643
Edison Ai764d41f2018-09-21 15:56:36 +0800644 if ((tfm_spm_partition_get_flags(i) & SPM_PART_FLAG_IPC) == 0) {
645 continue;
646 }
Ken Liu35f89392019-03-14 14:51:05 +0800647
Shawn Shanc7dda0e2019-12-23 14:45:09 +0800648 /* Add PSA_DOORBELL signal to assigned_signals */
649 partition->runtime_data.assigned_signals |= PSA_DOORBELL;
650
Shawn Shan9b0e0c72019-10-22 13:43:07 +0800651 /* TODO: This can be optimized by generating the assigned signal
 652         * at code generation time.
653 */
654 for (j = 0; j < tfm_core_irq_signals_count; ++j) {
655 if (tfm_core_irq_signals[j].partition_id ==
656 partition->static_data->partition_id) {
657 partition->runtime_data.assigned_signals |=
658 tfm_core_irq_signals[j].signal_value;
659 }
660 }
661
Mingyang Sun5e13aa72019-07-10 10:30:16 +0800662 tfm_event_init(&partition->runtime_data.signal_evnt);
663 tfm_list_init(&partition->runtime_data.service_list);
Edison Ai764d41f2018-09-21 15:56:36 +0800664
Mingyang Sunf3d29892019-07-10 17:50:23 +0800665 pth = tfm_spm_partition_get_thread_info(i);
Edison Ai764d41f2018-09-21 15:56:36 +0800666 if (!pth) {
Edison Ai9059ea02019-11-28 13:46:14 +0800667 tfm_core_panic();
Edison Ai764d41f2018-09-21 15:56:36 +0800668 }
669
Summer Qin66f1e032020-01-06 15:40:03 +0800670 tfm_core_thrd_init(pth,
671 tfm_spm_partition_get_init_func(i),
672 NULL,
673 (uintptr_t)tfm_spm_partition_get_stack_top(i),
674 (uintptr_t)tfm_spm_partition_get_stack_bottom(i));
Edison Ai788bae22019-02-18 17:38:59 +0800675
Mingyang Sunf3d29892019-07-10 17:50:23 +0800676 pth->prior = tfm_spm_partition_get_priority(i);
Edison Ai764d41f2018-09-21 15:56:36 +0800677
Ken Liu490281d2019-12-30 15:55:26 +0800678 if (partition->static_data->partition_id == TFM_SP_NON_SECURE_ID) {
679 p_ns_entry_thread = pth;
Ken Liu5248af22019-12-29 12:47:13 +0800680 pth->param = (void *)tfm_spm_hal_get_ns_entry_point();
Ken Liu490281d2019-12-30 15:55:26 +0800681 }
682
Edison Ai764d41f2018-09-21 15:56:36 +0800683 /* Kick off */
Summer Qin66f1e032020-01-06 15:40:03 +0800684 if (tfm_core_thrd_start(pth) != THRD_SUCCESS) {
Edison Ai9059ea02019-11-28 13:46:14 +0800685 tfm_core_panic();
Edison Ai764d41f2018-09-21 15:56:36 +0800686 }
687 }
688
689 /* Init Service */
Summer Qind99509f2019-08-02 17:36:58 +0800690 num = sizeof(service) / sizeof(struct tfm_spm_service_t);
Edison Ai764d41f2018-09-21 15:56:36 +0800691 for (i = 0; i < num; i++) {
Summer Qine578c5b2019-08-16 16:42:16 +0800692 service[i].service_db = &service_db[i];
Edison Ai764d41f2018-09-21 15:56:36 +0800693 partition =
Summer Qine578c5b2019-08-16 16:42:16 +0800694 tfm_spm_get_partition_by_id(service[i].service_db->partition_id);
Edison Ai764d41f2018-09-21 15:56:36 +0800695 if (!partition) {
Edison Ai9059ea02019-11-28 13:46:14 +0800696 tfm_core_panic();
Edison Ai764d41f2018-09-21 15:56:36 +0800697 }
Summer Qind99509f2019-08-02 17:36:58 +0800698 service[i].partition = partition;
Jaykumar Pitambarbhai Patel0c7a0382020-01-09 15:25:58 +0530699 partition->runtime_data.assigned_signals |= service[i].service_db->signal;
Shawn Shan9b0e0c72019-10-22 13:43:07 +0800700
Summer Qind99509f2019-08-02 17:36:58 +0800701 tfm_list_init(&service[i].handle_list);
Mingyang Sun5e13aa72019-07-10 10:30:16 +0800702 tfm_list_add_tail(&partition->runtime_data.service_list,
Summer Qind99509f2019-08-02 17:36:58 +0800703 &service[i].list);
Edison Ai764d41f2018-09-21 15:56:36 +0800704 }
705
Ken Liu483f5da2019-04-24 10:45:21 +0800706 /*
707 * All threads initialized, start the scheduler.
708 *
709 * NOTE:
Ken Liu490281d2019-12-30 15:55:26 +0800710     * It is worth giving the thread object to the scheduler if the background
 711     * context belongs to one of the threads. Here the background thread is the
 712     * initialization thread that calls the SPM SVC and re-uses the non-secure
 713     * entry thread's stack. After SPM initialization is done, this stack is
 714     * cleaned up and the background context never returns. Tell the scheduler
 715     * that the current thread is the non-secure entry thread.
Ken Liu483f5da2019-04-24 10:45:21 +0800716 */
Summer Qin66f1e032020-01-06 15:40:03 +0800717 tfm_core_thrd_start_scheduler(p_ns_entry_thread);
Ken Liuce2692d2020-02-11 12:39:36 +0800718
Summer Qind2ad7e72020-01-06 18:16:35 +0800719 return p_ns_entry_thread->arch_ctx.lr;
Edison Ai764d41f2018-09-21 15:56:36 +0800720}
Ken Liu2d175172019-03-21 17:08:41 +0800721
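/**
 * \brief PendSV scheduling handler.
 *
 * Switches to the next runnable partition thread if it differs from the
 * current one (updating the partition privilege level when TFM_LVL == 2)
 * and then handles any pending mailbox messages from the non-secure side
 * in multi-core topologies.
 *
 * \param[in,out] p_actx        Architecture context saved on exception
 *                              entry, \ref tfm_arch_ctx_t structures
 */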
Summer Qind2ad7e72020-01-06 18:16:35 +0800722void tfm_pendsv_do_schedule(struct tfm_arch_ctx_t *p_actx)
Ken Liu2d175172019-03-21 17:08:41 +0800723{
724#if TFM_LVL == 2
725 struct spm_partition_desc_t *p_next_partition;
Summer Qinb5da9cc2019-08-26 15:19:45 +0800726 struct spm_partition_runtime_data_t *r_data;
Ken Liu2d175172019-03-21 17:08:41 +0800727 uint32_t is_privileged;
728#endif
Summer Qin66f1e032020-01-06 15:40:03 +0800729 struct tfm_core_thread_t *pth_next = tfm_core_thrd_get_next_thread();
730 struct tfm_core_thread_t *pth_curr = tfm_core_thrd_get_curr_thread();
Ken Liu2d175172019-03-21 17:08:41 +0800731
Mate Toth-Pal32b2ccd2019-04-26 10:00:16 +0200732 if (pth_next != NULL && pth_curr != pth_next) {
Ken Liu2d175172019-03-21 17:08:41 +0800733#if TFM_LVL == 2
Summer Qinb5da9cc2019-08-26 15:19:45 +0800734 r_data = TFM_GET_CONTAINER_PTR(pth_next,
735 struct spm_partition_runtime_data_t,
736 sp_thrd);
737 p_next_partition = TFM_GET_CONTAINER_PTR(r_data,
Ken Liu2d175172019-03-21 17:08:41 +0800738 struct spm_partition_desc_t,
Summer Qinb5da9cc2019-08-26 15:19:45 +0800739 runtime_data);
Ken Liu2d175172019-03-21 17:08:41 +0800740
Summer Qin423dbef2019-08-22 15:59:35 +0800741 if (p_next_partition->static_data->partition_flags &
Ken Liu2d175172019-03-21 17:08:41 +0800742 SPM_PART_FLAG_PSA_ROT) {
743 is_privileged = TFM_PARTITION_PRIVILEGED_MODE;
744 } else {
745 is_privileged = TFM_PARTITION_UNPRIVILEGED_MODE;
746 }
747
748 tfm_spm_partition_change_privilege(is_privileged);
749#endif
Mate Toth-Palc430b992019-05-09 21:01:14 +0200750
Summer Qind2ad7e72020-01-06 18:16:35 +0800751 tfm_core_thrd_switch_context(p_actx, pth_curr, pth_next);
Ken Liu2d175172019-03-21 17:08:41 +0800752 }
David Hufb38d562019-09-23 15:58:34 +0800753
754 /*
755 * Handle pending mailbox message from NS in multi-core topology.
756 * Empty operation on single Armv8-M platform.
757 */
758 tfm_rpc_client_call_handler();
Ken Liu2d175172019-03-21 17:08:41 +0800759}
Mingyang Sund44522a2020-01-16 16:48:37 +0800760
761/*********************** SPM functions for PSA Client APIs *******************/
762
763uint32_t tfm_spm_psa_framework_version(void)
764{
765 return tfm_spm_client_psa_framework_version();
766}
767
768uint32_t tfm_spm_psa_version(uint32_t *args, bool ns_caller)
769{
770 uint32_t sid;
771
772 TFM_CORE_ASSERT(args != NULL);
773 sid = (uint32_t)args[0];
774
775 return tfm_spm_client_psa_version(sid, ns_caller);
776}
777
778psa_status_t tfm_spm_psa_connect(uint32_t *args, bool ns_caller)
779{
780 uint32_t sid;
781 uint32_t version;
782
783 TFM_CORE_ASSERT(args != NULL);
784 sid = (uint32_t)args[0];
785 version = (uint32_t)args[1];
786
787 return tfm_spm_client_psa_connect(sid, version, ns_caller);
788}
789
790psa_status_t tfm_spm_psa_call(uint32_t *args, bool ns_caller, uint32_t lr)
791{
792 psa_handle_t handle;
793 psa_invec *inptr;
794 psa_outvec *outptr;
795 size_t in_num, out_num;
796 struct spm_partition_desc_t *partition = NULL;
797 uint32_t privileged;
798 int32_t type;
799 struct tfm_control_parameter_t ctrl_param;
800
801 TFM_CORE_ASSERT(args != NULL);
802 handle = (psa_handle_t)args[0];
803
804 partition = tfm_spm_get_running_partition();
805 if (!partition) {
806 tfm_core_panic();
807 }
808 privileged = tfm_spm_partition_get_privileged_mode(
809 partition->static_data->partition_flags);
810
811 /*
812 * Read parameters from the arguments. It is a fatal error if the
813 * memory reference for buffer is invalid or not readable.
814 */
815 if (tfm_memory_check((const void *)args[1],
816 sizeof(struct tfm_control_parameter_t), ns_caller,
817 TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
818 tfm_core_panic();
819 }
820
821 tfm_core_util_memcpy(&ctrl_param,
822 (const void *)args[1],
823 sizeof(ctrl_param));
824
825 type = ctrl_param.type;
826 in_num = ctrl_param.in_len;
827 out_num = ctrl_param.out_len;
828 inptr = (psa_invec *)args[2];
829 outptr = (psa_outvec *)args[3];
830
831 /* The request type must be zero or positive. */
832 if (type < 0) {
833 tfm_core_panic();
834 }
835
836 return tfm_spm_client_psa_call(handle, type, inptr, in_num, outptr, out_num,
837 ns_caller, privileged);
838}
839
840void tfm_spm_psa_close(uint32_t *args, bool ns_caller)
841{
842 psa_handle_t handle;
843
844 TFM_CORE_ASSERT(args != NULL);
845 handle = args[0];
846
847 tfm_spm_client_psa_close(handle, ns_caller);
848}
849
850uint32_t tfm_spm_get_lifecycle_state(void)
851{
852 /*
853 * FixMe: return PSA_LIFECYCLE_UNKNOWN to the caller directly. It will be
854 * implemented in the future.
855 */
856 return PSA_LIFECYCLE_UNKNOWN;
857}
858
859/********************* SPM functions for PSA Service APIs ********************/
860
861psa_signal_t tfm_spm_psa_wait(uint32_t *args)
862{
863 psa_signal_t signal_mask;
864 uint32_t timeout;
865 struct spm_partition_desc_t *partition = NULL;
866
867 TFM_CORE_ASSERT(args != NULL);
868 signal_mask = (psa_signal_t)args[0];
869 timeout = args[1];
870
871 /*
872 * Timeout[30:0] are reserved for future use.
873 * SPM must ignore the value of RES.
874 */
875 timeout &= PSA_TIMEOUT_MASK;
876
877 partition = tfm_spm_get_running_partition();
878 if (!partition) {
879 tfm_core_panic();
880 }
881
882 /*
883 * It is a PROGRAMMER ERROR if the signal_mask does not include any assigned
884 * signals.
885 */
886 if ((partition->runtime_data.assigned_signals & signal_mask) == 0) {
887 tfm_core_panic();
888 }
889
890 /*
891 * Expected signals are included in signal wait mask, ignored signals
892 * should not be set and affect caller thread state. Save this mask for
893 * further checking while signals are ready to be set.
894 */
895 partition->runtime_data.signal_mask = signal_mask;
896
897 /*
898 * tfm_event_wait() blocks the caller thread if no signals are available.
899 * In this case, the return value of this function is temporary set into
900 * runtime context. After new signal(s) are available, the return value
901 * is updated with the available signal(s) and blocked thread gets to run.
902 */
903 if (timeout == PSA_BLOCK &&
904 (partition->runtime_data.signals & signal_mask) == 0) {
905 tfm_event_wait(&partition->runtime_data.signal_evnt);
906 }
907
908 return partition->runtime_data.signals & signal_mask;
909}
910
911psa_status_t tfm_spm_psa_get(uint32_t *args)
912{
913 psa_signal_t signal;
914 psa_msg_t *msg = NULL;
915 struct tfm_spm_service_t *service = NULL;
916 struct tfm_msg_body_t *tmp_msg = NULL;
917 struct spm_partition_desc_t *partition = NULL;
918 uint32_t privileged;
919
920 TFM_CORE_ASSERT(args != NULL);
921 signal = (psa_signal_t)args[0];
922 msg = (psa_msg_t *)args[1];
923
924 /*
925 * Only one message could be retrieved every time for psa_get(). It is a
 926     * fatal error if the input signal has more than one signal bit set.
927 */
Ken Liu410ada52020-01-08 11:37:27 +0800928 if (!tfm_is_one_bit_set(signal)) {
Mingyang Sund44522a2020-01-16 16:48:37 +0800929 tfm_core_panic();
930 }
931
932 partition = tfm_spm_get_running_partition();
933 if (!partition) {
934 tfm_core_panic();
935 }
936 privileged = tfm_spm_partition_get_privileged_mode(
937 partition->static_data->partition_flags);
938
939 /*
940 * Write the message to the service buffer. It is a fatal error if the
941 * input msg pointer is not a valid memory reference or not read-write.
942 */
943 if (tfm_memory_check(msg, sizeof(psa_msg_t), false, TFM_MEMORY_ACCESS_RW,
944 privileged) != IPC_SUCCESS) {
945 tfm_core_panic();
946 }
947
948 /*
 949     * It is a fatal error if the caller calls psa_get() when no message has
950 * been set. The caller must call this function after an RoT Service signal
951 * is returned by psa_wait().
952 */
953 if (partition->runtime_data.signals == 0) {
954 tfm_core_panic();
955 }
956
957 /*
958 * It is a fatal error if the RoT Service signal is not currently asserted.
959 */
960 if ((partition->runtime_data.signals & signal) == 0) {
961 tfm_core_panic();
962 }
963
964 /*
 965     * Get the RoT service by signal from the partition. It is a fatal error if
 966     * this fails, meaning the input signal does not correspond to an RoT service.
967 */
968 service = tfm_spm_get_service_by_signal(partition, signal);
969 if (!service) {
970 tfm_core_panic();
971 }
972
973 tmp_msg = tfm_msg_dequeue(&service->msg_queue);
974 if (!tmp_msg) {
975 return PSA_ERROR_DOES_NOT_EXIST;
976 }
977
978 ((struct tfm_conn_handle_t *)(tmp_msg->handle))->status =
979 TFM_HANDLE_STATUS_ACTIVE;
980
981 tfm_core_util_memcpy(msg, &tmp_msg->msg, sizeof(psa_msg_t));
982
983 /*
984 * There may be multiple messages for this RoT Service signal, do not clear
985 * its mask until no remaining message.
986 */
987 if (tfm_msg_queue_is_empty(&service->msg_queue)) {
988 partition->runtime_data.signals &= ~signal;
989 }
990
991 return PSA_SUCCESS;
992}
993
994void tfm_spm_psa_set_rhandle(uint32_t *args)
995{
996 psa_handle_t msg_handle;
997 void *rhandle = NULL;
998 struct tfm_msg_body_t *msg = NULL;
999
1000 TFM_CORE_ASSERT(args != NULL);
1001 msg_handle = (psa_handle_t)args[0];
1002 rhandle = (void *)args[1];
1003
1004 /* It is a fatal error if message handle is invalid */
1005 msg = tfm_spm_get_msg_from_handle(msg_handle);
1006 if (!msg) {
1007 tfm_core_panic();
1008 }
1009
1010 msg->msg.rhandle = rhandle;
1011
1012 /* Store reverse handle for following client calls. */
1013 tfm_spm_set_rhandle(msg->service, msg->handle, rhandle);
1014}
1015
1016size_t tfm_spm_psa_read(uint32_t *args)
1017{
1018 psa_handle_t msg_handle;
1019 uint32_t invec_idx;
1020 void *buffer = NULL;
1021 size_t num_bytes;
1022 size_t bytes;
1023 struct tfm_msg_body_t *msg = NULL;
1024 uint32_t privileged;
1025 struct spm_partition_desc_t *partition = NULL;
1026
1027 TFM_CORE_ASSERT(args != NULL);
1028 msg_handle = (psa_handle_t)args[0];
1029 invec_idx = args[1];
1030 buffer = (void *)args[2];
1031 num_bytes = (size_t)args[3];
1032
1033 /* It is a fatal error if message handle is invalid */
1034 msg = tfm_spm_get_msg_from_handle(msg_handle);
1035 if (!msg) {
1036 tfm_core_panic();
1037 }
1038
1039 partition = msg->service->partition;
1040 privileged = tfm_spm_partition_get_privileged_mode(
1041 partition->static_data->partition_flags);
1042
1043 /*
1044 * It is a fatal error if message handle does not refer to a request
1045 * message
1046 */
1047 if (msg->msg.type < PSA_IPC_CALL) {
1048 tfm_core_panic();
1049 }
1050
1051 /*
1052 * It is a fatal error if invec_idx is equal to or greater than
1053 * PSA_MAX_IOVEC
1054 */
1055 if (invec_idx >= PSA_MAX_IOVEC) {
1056 tfm_core_panic();
1057 }
1058
1059 /* There was no remaining data in this input vector */
1060 if (msg->msg.in_size[invec_idx] == 0) {
1061 return 0;
1062 }
1063
1064 /*
1065 * Copy the client data to the service buffer. It is a fatal error
1066 * if the memory reference for buffer is invalid or not read-write.
1067 */
1068 if (tfm_memory_check(buffer, num_bytes, false,
1069 TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
1070 tfm_core_panic();
1071 }
1072
1073 bytes = num_bytes > msg->msg.in_size[invec_idx] ?
1074 msg->msg.in_size[invec_idx] : num_bytes;
1075
1076 tfm_core_util_memcpy(buffer, msg->invec[invec_idx].base, bytes);
1077
 1078    /* There may be some remaining data */
1079 msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + bytes;
1080 msg->msg.in_size[invec_idx] -= bytes;
1081
1082 return bytes;
1083}
1084
1085size_t tfm_spm_psa_skip(uint32_t *args)
1086{
1087 psa_handle_t msg_handle;
1088 uint32_t invec_idx;
1089 size_t num_bytes;
1090 struct tfm_msg_body_t *msg = NULL;
1091
1092 TFM_CORE_ASSERT(args != NULL);
1093 msg_handle = (psa_handle_t)args[0];
1094 invec_idx = args[1];
1095 num_bytes = (size_t)args[2];
1096
1097 /* It is a fatal error if message handle is invalid */
1098 msg = tfm_spm_get_msg_from_handle(msg_handle);
1099 if (!msg) {
1100 tfm_core_panic();
1101 }
1102
1103 /*
1104 * It is a fatal error if message handle does not refer to a request
1105 * message
1106 */
1107 if (msg->msg.type < PSA_IPC_CALL) {
1108 tfm_core_panic();
1109 }
1110
1111 /*
1112 * It is a fatal error if invec_idx is equal to or greater than
1113 * PSA_MAX_IOVEC
1114 */
1115 if (invec_idx >= PSA_MAX_IOVEC) {
1116 tfm_core_panic();
1117 }
1118
1119 /* There was no remaining data in this input vector */
1120 if (msg->msg.in_size[invec_idx] == 0) {
1121 return 0;
1122 }
1123
1124 /*
1125 * If num_bytes is greater than the remaining size of the input vector then
1126 * the remaining size of the input vector is used.
1127 */
1128 if (num_bytes > msg->msg.in_size[invec_idx]) {
1129 num_bytes = msg->msg.in_size[invec_idx];
1130 }
1131
 1132    /* There may be some remaining data */
1133 msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base +
1134 num_bytes;
1135 msg->msg.in_size[invec_idx] -= num_bytes;
1136
1137 return num_bytes;
1138}
1139
1140void tfm_spm_psa_write(uint32_t *args)
1141{
1142 psa_handle_t msg_handle;
1143 uint32_t outvec_idx;
1144 void *buffer = NULL;
1145 size_t num_bytes;
1146 struct tfm_msg_body_t *msg = NULL;
1147 uint32_t privileged;
1148 struct spm_partition_desc_t *partition = NULL;
1149
1150 TFM_CORE_ASSERT(args != NULL);
1151 msg_handle = (psa_handle_t)args[0];
1152 outvec_idx = args[1];
1153 buffer = (void *)args[2];
1154 num_bytes = (size_t)args[3];
1155
1156 /* It is a fatal error if message handle is invalid */
1157 msg = tfm_spm_get_msg_from_handle(msg_handle);
1158 if (!msg) {
1159 tfm_core_panic();
1160 }
1161
1162 partition = msg->service->partition;
1163 privileged = tfm_spm_partition_get_privileged_mode(
1164 partition->static_data->partition_flags);
1165
1166 /*
1167 * It is a fatal error if message handle does not refer to a request
1168 * message
1169 */
1170 if (msg->msg.type < PSA_IPC_CALL) {
1171 tfm_core_panic();
1172 }
1173
1174 /*
1175 * It is a fatal error if outvec_idx is equal to or greater than
1176 * PSA_MAX_IOVEC
1177 */
1178 if (outvec_idx >= PSA_MAX_IOVEC) {
1179 tfm_core_panic();
1180 }
1181
1182 /*
1183 * It is a fatal error if the call attempts to write data past the end of
1184 * the client output vector
1185 */
1186 if (num_bytes > msg->msg.out_size[outvec_idx] -
1187 msg->outvec[outvec_idx].len) {
1188 tfm_core_panic();
1189 }
1190
1191 /*
1192 * Copy the service buffer to client outvecs. It is a fatal error
1193 * if the memory reference for buffer is invalid or not readable.
1194 */
1195 if (tfm_memory_check(buffer, num_bytes, false,
1196 TFM_MEMORY_ACCESS_RO, privileged) != IPC_SUCCESS) {
1197 tfm_core_panic();
1198 }
1199
1200 tfm_core_util_memcpy((char *)msg->outvec[outvec_idx].base +
1201 msg->outvec[outvec_idx].len, buffer, num_bytes);
1202
1203 /* Update the write number */
1204 msg->outvec[outvec_idx].len += num_bytes;
1205}
1206
1207static void update_caller_outvec_len(struct tfm_msg_body_t *msg)
1208{
1209 uint32_t i;
1210
1211 /*
 1212     * FixMe: abstract these parts into dedicated functions to avoid
 1213     * accessing the thread context in the PSA layer
1214 */
1215 /* If it is a NS request via RPC, the owner of this message is not set */
1216 if (!is_tfm_rpc_msg(msg)) {
1217 TFM_CORE_ASSERT(msg->ack_evnt.owner->state == THRD_STATE_BLOCK);
1218 }
1219
1220 for (i = 0; i < PSA_MAX_IOVEC; i++) {
1221 if (msg->msg.out_size[i] == 0) {
1222 continue;
1223 }
1224
1225 TFM_CORE_ASSERT(msg->caller_outvec[i].base == msg->outvec[i].base);
1226
1227 msg->caller_outvec[i].len = msg->outvec[i].len;
1228 }
1229}
1230
1231void tfm_spm_psa_reply(uint32_t *args)
1232{
1233 psa_handle_t msg_handle;
1234 psa_status_t status;
1235 struct tfm_spm_service_t *service = NULL;
1236 struct tfm_msg_body_t *msg = NULL;
1237 int32_t ret = PSA_SUCCESS;
1238
1239 TFM_CORE_ASSERT(args != NULL);
1240 msg_handle = (psa_handle_t)args[0];
1241 status = (psa_status_t)args[1];
1242
1243 /* It is a fatal error if message handle is invalid */
1244 msg = tfm_spm_get_msg_from_handle(msg_handle);
1245 if (!msg) {
1246 tfm_core_panic();
1247 }
1248
1249 /*
 1250     * RoT Service information is needed in this function; it is stored in the
 1251     * message body structure. Only two parameters (handle and status) are
 1252     * passed to this function, so this is the simplest way to obtain it.
1253 */
1254 service = msg->service;
1255 if (!service) {
1256 tfm_core_panic();
1257 }
1258
1259 /*
 1260     * Three types of message are passed to this function: CONNECTION, REQUEST
 1261     * and DISCONNECTION. Each type needs to be processed differently.
1262 */
1263 switch (msg->msg.type) {
1264 case PSA_IPC_CONNECT:
1265 /*
1266 * Reply to PSA_IPC_CONNECT message. Connect handle is returned if the
1267 * input status is PSA_SUCCESS. Others return values are based on the
 1268         * input status is PSA_SUCCESS. Other return values are based on the
1269 */
1270 if (status == PSA_SUCCESS) {
1271 ret = msg->handle;
1272 } else if (status == PSA_ERROR_CONNECTION_REFUSED) {
1273 /* Refuse the client connection, indicating a permanent error. */
1274 tfm_spm_free_conn_handle(service, msg->handle);
1275 ret = PSA_ERROR_CONNECTION_REFUSED;
1276 } else if (status == PSA_ERROR_CONNECTION_BUSY) {
1277 /* Fail the client connection, indicating a transient error. */
1278 ret = PSA_ERROR_CONNECTION_BUSY;
1279 } else {
1280 tfm_core_panic();
1281 }
1282 break;
1283 case PSA_IPC_DISCONNECT:
1284 /* Service handle is not used anymore */
1285 tfm_spm_free_conn_handle(service, msg->handle);
1286
1287 /*
1288 * If the message type is PSA_IPC_DISCONNECT, then the status code is
1289 * ignored
1290 */
1291 break;
1292 default:
1293 if (msg->msg.type >= PSA_IPC_CALL) {
1294 /* Reply to a request message. Return values are based on status */
1295 ret = status;
1296 /*
1297 * The total number of bytes written to a single parameter must be
1298 * reported to the client by updating the len member of the
1299 * psa_outvec structure for the parameter before returning from
1300 * psa_call().
1301 */
1302 update_caller_outvec_len(msg);
1303 } else {
1304 tfm_core_panic();
1305 }
1306 }
1307
1308 if (ret == PSA_ERROR_PROGRAMMER_ERROR) {
1309 /*
1310 * If the source of the programmer error is a Secure Partition, the SPM
1311 * must panic the Secure Partition in response to a PROGRAMMER ERROR.
1312 */
1313 if (TFM_CLIENT_ID_IS_NS(msg->msg.client_id)) {
1314 ((struct tfm_conn_handle_t *)(msg->handle))->status =
1315 TFM_HANDLE_STATUS_CONNECT_ERROR;
1316 } else {
1317 tfm_core_panic();
1318 }
1319 } else {
1320 ((struct tfm_conn_handle_t *)(msg->handle))->status =
1321 TFM_HANDLE_STATUS_IDLE;
1322 }
1323
1324 if (is_tfm_rpc_msg(msg)) {
1325 tfm_rpc_client_call_reply(msg, ret);
1326 } else {
1327 tfm_event_wake(&msg->ack_evnt, ret);
1328 }
1329}
1330
1331/**
 1332 * \brief Notify the partition with the given signal.
1333 *
1334 * \param[in] partition_id The ID of the partition to be notified.
1335 * \param[in] signal The signal that the partition is to be notified
1336 * with.
1337 *
1338 * \retval void Success.
1339 * \retval "Does not return" If partition_id is invalid.
1340 */
1341static void notify_with_signal(int32_t partition_id, psa_signal_t signal)
1342{
1343 struct spm_partition_desc_t *partition = NULL;
1344
1345 /*
1346 * The value of partition_id must be greater than zero as the target of
1347 * notification must be a Secure Partition, providing a Non-secure
1348 * Partition ID is a fatal error.
1349 */
1350 if (!TFM_CLIENT_ID_IS_S(partition_id)) {
1351 tfm_core_panic();
1352 }
1353
1354 /*
1355 * It is a fatal error if partition_id does not correspond to a Secure
1356 * Partition.
1357 */
1358 partition = tfm_spm_get_partition_by_id(partition_id);
1359 if (!partition) {
1360 tfm_core_panic();
1361 }
1362
1363 partition->runtime_data.signals |= signal;
1364
1365 /*
1366 * The target partition may be blocked with waiting for signals after
1367 * called psa_wait(). Set the return value with the available signals
1368 * before wake it up with tfm_event_signal().
1369 */
1370 tfm_event_wake(&partition->runtime_data.signal_evnt,
1371 partition->runtime_data.signals &
1372 partition->runtime_data.signal_mask);
1373}
1374
1375void tfm_spm_psa_notify(uint32_t *args)
1376{
1377 int32_t partition_id;
1378
1379 TFM_CORE_ASSERT(args != NULL);
1380 partition_id = (int32_t)args[0];
1381
1382 notify_with_signal(partition_id, PSA_DOORBELL);
1383}
1384
1385/**
 1386 * \brief Assert the signal for a given IRQ line.
1387 *
1388 * \param[in] partition_id The ID of the partition which handles this IRQ
1389 * \param[in] signal The signal associated with this IRQ
1390 * \param[in] irq_line The number of the IRQ line
1391 *
1392 * \retval void Success.
1393 * \retval "Does not return" Partition ID is invalid
1394 */
1395void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal,
TTornblomfaf74f52020-03-04 17:56:27 +01001396 IRQn_Type irq_line)
Mingyang Sund44522a2020-01-16 16:48:37 +08001397{
1398 tfm_spm_hal_disable_irq(irq_line);
1399 notify_with_signal(partition_id, signal);
1400}
1401
1402void tfm_spm_psa_clear(void)
1403{
1404 struct spm_partition_desc_t *partition = NULL;
1405
1406 partition = tfm_spm_get_running_partition();
1407 if (!partition) {
1408 tfm_core_panic();
1409 }
1410
1411 /*
1412 * It is a fatal error if the Secure Partition's doorbell signal is not
1413 * currently asserted.
1414 */
1415 if ((partition->runtime_data.signals & PSA_DOORBELL) == 0) {
1416 tfm_core_panic();
1417 }
1418 partition->runtime_data.signals &= ~PSA_DOORBELL;
1419}
1420
1421void tfm_spm_psa_panic(void)
1422{
1423 /*
1424 * PSA FF recommends that the SPM causes the system to restart when a secure
1425 * partition panics.
1426 */
1427 tfm_spm_hal_system_reset();
1428}
1429
1430/**
1431 * \brief Return the IRQ line number associated with a signal
1432 *
1433 * \param[in] partition_id The ID of the partition in which we look for
1434 * the signal.
1435 * \param[in] signal The signal we do the query for.
1436 * \param[out] irq_line The irq line associated with signal
1437 *
1438 * \retval IPC_SUCCESS Execution successful, irq_line contains a valid
1439 * value.
1440 * \retval IPC_ERROR_GENERIC There was an error finding the IRQ line for the
1441 * signal. irq_line is unchanged.
1442 */
1443static int32_t get_irq_line_for_signal(int32_t partition_id,
1444 psa_signal_t signal,
TTornblomfaf74f52020-03-04 17:56:27 +01001445 IRQn_Type *irq_line)
Mingyang Sund44522a2020-01-16 16:48:37 +08001446{
1447 size_t i;
1448
1449 for (i = 0; i < tfm_core_irq_signals_count; ++i) {
1450 if (tfm_core_irq_signals[i].partition_id == partition_id &&
1451 tfm_core_irq_signals[i].signal_value == signal) {
1452 *irq_line = tfm_core_irq_signals[i].irq_line;
1453 return IPC_SUCCESS;
1454 }
1455 }
1456 return IPC_ERROR_GENERIC;
1457}
1458
1459void tfm_spm_psa_eoi(uint32_t *args)
1460{
1461 psa_signal_t irq_signal;
TTornblomfaf74f52020-03-04 17:56:27 +01001462 IRQn_Type irq_line = (IRQn_Type) 0;
Mingyang Sund44522a2020-01-16 16:48:37 +08001463 int32_t ret;
1464 struct spm_partition_desc_t *partition = NULL;
1465
1466 TFM_CORE_ASSERT(args != NULL);
1467 irq_signal = (psa_signal_t)args[0];
1468
1469 /* It is a fatal error if passed signal indicates more than one signals. */
1470 if (!tfm_is_one_bit_set(irq_signal)) {
1471 tfm_core_panic();
1472 }
1473
1474 partition = tfm_spm_get_running_partition();
1475 if (!partition) {
1476 tfm_core_panic();
1477 }
1478
1479 ret = get_irq_line_for_signal(partition->static_data->partition_id,
1480 irq_signal, &irq_line);
1481 /* It is a fatal error if passed signal is not an interrupt signal. */
1482 if (ret != IPC_SUCCESS) {
1483 tfm_core_panic();
1484 }
1485
1486 /* It is a fatal error if passed signal is not currently asserted */
1487 if ((partition->runtime_data.signals & irq_signal) == 0) {
1488 tfm_core_panic();
1489 }
1490
1491 partition->runtime_data.signals &= ~irq_signal;
1492
1493 tfm_spm_hal_clear_pending_irq(irq_line);
1494 tfm_spm_hal_enable_irq(irq_line);
1495}
1496
1497void tfm_spm_enable_irq(uint32_t *args)
1498{
1499 struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args;
1500 psa_signal_t irq_signal = svc_ctx->r0;
TTornblomfaf74f52020-03-04 17:56:27 +01001501 IRQn_Type irq_line = (IRQn_Type) 0;
Mingyang Sund44522a2020-01-16 16:48:37 +08001502 int32_t ret;
1503 struct spm_partition_desc_t *partition = NULL;
1504
1505 /* It is a fatal error if passed signal indicates more than one signals. */
1506 if (!tfm_is_one_bit_set(irq_signal)) {
1507 tfm_core_panic();
1508 }
1509
1510 partition = tfm_spm_get_running_partition();
1511 if (!partition) {
1512 tfm_core_panic();
1513 }
1514
1515 ret = get_irq_line_for_signal(partition->static_data->partition_id,
1516 irq_signal, &irq_line);
1517 /* It is a fatal error if passed signal is not an interrupt signal. */
1518 if (ret != IPC_SUCCESS) {
1519 tfm_core_panic();
1520 }
1521
1522 tfm_spm_hal_enable_irq(irq_line);
1523}
1524
1525void tfm_spm_disable_irq(uint32_t *args)
1526{
1527 struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args;
1528 psa_signal_t irq_signal = svc_ctx->r0;
TTornblomfaf74f52020-03-04 17:56:27 +01001529 IRQn_Type irq_line = (IRQn_Type) 0;
Mingyang Sund44522a2020-01-16 16:48:37 +08001530 int32_t ret;
1531 struct spm_partition_desc_t *partition = NULL;
1532
1533 /* It is a fatal error if passed signal indicates more than one signals. */
1534 if (!tfm_is_one_bit_set(irq_signal)) {
1535 tfm_core_panic();
1536 }
1537
1538 partition = tfm_spm_get_running_partition();
1539 if (!partition) {
1540 tfm_core_panic();
1541 }
1542
1543 ret = get_irq_line_for_signal(partition->static_data->partition_id,
1544 irq_signal, &irq_line);
1545 /* It is a fatal error if passed signal is not an interrupt signal. */
1546 if (ret != IPC_SUCCESS) {
1547 tfm_core_panic();
1548 }
1549
1550 tfm_spm_hal_disable_irq(irq_line);
1551}
1552
1553void tfm_spm_validate_caller(struct spm_partition_desc_t *p_cur_sp,
1554 uint32_t *p_ctx, uint32_t exc_return,
1555 bool ns_caller)
1556{
1557 uintptr_t stacked_ctx_pos;
1558
1559 if (ns_caller) {
1560 /*
 1561         * The background IRQ can't be supported, since if an SP is executing,
 1562         * the preempted context of the SP can be different from the one that
 1563         * preempts the veneer.
1564 */
1565 if (p_cur_sp->static_data->partition_id != TFM_SP_NON_SECURE_ID) {
1566 tfm_core_panic();
1567 }
1568
1569 /*
1570 * It is non-secure caller, check if veneer stack contains
1571 * multiple contexts.
1572 */
1573 stacked_ctx_pos = (uintptr_t)p_ctx +
1574 sizeof(struct tfm_state_context_t) +
1575 TFM_VENEER_STACK_GUARD_SIZE;
1576
1577 if (is_stack_alloc_fp_space(exc_return)) {
1578#if defined (__FPU_USED) && (__FPU_USED == 1U)
1579 if (FPU->FPCCR & FPU_FPCCR_TS_Msk) {
1580 stacked_ctx_pos += TFM_ADDTIONAL_FP_CONTEXT_WORDS *
1581 sizeof(uint32_t);
1582 }
1583#endif
1584 stacked_ctx_pos += TFM_BASIC_FP_CONTEXT_WORDS * sizeof(uint32_t);
1585 }
1586
1587 if (stacked_ctx_pos != p_cur_sp->runtime_data.sp_thrd.stk_top) {
1588 tfm_core_panic();
1589 }
1590 } else if (p_cur_sp->static_data->partition_id <= 0) {
1591 tfm_core_panic();
1592 }
1593}
Summer Qin830c5542020-02-14 13:44:20 +08001594
1595void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
1596{
1597 uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
1598 uint32_t running_partition_flags = 0;
1599 const struct spm_partition_desc_t *partition = NULL;
1600
1601 /* Check permissions on request type basis */
1602
1603 switch (svc_ctx->r0) {
1604 case TFM_SPM_REQUEST_RESET_VOTE:
1605 partition = tfm_spm_get_running_partition();
1606 if (!partition) {
1607 tfm_core_panic();
1608 }
1609 running_partition_flags = partition->static_data->partition_flags;
1610
1611 /* Currently only PSA Root of Trust services are allowed to make Reset
 1612         * vote requests
1613 */
1614 if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
1615 *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
1616 }
1617
1618 /* FixMe: this is a placeholder for checks to be performed before
1619 * allowing execution of reset
1620 */
1621 *res_ptr = (uint32_t)TFM_SUCCESS;
1622
1623 break;
1624 default:
1625 *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
1626 }
1627}