/*
 * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <inttypes.h>
#include <stdbool.h>
#include "psa/client.h"
#include "psa/service.h"
#include "psa/lifecycle.h"
#include "tfm_thread.h"
#include "tfm_wait.h"
#include "utilities.h"
#include "tfm_internal_defines.h"
#include "tfm_message_queue.h"
#include "tfm_spm_hal.h"
#include "tfm_irq_list.h"
#include "tfm_api.h"
#include "tfm_secure_api.h"
#include "tfm_memory_utils.h"
#include "spm_ipc.h"
#include "tfm_peripherals_def.h"
#include "tfm_core_utils.h"
#include "spm_psa_client_call.h"
#include "tfm_rpc.h"
#include "tfm_internal.h"
#include "tfm_core_trustzone.h"
#include "tfm_core_mem_check.h"
#include "tfm_list.h"
#include "tfm_pools.h"
#include "region.h"
#include "region_defs.h"
#include "spm_partition_defs.h"
#include "psa_manifest/pid.h"
#include "tfm/tfm_spm_services.h"

#include "secure_fw/partitions/tfm_service_list.inc"
#include "tfm_spm_db_ipc.inc"

/* Extern service variable */
extern struct tfm_spm_service_t service[];
extern const struct tfm_spm_service_db_t service_db[];

/* Pools */
TFM_POOL_DECLARE(conn_handle_pool, sizeof(struct tfm_conn_handle_t),
                 TFM_CONN_HANDLE_MAX_NUM);

void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal,
                     IRQn_Type irq_line);

#include "tfm_secure_irq_handlers_ipc.inc"

/*********************** Connection handle conversion APIs *******************/

/* Set a minimal value here for feature expansion. */
#define CLIENT_HANDLE_VALUE_MIN        32

#define CONVERSION_FACTOR_BITOFFSET    3
#define CONVERSION_FACTOR_VALUE        (1 << CONVERSION_FACTOR_BITOFFSET)
/* Set 32 as the maximum */
#define CONVERSION_FACTOR_VALUE_MAX    0x20

#if CONVERSION_FACTOR_VALUE > CONVERSION_FACTOR_VALUE_MAX
#error "CONVERSION FACTOR OUT OF RANGE"
#endif

static uint32_t loop_index;

/*
 * A handle instance psa_handle_t allocated inside SPM is actually a memory
 * address within the handle pool. Returning this handle to the client
 * directly exposes information about secure memory addresses. Therefore the
 * handle is converted into another value that does not represent the memory
 * address, to avoid exposing secure memory directly to clients.
 *
 * This function converts the handle instance into another value by scaling
 * its offset in the pool; the converted value is referred to as a user
 * handle.
 *
 * The formula:
 *  user_handle = (handle_instance - POOL_START) * CONVERSION_FACTOR_VALUE +
 *                CLIENT_HANDLE_VALUE_MIN + loop_index
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 *
 * note:
 *  loop_index is used to ensure that the same handle instance is converted
 *  into different user handles within a short period of time.
 */
psa_handle_t tfm_spm_to_user_handle(struct tfm_conn_handle_t *handle_instance)
{
    psa_handle_t user_handle;

    loop_index = (loop_index + 1) % CONVERSION_FACTOR_VALUE;
    user_handle = (psa_handle_t)((((uintptr_t)handle_instance -
                  (uintptr_t)conn_handle_pool) << CONVERSION_FACTOR_BITOFFSET) +
                  CLIENT_HANDLE_VALUE_MIN + loop_index);

    return user_handle;
}
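
/*
 * Worked example (illustrative only, values assumed for clarity): with
 * CONVERSION_FACTOR_BITOFFSET == 3 and a handle instance located 0x40 bytes
 * after conn_handle_pool, the conversion above yields
 *   user_handle = (0x40 << 3) + CLIENT_HANDLE_VALUE_MIN + loop_index
 *               = 0x220 + loop_index
 * with loop_index cycling through [0, 7], so converting the same instance
 * twice in a row produces two different user handles.
 */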

/*
 * This function converts a user handle into the corresponding handle
 * instance. The converted value is validated before returning; an invalid
 * handle instance is returned as NULL.
 *
 * The formula:
 *  handle_instance = ((user_handle - CLIENT_HANDLE_VALUE_MIN) /
 *                    CONVERSION_FACTOR_VALUE) + POOL_START
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 */
struct tfm_conn_handle_t *tfm_spm_to_handle_instance(psa_handle_t user_handle)
{
    struct tfm_conn_handle_t *handle_instance;

    if (user_handle == PSA_NULL_HANDLE) {
        return NULL;
    }

    handle_instance = (struct tfm_conn_handle_t *)((((uintptr_t)user_handle -
                      CLIENT_HANDLE_VALUE_MIN) >> CONVERSION_FACTOR_BITOFFSET) +
                      (uintptr_t)conn_handle_pool);

    return handle_instance;
}
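
/*
 * Round-trip sketch (illustrative only): because loop_index is always smaller
 * than CONVERSION_FACTOR_VALUE, the bits it occupies are shifted out again by
 * the inverse conversion (pool and BITOFFSET are shorthand here):
 *   tfm_spm_to_handle_instance(tfm_spm_to_user_handle(h))
 *     == ((((h - pool) << BITOFFSET) + loop_index) >> BITOFFSET) + pool
 *     == h
 * so a user handle handed back by a client maps to the original pool slot,
 * assuming it was produced by tfm_spm_to_user_handle() in the first place.
 */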

/* Service handle management functions */
struct tfm_conn_handle_t *tfm_spm_create_conn_handle(
                                        struct tfm_spm_service_t *service,
                                        int32_t client_id)
{
    struct tfm_conn_handle_t *p_handle;

    TFM_CORE_ASSERT(service);

    /* Get buffer for handle list structure from handle pool */
    p_handle = (struct tfm_conn_handle_t *)tfm_pool_alloc(conn_handle_pool);
    if (!p_handle) {
        return NULL;
    }

    p_handle->service = service;
    p_handle->status = TFM_HANDLE_STATUS_IDLE;
    p_handle->client_id = client_id;

    /* Add handle node to list for next psa functions */
    tfm_list_add_tail(&service->handle_list, &p_handle->list);

    return p_handle;
}

int32_t tfm_spm_validate_conn_handle(
                                    const struct tfm_conn_handle_t *conn_handle,
                                    int32_t client_id)
{
    /* Check that the handle address is valid */
    if (is_valid_chunk_data_in_pool(conn_handle_pool,
                                    (uint8_t *)conn_handle) != true) {
        return IPC_ERROR_GENERIC;
    }

    /* Check that the handle belongs to this caller */
    if (conn_handle->client_id != client_id) {
        return IPC_ERROR_GENERIC;
    }

    return IPC_SUCCESS;
}

/**
 * \brief                       Free a connection handle which is not used
 *                              anymore.
 *
 * \param[in] service           Target service context pointer
 * \param[in] conn_handle       Connection handle created by
 *                              tfm_spm_create_conn_handle()
 *
 * \retval IPC_SUCCESS          Success
 * \retval IPC_ERROR_BAD_PARAMETERS Bad parameters input
 * \retval "Does not return"    Panics if the service cannot be found by handle
 */
static int32_t tfm_spm_free_conn_handle(struct tfm_spm_service_t *service,
                                        struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(conn_handle != NULL);

    /* Clear magic as the handle is not used anymore */
    conn_handle->internal_msg.magic = 0;

    /* Remove node from handle list */
    tfm_list_del_node(&conn_handle->list);

    /* Return handle buffer to the pool */
    tfm_pool_free(conn_handle);
    return IPC_SUCCESS;
}

/**
 * \brief                       Set the reverse handle value for a connection.
 *
 * \param[in] service           Target service context pointer
 * \param[in] conn_handle       Connection handle created by
 *                              tfm_spm_create_conn_handle()
 * \param[in] rhandle           rhandle value to be saved
 *
 * \retval IPC_SUCCESS          Success
 * \retval IPC_ERROR_BAD_PARAMETERS Bad parameters input
 * \retval "Does not return"    Panics if the handle node cannot be found
 */
static int32_t tfm_spm_set_rhandle(struct tfm_spm_service_t *service,
                                   struct tfm_conn_handle_t *conn_handle,
                                   void *rhandle)
{
    TFM_CORE_ASSERT(service);
    /* Setting the reverse handle value is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    conn_handle->rhandle = rhandle;
    return IPC_SUCCESS;
}

/**
 * \brief                       Get the reverse handle value from a connection
 *                              handle.
 *
 * \param[in] service           Target service context pointer
 * \param[in] conn_handle       Connection handle created by
 *                              tfm_spm_create_conn_handle()
 *
 * \retval void *               Success
 * \retval "Does not return"    Panics if:
 *                              the service pointer is NULL,
 *                              the handle is \ref PSA_NULL_HANDLE,
 *                              the handle node cannot be found
 */
static void *tfm_spm_get_rhandle(struct tfm_spm_service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    /* Getting the reverse handle value is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    return conn_handle->rhandle;
}

/* Partition management functions */

/**
 * \brief                   Get the service context by signal.
 *
 * \param[in] partition     Partition context pointer
 *                          \ref spm_partition_desc_t structures
 * \param[in] signal        Signal associated with inputs to the Secure
 *                          Partition, \ref psa_signal_t
 *
 * \retval NULL             Failed
 * \retval "Not NULL"       Target service context pointer,
 *                          \ref tfm_spm_service_t structures
 */
static struct tfm_spm_service_t *
    tfm_spm_get_service_by_signal(struct spm_partition_desc_t *partition,
                                  psa_signal_t signal)
{
    struct tfm_list_node_t *node, *head;
    struct tfm_spm_service_t *service;

    TFM_CORE_ASSERT(partition);

    if (tfm_list_is_empty(&partition->runtime_data.service_list)) {
        tfm_core_panic();
    }

    head = &partition->runtime_data.service_list;
    TFM_LIST_FOR_EACH(node, head) {
        service = TFM_GET_CONTAINER_PTR(node, struct tfm_spm_service_t, list);
        if (service->service_db->signal == signal) {
            return service;
        }
    }
    return NULL;
}

/**
 * \brief                   Returns the index of the partition with the given
 *                          partition ID.
 *
 * \param[in] partition_id  Partition id
 *
 * \return                  The partition idx if partition_id is valid,
 *                          \ref SPM_INVALID_PARTITION_IDX otherwise
 */
static uint32_t get_partition_idx(uint32_t partition_id)
{
    uint32_t i;

    if (partition_id == INVALID_PARTITION_ID) {
        return SPM_INVALID_PARTITION_IDX;
    }

    for (i = 0; i < g_spm_partition_db.partition_count; ++i) {
        if (g_spm_partition_db.partitions[i].static_data->partition_id ==
            partition_id) {
            return i;
        }
    }
    return SPM_INVALID_PARTITION_IDX;
}

/**
 * \brief                   Get the flags associated with a partition
 *
 * \param[in] partition_idx Partition index
 *
 * \return                  Flags associated with the partition
 *
 * \note                    This function doesn't check if partition_idx is
 *                          valid.
 */
static uint32_t tfm_spm_partition_get_flags(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
           partition_flags;
}

#if TFM_LVL != 1
/**
 * \brief                   Change the privilege mode for partition thread
 *                          mode.
 *
 * \param[in] privileged    Privileged mode,
 *                          \ref TFM_PARTITION_PRIVILEGED_MODE
 *                          and \ref TFM_PARTITION_UNPRIVILEGED_MODE
 *
 * \note                    Barrier instructions are not called by this
 *                          function, and if it is called in thread mode, it
 *                          might be necessary to call them after this
 *                          function returns.
 */
static void tfm_spm_partition_change_privilege(uint32_t privileged)
{
    CONTROL_Type ctrl;

    ctrl.w = __get_CONTROL();

    if (privileged == TFM_PARTITION_PRIVILEGED_MODE) {
        ctrl.b.nPRIV = 0;
    } else {
        ctrl.b.nPRIV = 1;
    }

    __set_CONTROL(ctrl.w);
}
#endif /* if(TFM_LVL != 1) */

uint32_t tfm_spm_partition_get_partition_id(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
           partition_id;
}

uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
{
    if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
        return TFM_PARTITION_PRIVILEGED_MODE;
    } else {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
}

bool tfm_is_partition_privileged(uint32_t partition_idx)
{
    uint32_t flags = tfm_spm_partition_get_flags(partition_idx);

    return tfm_spm_partition_get_privileged_mode(flags) ==
           TFM_PARTITION_PRIVILEGED_MODE;
}

struct tfm_spm_service_t *tfm_spm_get_service_by_sid(uint32_t sid)
{
    uint32_t i, num;

    num = sizeof(service) / sizeof(struct tfm_spm_service_t);
    for (i = 0; i < num; i++) {
        if (service[i].service_db->sid == sid) {
            return &service[i];
        }
    }

    return NULL;
}

/**
 * \brief                   Get the partition context by partition ID.
 *
 * \param[in] partition_id  Partition identity
 *
 * \retval NULL             Failed
 * \retval "Not NULL"       Target partition context pointer,
 *                          \ref spm_partition_desc_t structures
 */
static struct spm_partition_desc_t *
    tfm_spm_get_partition_by_id(int32_t partition_id)
{
    uint32_t idx = get_partition_idx(partition_id);

    if (idx != SPM_INVALID_PARTITION_IDX) {
        return &(g_spm_partition_db.partitions[idx]);
    }
    return NULL;
}

struct spm_partition_desc_t *tfm_spm_get_running_partition(void)
{
    struct tfm_core_thread_t *pth = tfm_core_thrd_get_curr_thread();
    struct spm_partition_desc_t *partition;
    struct spm_partition_runtime_data_t *rt_data;

    rt_data = TFM_GET_CONTAINER_PTR(pth, struct spm_partition_runtime_data_t,
                                    sp_thrd);
    partition = TFM_GET_CONTAINER_PTR(rt_data, struct spm_partition_desc_t,
                                      runtime_data);
    return partition;
}

int32_t tfm_spm_check_client_version(struct tfm_spm_service_t *service,
                                     uint32_t version)
{
    TFM_CORE_ASSERT(service);

    switch (service->service_db->version_policy) {
    case TFM_VERSION_POLICY_RELAXED:
        if (version > service->service_db->version) {
            return IPC_ERROR_VERSION;
        }
        break;
    case TFM_VERSION_POLICY_STRICT:
        if (version != service->service_db->version) {
            return IPC_ERROR_VERSION;
        }
        break;
    default:
        return IPC_ERROR_VERSION;
    }
    return IPC_SUCCESS;
}
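
/*
 * Policy example (illustrative only): for a service that declares version 2,
 * TFM_VERSION_POLICY_RELAXED accepts client versions 1 and 2 and rejects 3,
 * while TFM_VERSION_POLICY_STRICT accepts only an exact match of version 2.
 */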

int32_t tfm_spm_check_authorization(uint32_t sid,
                                    struct tfm_spm_service_t *service,
                                    bool ns_caller)
{
    struct spm_partition_desc_t *partition = NULL;
    int32_t i;

    TFM_CORE_ASSERT(service);

    if (ns_caller) {
        if (!service->service_db->non_secure_client) {
            return IPC_ERROR_GENERIC;
        }
    } else {
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }

        for (i = 0; i < partition->static_data->dependencies_num; i++) {
            if (partition->static_data->p_dependencies[i] == sid) {
                break;
            }
        }

        if (i == partition->static_data->dependencies_num) {
            return IPC_ERROR_GENERIC;
        }
    }
    return IPC_SUCCESS;
}

/* Message functions */

/**
 * \brief                   Get message context by message handle.
 *
 * \param[in] msg_handle    Message handle which is a reference generated
 *                          by the SPM to a specific message.
 *
 * \return                  The message body context pointer
 *                          \ref tfm_msg_body_t structures
 */
static struct tfm_msg_body_t *
    tfm_spm_get_msg_from_handle(psa_handle_t msg_handle)
{
    /*
     * The message handle passed by the caller is considered invalid in the
     * following cases:
     *   1. Not a valid message handle. (The address of a message is not the
     *      address of a possible handle from the pool.)
     *   2. The handle does not belong to the caller partition. (The handle is
     *      either unused, or owned by another partition.)
     * Check the conditions above.
     */
    struct tfm_msg_body_t *p_msg;
    uint32_t partition_id;
    struct tfm_conn_handle_t *p_conn_handle =
                                     tfm_spm_to_handle_instance(msg_handle);

    if (is_valid_chunk_data_in_pool(
        conn_handle_pool, (uint8_t *)p_conn_handle) != 1) {
        return NULL;
    }

    p_msg = &p_conn_handle->internal_msg;

    /*
     * Check that the magic number is correct. This proves that the message
     * structure contains an active message.
     */
    if (p_msg->magic != TFM_MSG_MAGIC) {
        return NULL;
    }

    /* Check that the running partition owns the message */
    partition_id = tfm_spm_partition_get_running_partition_id();
    if (partition_id != p_msg->service->partition->static_data->partition_id) {
        return NULL;
    }

    return p_msg;
}

struct tfm_msg_body_t *
 tfm_spm_get_msg_buffer_from_conn_handle(struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(conn_handle != NULL);

    return &(conn_handle->internal_msg);
}

void tfm_spm_fill_msg(struct tfm_msg_body_t *msg,
                      struct tfm_spm_service_t *service,
                      psa_handle_t handle,
                      int32_t type, int32_t client_id,
                      psa_invec *invec, size_t in_len,
                      psa_outvec *outvec, size_t out_len,
                      psa_outvec *caller_outvec)
{
    uint32_t i;
    struct tfm_conn_handle_t *conn_handle;

    TFM_CORE_ASSERT(msg);
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(!(invec == NULL && in_len != 0));
    TFM_CORE_ASSERT(!(outvec == NULL && out_len != 0));
    TFM_CORE_ASSERT(in_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(out_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(in_len + out_len <= PSA_MAX_IOVEC);

    /* Clear message buffer before using it */
    tfm_core_util_memset(msg, 0, sizeof(struct tfm_msg_body_t));

    tfm_event_init(&msg->ack_evnt);
    msg->magic = TFM_MSG_MAGIC;
    msg->service = service;
    msg->caller_outvec = caller_outvec;
    msg->msg.client_id = client_id;

    /* Copy contents */
    msg->msg.type = type;

    for (i = 0; i < in_len; i++) {
        msg->msg.in_size[i] = invec[i].len;
        msg->invec[i].base = invec[i].base;
    }

    for (i = 0; i < out_len; i++) {
        msg->msg.out_size[i] = outvec[i].len;
        msg->outvec[i].base = outvec[i].base;
        /* Out len records the number of bytes written, so reset it to 0 here */
        msg->outvec[i].len = 0;
    }

    /* Use the user connect handle as the message handle */
    msg->msg.handle = handle;

    conn_handle = tfm_spm_to_handle_instance(handle);
    /* For a connected handle, set rhandle on every message */
    if (conn_handle) {
        msg->msg.rhandle = tfm_spm_get_rhandle(service, conn_handle);
    }

    /* Set the private data of the NSPE client caller in multi-core topology */
    if (TFM_CLIENT_ID_IS_NS(client_id)) {
        tfm_rpc_set_caller_data(msg, client_id);
    }
}

int32_t tfm_spm_send_event(struct tfm_spm_service_t *service,
                           struct tfm_msg_body_t *msg)
{
    struct spm_partition_runtime_data_t *p_runtime_data =
                                        &service->partition->runtime_data;

    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(msg);

    /* Enqueue message into the service message queue */
    if (tfm_msg_enqueue(&service->msg_queue, msg) != IPC_SUCCESS) {
        return IPC_ERROR_GENERIC;
    }

    /* Message queued. Update signals */
    p_runtime_data->signals |= service->service_db->signal;

    tfm_event_wake(&p_runtime_data->signal_evnt, (p_runtime_data->signals &
                   p_runtime_data->signal_mask));

    /*
     * If it is an NS request via RPC, it is unnecessary to block the current
     * thread.
     */
    if (!is_tfm_rpc_msg(msg)) {
        tfm_event_wait(&msg->ack_evnt);
    }

    return IPC_SUCCESS;
}

uint32_t tfm_spm_partition_get_running_partition_id(void)
{
    struct spm_partition_desc_t *partition;

    partition = tfm_spm_get_running_partition();
    if (partition && partition->static_data) {
        return partition->static_data->partition_id;
    } else {
        return INVALID_PARTITION_ID;
    }
}

int32_t tfm_memory_check(const void *buffer, size_t len, bool ns_caller,
                         enum tfm_memory_access_e access,
                         uint32_t privileged)
{
    enum tfm_status_e err;

    /* If len is zero, this indicates an empty buffer and base is ignored */
    if (len == 0) {
        return IPC_SUCCESS;
    }

    if (!buffer) {
        return IPC_ERROR_BAD_PARAMETERS;
    }

    if ((uintptr_t)buffer > (UINTPTR_MAX - len)) {
        return IPC_ERROR_MEMORY_CHECK;
    }

    if (access == TFM_MEMORY_ACCESS_RW) {
        err = tfm_core_has_write_access_to_region(buffer, len, ns_caller,
                                                  privileged);
    } else {
        err = tfm_core_has_read_access_to_region(buffer, len, ns_caller,
                                                 privileged);
    }
    if (err == TFM_SUCCESS) {
        return IPC_SUCCESS;
    }

    return IPC_ERROR_MEMORY_CHECK;
}
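
/*
 * Usage sketch (illustrative only, assuming the caller has already resolved
 * its ns_caller and privileged values): validating that a client output
 * vector is writable before copying into it could look like
 *   if (tfm_memory_check(outvec->base, outvec->len, ns_caller,
 *                        TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
 *       tfm_core_panic();
 *   }
 * which mirrors how the psa_call/psa_read/psa_write paths below validate
 * caller-supplied buffers.
 */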

uint32_t tfm_spm_init(void)
{
    uint32_t i, j, num;
    struct spm_partition_desc_t *partition;
    struct tfm_core_thread_t *pth, *p_ns_entry_thread = NULL;
    const struct tfm_spm_partition_platform_data_t **platform_data_p;

    tfm_pool_init(conn_handle_pool,
                  POOL_BUFFER_SIZE(conn_handle_pool),
                  sizeof(struct tfm_conn_handle_t),
                  TFM_CONN_HANDLE_MAX_NUM);

    /* Initialize partitions first, as they are used when the services are
     * initialized.
     */
    for (i = 0; i < g_spm_partition_db.partition_count; i++) {
        partition = &g_spm_partition_db.partitions[i];

        if (!partition || !partition->memory_data || !partition->static_data) {
            tfm_core_panic();
        }

        if (!(partition->static_data->partition_flags & SPM_PART_FLAG_IPC)) {
            tfm_core_panic();
        }

        /* Check if the PSA framework version matches. */
        if (partition->static_data->psa_framework_version !=
            PSA_FRAMEWORK_VERSION) {
            ERROR_MSG("Warning: PSA Framework Version does not match!");
            continue;
        }

        platform_data_p = partition->platform_data_list;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(i,
                        *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    tfm_core_panic();
                }
                ++platform_data_p;
            }
        }

        /* Add PSA_DOORBELL signal to assigned_signals */
        partition->runtime_data.assigned_signals |= PSA_DOORBELL;

        /* TODO: This can be optimized by generating the assigned signal
         *       at code generation time.
         */
        for (j = 0; j < tfm_core_irq_signals_count; ++j) {
            if (tfm_core_irq_signals[j].partition_id ==
                partition->static_data->partition_id) {
                partition->runtime_data.assigned_signals |=
                                    tfm_core_irq_signals[j].signal_value;
            }
        }

        tfm_event_init(&partition->runtime_data.signal_evnt);
        tfm_list_init(&partition->runtime_data.service_list);

        pth = &partition->runtime_data.sp_thrd;
        if (!pth) {
            tfm_core_panic();
        }

        tfm_core_thrd_init(pth,
                           (tfm_core_thrd_entry_t)
                                          partition->static_data->partition_init,
                           NULL,
                           (uintptr_t)partition->memory_data->stack_top,
                           (uintptr_t)partition->memory_data->stack_bottom);

        pth->prior = partition->static_data->partition_priority;

        if (partition->static_data->partition_id == TFM_SP_NON_SECURE_ID) {
            p_ns_entry_thread = pth;
            pth->param = (void *)tfm_spm_hal_get_ns_entry_point();
        }

        /* Kick off */
        if (tfm_core_thrd_start(pth) != THRD_SUCCESS) {
            tfm_core_panic();
        }
    }

    /* Initialize services */
    num = sizeof(service) / sizeof(struct tfm_spm_service_t);
    for (i = 0; i < num; i++) {
        service[i].service_db = &service_db[i];
        partition =
            tfm_spm_get_partition_by_id(service[i].service_db->partition_id);
        if (!partition) {
            tfm_core_panic();
        }
        service[i].partition = partition;
        partition->runtime_data.assigned_signals |= service[i].service_db->signal;

        tfm_list_init(&service[i].handle_list);
        tfm_list_add_tail(&partition->runtime_data.service_list,
                          &service[i].list);
    }

    /*
     * All threads are initialized, start the scheduler.
     *
     * NOTE:
     * It is worth giving the thread object to the scheduler if the background
     * context belongs to one of the threads. Here the background thread is the
     * initialization thread which calls the SPM SVC and re-uses the non-secure
     * entry thread's stack. After SPM initialization is done, this stack is
     * cleaned up and the background context is never going to return. Tell
     * the scheduler that the current thread is the non-secure entry thread.
     */
    tfm_core_thrd_start_scheduler(p_ns_entry_thread);

    return p_ns_entry_thread->arch_ctx.lr;
}

void tfm_pendsv_do_schedule(struct tfm_arch_ctx_t *p_actx)
{
#if TFM_LVL == 2
    struct spm_partition_desc_t *p_next_partition;
    struct spm_partition_runtime_data_t *r_data;
    uint32_t is_privileged;
#endif
    struct tfm_core_thread_t *pth_next = tfm_core_thrd_get_next_thread();
    struct tfm_core_thread_t *pth_curr = tfm_core_thrd_get_curr_thread();

    if (pth_next != NULL && pth_curr != pth_next) {
#if TFM_LVL == 2
        r_data = TFM_GET_CONTAINER_PTR(pth_next,
                                       struct spm_partition_runtime_data_t,
                                       sp_thrd);
        p_next_partition = TFM_GET_CONTAINER_PTR(r_data,
                                                 struct spm_partition_desc_t,
                                                 runtime_data);

        if (p_next_partition->static_data->partition_flags &
            SPM_PART_FLAG_PSA_ROT) {
            is_privileged = TFM_PARTITION_PRIVILEGED_MODE;
        } else {
            is_privileged = TFM_PARTITION_UNPRIVILEGED_MODE;
        }

        tfm_spm_partition_change_privilege(is_privileged);
#endif

        tfm_core_thrd_switch_context(p_actx, pth_curr, pth_next);
    }

    /*
     * Handle pending mailbox message from NS in multi-core topology.
     * Empty operation on single Armv8-M platform.
     */
    tfm_rpc_client_call_handler();
}

/*********************** SPM functions for PSA Client APIs *******************/

uint32_t tfm_spm_psa_framework_version(void)
{
    return tfm_spm_client_psa_framework_version();
}

uint32_t tfm_spm_psa_version(uint32_t *args, bool ns_caller)
{
    uint32_t sid;

    TFM_CORE_ASSERT(args != NULL);
    sid = (uint32_t)args[0];

    return tfm_spm_client_psa_version(sid, ns_caller);
}

psa_status_t tfm_spm_psa_connect(uint32_t *args, bool ns_caller)
{
    uint32_t sid;
    uint32_t version;

    TFM_CORE_ASSERT(args != NULL);
    sid = (uint32_t)args[0];
    version = (uint32_t)args[1];

    return tfm_spm_client_psa_connect(sid, version, ns_caller);
}

psa_status_t tfm_spm_psa_call(uint32_t *args, bool ns_caller, uint32_t lr)
{
    psa_handle_t handle;
    psa_invec *inptr;
    psa_outvec *outptr;
    size_t in_num, out_num;
    struct spm_partition_desc_t *partition = NULL;
    uint32_t privileged;
    int32_t type;
    struct tfm_control_parameter_t ctrl_param;

    TFM_CORE_ASSERT(args != NULL);
    handle = (psa_handle_t)args[0];

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
        partition->static_data->partition_flags);

    /*
     * Read parameters from the arguments. It is a fatal error if the
     * memory reference for buffer is invalid or not readable.
     */
    if (tfm_memory_check((const void *)args[1],
        sizeof(struct tfm_control_parameter_t), ns_caller,
        TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
        tfm_core_panic();
    }

    tfm_core_util_memcpy(&ctrl_param,
                         (const void *)args[1],
                         sizeof(ctrl_param));

    type = ctrl_param.type;
    in_num = ctrl_param.in_len;
    out_num = ctrl_param.out_len;
    inptr = (psa_invec *)args[2];
    outptr = (psa_outvec *)args[3];

    /* The request type must be zero or positive. */
    if (type < 0) {
        tfm_core_panic();
    }

    return tfm_spm_client_psa_call(handle, type, inptr, in_num, outptr, out_num,
                                   ns_caller, privileged);
}

void tfm_spm_psa_close(uint32_t *args, bool ns_caller)
{
    psa_handle_t handle;

    TFM_CORE_ASSERT(args != NULL);
    handle = args[0];

    tfm_spm_client_psa_close(handle, ns_caller);
}

uint32_t tfm_spm_get_lifecycle_state(void)
{
    /*
     * FixMe: return PSA_LIFECYCLE_UNKNOWN to the caller directly. It will be
     * implemented in the future.
     */
    return PSA_LIFECYCLE_UNKNOWN;
}

/********************* SPM functions for PSA Service APIs ********************/

psa_signal_t tfm_spm_psa_wait(uint32_t *args)
{
    psa_signal_t signal_mask;
    uint32_t timeout;
    struct spm_partition_desc_t *partition = NULL;

    TFM_CORE_ASSERT(args != NULL);
    signal_mask = (psa_signal_t)args[0];
    timeout = args[1];

    /*
     * Timeout[30:0] are reserved for future use.
     * SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the signal_mask does not include any assigned
     * signals.
     */
    if ((partition->runtime_data.assigned_signals & signal_mask) == 0) {
        tfm_core_panic();
    }

    /*
     * Expected signals are included in the signal wait mask; ignored signals
     * should not be set and must not affect the caller thread state. Save this
     * mask for further checking when signals are ready to be set.
     */
    partition->runtime_data.signal_mask = signal_mask;

    /*
     * tfm_event_wait() blocks the caller thread if no signals are available.
     * In this case, the return value of this function is temporarily set into
     * the runtime context. After new signal(s) become available, the return
     * value is updated with the available signal(s) and the blocked thread
     * gets to run.
     */
    if (timeout == PSA_BLOCK &&
        (partition->runtime_data.signals & signal_mask) == 0) {
        tfm_event_wait(&partition->runtime_data.signal_evnt);
    }

    return partition->runtime_data.signals & signal_mask;
}
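
/*
 * Caller-side sketch (illustrative only, not part of this file): a partition
 * typically loops on psa_wait() and dispatches on the returned signal bits,
 * for example
 *   signals = psa_wait(PSA_WAIT_ANY, PSA_BLOCK);
 *   if (signals & MY_SERVICE_SIGNAL) {
 *       psa_get(MY_SERVICE_SIGNAL, &msg);
 *   }
 * where MY_SERVICE_SIGNAL stands in for a manifest-generated signal name.
 */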

psa_status_t tfm_spm_psa_get(uint32_t *args)
{
    psa_signal_t signal;
    psa_msg_t *msg = NULL;
    struct tfm_spm_service_t *service = NULL;
    struct tfm_msg_body_t *tmp_msg = NULL;
    struct spm_partition_desc_t *partition = NULL;
    uint32_t privileged;

    TFM_CORE_ASSERT(args != NULL);
    signal = (psa_signal_t)args[0];
    msg = (psa_msg_t *)args[1];

    /*
     * Only one message can be retrieved for each psa_get() call. It is a
     * fatal error if the input signal has more than one signal bit set.
     */
    if (!tfm_is_one_bit_set(signal)) {
        tfm_core_panic();
    }

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
        partition->static_data->partition_flags);

    /*
     * Write the message to the service buffer. It is a fatal error if the
     * input msg pointer is not a valid memory reference or not read-write.
     */
    if (tfm_memory_check(msg, sizeof(psa_msg_t), false, TFM_MEMORY_ACCESS_RW,
        privileged) != IPC_SUCCESS) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the caller calls psa_get() when no message has
     * been set. The caller must call this function after an RoT Service signal
     * is returned by psa_wait().
     */
    if (partition->runtime_data.signals == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the RoT Service signal is not currently asserted.
     */
    if ((partition->runtime_data.signals & signal) == 0) {
        tfm_core_panic();
    }

    /*
     * Get the RoT service by signal from the partition. It is a fatal error if
     * this fails, which means the input signal does not correspond to an RoT
     * service.
     */
    service = tfm_spm_get_service_by_signal(partition, signal);
    if (!service) {
        tfm_core_panic();
    }

    tmp_msg = tfm_msg_dequeue(&service->msg_queue);
    if (!tmp_msg) {
        return PSA_ERROR_DOES_NOT_EXIST;
    }

    (TFM_GET_CONTAINER_PTR(tmp_msg,
                           struct tfm_conn_handle_t,
                           internal_msg))->status = TFM_HANDLE_STATUS_ACTIVE;

    tfm_core_util_memcpy(msg, &tmp_msg->msg, sizeof(psa_msg_t));

    /*
     * There may be multiple messages for this RoT Service signal, do not clear
     * its mask until there are no remaining messages.
     */
    if (tfm_msg_queue_is_empty(&service->msg_queue)) {
        partition->runtime_data.signals &= ~signal;
    }

    return PSA_SUCCESS;
}

void tfm_spm_psa_set_rhandle(uint32_t *args)
{
    psa_handle_t msg_handle;
    void *rhandle = NULL;
    struct tfm_msg_body_t *msg = NULL;
    struct tfm_conn_handle_t *conn_handle;

    TFM_CORE_ASSERT(args != NULL);
    msg_handle = (psa_handle_t)args[0];
    rhandle = (void *)args[1];

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    msg->msg.rhandle = rhandle;
    conn_handle = tfm_spm_to_handle_instance(msg_handle);

    /* Store reverse handle for following client calls. */
    tfm_spm_set_rhandle(msg->service, conn_handle, rhandle);
}

size_t tfm_spm_psa_read(uint32_t *args)
{
    psa_handle_t msg_handle;
    uint32_t invec_idx;
    void *buffer = NULL;
    size_t num_bytes;
    size_t bytes;
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct spm_partition_desc_t *partition = NULL;

    TFM_CORE_ASSERT(args != NULL);
    msg_handle = (psa_handle_t)args[0];
    invec_idx = args[1];
    buffer = (void *)args[2];
    num_bytes = (size_t)args[3];

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
        partition->static_data->partition_flags);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There was no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

    /*
     * Copy the client data to the service buffer. It is a fatal error
     * if the memory reference for buffer is invalid or not read-write.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
        TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
        tfm_core_panic();
    }

    bytes = num_bytes > msg->msg.in_size[invec_idx] ?
                        msg->msg.in_size[invec_idx] : num_bytes;

    tfm_core_util_memcpy(buffer, msg->invec[invec_idx].base, bytes);

    /* There may be some remaining data */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + bytes;
    msg->msg.in_size[invec_idx] -= bytes;

    return bytes;
}
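
/*
 * Behaviour sketch (illustrative only): with in_size[invec_idx] == 10 and a
 * 4-byte service buffer, tfm_spm_psa_read() above copies 4 bytes, advances
 * invec[invec_idx].base by 4 and leaves in_size[invec_idx] == 6, so a later
 * psa_read() or psa_skip() on the same vector continues from the remaining
 * data.
 */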

size_t tfm_spm_psa_skip(uint32_t *args)
{
    psa_handle_t msg_handle;
    uint32_t invec_idx;
    size_t num_bytes;
    struct tfm_msg_body_t *msg = NULL;

    TFM_CORE_ASSERT(args != NULL);
    msg_handle = (psa_handle_t)args[0];
    invec_idx = args[1];
    num_bytes = (size_t)args[2];

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There was no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

    /*
     * If num_bytes is greater than the remaining size of the input vector then
     * the remaining size of the input vector is used.
     */
    if (num_bytes > msg->msg.in_size[invec_idx]) {
        num_bytes = msg->msg.in_size[invec_idx];
    }

    /* There may be some remaining data */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base +
                                 num_bytes;
    msg->msg.in_size[invec_idx] -= num_bytes;

    return num_bytes;
}

void tfm_spm_psa_write(uint32_t *args)
{
    psa_handle_t msg_handle;
    uint32_t outvec_idx;
    void *buffer = NULL;
    size_t num_bytes;
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct spm_partition_desc_t *partition = NULL;

    TFM_CORE_ASSERT(args != NULL);
    msg_handle = (psa_handle_t)args[0];
    outvec_idx = args[1];
    buffer = (void *)args[2];
    num_bytes = (size_t)args[3];

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
        partition->static_data->partition_flags);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the call attempts to write data past the end of
     * the client output vector
     */
    if (num_bytes > msg->msg.out_size[outvec_idx] -
        msg->outvec[outvec_idx].len) {
        tfm_core_panic();
    }

    /*
     * Copy the service buffer to client outvecs. It is a fatal error
     * if the memory reference for buffer is invalid or not readable.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
        TFM_MEMORY_ACCESS_RO, privileged) != IPC_SUCCESS) {
        tfm_core_panic();
    }

    tfm_core_util_memcpy((char *)msg->outvec[outvec_idx].base +
                         msg->outvec[outvec_idx].len, buffer, num_bytes);

    /* Update the number of bytes written */
    msg->outvec[outvec_idx].len += num_bytes;
}

static void update_caller_outvec_len(struct tfm_msg_body_t *msg)
{
    uint32_t i;

    /*
     * FixMe: abstract this part into dedicated functions to avoid
     * accessing thread context in the psa layer
     */
    /* If it is an NS request via RPC, the owner of this message is not set */
    if (!is_tfm_rpc_msg(msg)) {
        TFM_CORE_ASSERT(msg->ack_evnt.owner->state == THRD_STATE_BLOCK);
    }

    for (i = 0; i < PSA_MAX_IOVEC; i++) {
        if (msg->msg.out_size[i] == 0) {
            continue;
        }

        TFM_CORE_ASSERT(msg->caller_outvec[i].base == msg->outvec[i].base);

        msg->caller_outvec[i].len = msg->outvec[i].len;
    }
}

void tfm_spm_psa_reply(uint32_t *args)
{
    psa_handle_t msg_handle;
    psa_status_t status;
    struct tfm_spm_service_t *service = NULL;
    struct tfm_msg_body_t *msg = NULL;
    int32_t ret = PSA_SUCCESS;
    struct tfm_conn_handle_t *conn_handle;

    TFM_CORE_ASSERT(args != NULL);
    msg_handle = (psa_handle_t)args[0];
    status = (psa_status_t)args[1];

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * RoT Service information is needed in this function; it is stored in the
     * message body structure. Only two parameters are passed to this function
     * (handle and status), so this is a simple and convenient way to get it.
     */
    service = msg->service;
    if (!service) {
        tfm_core_panic();
    }

    /*
     * Three types of message are passed to this function: CONNECT, REQUEST
     * and DISCONNECT. Each type needs to be processed differently.
     */
    conn_handle = tfm_spm_to_handle_instance(msg_handle);
    switch (msg->msg.type) {
    case PSA_IPC_CONNECT:
        /*
         * Reply to the PSA_IPC_CONNECT message. The connection handle is
         * returned if the input status is PSA_SUCCESS. Other return values are
         * based on the input status.
         */
        if (status == PSA_SUCCESS) {
            ret = msg_handle;
        } else if (status == PSA_ERROR_CONNECTION_REFUSED) {
            /* Refuse the client connection, indicating a permanent error. */
            tfm_spm_free_conn_handle(service, conn_handle);
            ret = PSA_ERROR_CONNECTION_REFUSED;
        } else if (status == PSA_ERROR_CONNECTION_BUSY) {
            /* Fail the client connection, indicating a transient error. */
            ret = PSA_ERROR_CONNECTION_BUSY;
        } else {
            tfm_core_panic();
        }
        break;
    case PSA_IPC_DISCONNECT:
        /* The service handle is not used anymore */
        tfm_spm_free_conn_handle(service, conn_handle);

        /*
         * If the message type is PSA_IPC_DISCONNECT, then the status code is
         * ignored
         */
        break;
    default:
        if (msg->msg.type >= PSA_IPC_CALL) {
            /* Reply to a request message. Return values are based on status */
            ret = status;
            /*
             * The total number of bytes written to a single parameter must be
             * reported to the client by updating the len member of the
             * psa_outvec structure for the parameter before returning from
             * psa_call().
             */
            update_caller_outvec_len(msg);
        } else {
            tfm_core_panic();
        }
    }

    if (ret == PSA_ERROR_PROGRAMMER_ERROR) {
        /*
         * If the source of the programmer error is a Secure Partition, the SPM
         * must panic the Secure Partition in response to a PROGRAMMER ERROR.
         */
        if (TFM_CLIENT_ID_IS_NS(msg->msg.client_id)) {
            conn_handle->status = TFM_HANDLE_STATUS_CONNECT_ERROR;
        } else {
            tfm_core_panic();
        }
    } else {
        conn_handle->status = TFM_HANDLE_STATUS_IDLE;
    }

    if (is_tfm_rpc_msg(msg)) {
        tfm_rpc_client_call_reply(msg, ret);
    } else {
        tfm_event_wake(&msg->ack_evnt, ret);
    }
}
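
/*
 * Caller-side sketch (illustrative only, not part of this file): after
 * serving a request message, a partition completes it with
 *   psa_reply(msg.handle, PSA_SUCCESS);
 * For a PSA_IPC_CONNECT message the client then receives the connection
 * handle as the return value of psa_connect(), and for PSA_IPC_DISCONNECT the
 * status code is ignored, matching the handling above.
 */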
1408
1409/**
1410 * \brief notify the partition with the signal.
1411 *
1412 * \param[in] partition_id The ID of the partition to be notified.
1413 * \param[in] signal The signal that the partition is to be notified
1414 * with.
1415 *
1416 * \retval void Success.
1417 * \retval "Does not return" If partition_id is invalid.
1418 */
1419static void notify_with_signal(int32_t partition_id, psa_signal_t signal)
1420{
1421 struct spm_partition_desc_t *partition = NULL;
1422
1423 /*
1424 * The value of partition_id must be greater than zero as the target of
1425 * notification must be a Secure Partition, providing a Non-secure
1426 * Partition ID is a fatal error.
1427 */
1428 if (!TFM_CLIENT_ID_IS_S(partition_id)) {
1429 tfm_core_panic();
1430 }
1431
1432 /*
1433 * It is a fatal error if partition_id does not correspond to a Secure
1434 * Partition.
1435 */
1436 partition = tfm_spm_get_partition_by_id(partition_id);
1437 if (!partition) {
1438 tfm_core_panic();
1439 }
1440
1441 partition->runtime_data.signals |= signal;
1442
1443 /*
1444 * The target partition may be blocked with waiting for signals after
1445 * called psa_wait(). Set the return value with the available signals
1446 * before wake it up with tfm_event_signal().
1447 */
1448 tfm_event_wake(&partition->runtime_data.signal_evnt,
1449 partition->runtime_data.signals &
1450 partition->runtime_data.signal_mask);
1451}
1452
1453void tfm_spm_psa_notify(uint32_t *args)
1454{
1455 int32_t partition_id;
1456
1457 TFM_CORE_ASSERT(args != NULL);
1458 partition_id = (int32_t)args[0];
1459
1460 notify_with_signal(partition_id, PSA_DOORBELL);
1461}
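/*
 * Illustrative sketch (not part of this file): a Secure Partition ringing
 * another partition's doorbell. The psa_notify() call traps into
 * tfm_spm_psa_notify() above, which asserts PSA_DOORBELL for the target.
 * EXAMPLE_PARTITION_ID is a hypothetical ID; real values come from the
 * generated partition ID header.
 */
#if 0
static void example_ring_doorbell(void)
{
    /* The target must be a Secure Partition, i.e. a positive partition ID. */
    psa_notify(EXAMPLE_PARTITION_ID);
}
#endif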
1462
1463/**
1464 * \brief Assert a signal for a given IRQ line.
1465 *
1466 * \param[in] partition_id The ID of the partition which handles this IRQ
1467 * \param[in] signal The signal associated with this IRQ
1468 * \param[in] irq_line The number of the IRQ line
1469 *
1470 * \retval void Success.
1471 * \retval "Does not return" Partition ID is invalid
1472 */
1473void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal,
TTornblomfaf74f52020-03-04 17:56:27 +01001474 IRQn_Type irq_line)
Mingyang Sund44522a2020-01-16 16:48:37 +08001475{
1476 tfm_spm_hal_disable_irq(irq_line);
1477 notify_with_signal(partition_id, signal);
1478}
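/*
 * Illustrative sketch (not part of this file): the kind of ISR generated for a
 * manifest-declared secure interrupt, forwarding into tfm_irq_handler() so
 * that the IRQ line is masked and the owning partition is signalled. The
 * partition, signal and IRQ names below are hypothetical placeholders.
 */
#if 0
void EXAMPLE_TIMER_IRQHandler(void)
{
    tfm_irq_handler(EXAMPLE_PARTITION_ID, EXAMPLE_TIMER_IRQ_SIGNAL,
                    EXAMPLE_TIMER_IRQn);
}
#endif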
1479
1480void tfm_spm_psa_clear(void)
1481{
1482 struct spm_partition_desc_t *partition = NULL;
1483
1484 partition = tfm_spm_get_running_partition();
1485 if (!partition) {
1486 tfm_core_panic();
1487 }
1488
1489 /*
1490 * It is a fatal error if the Secure Partition's doorbell signal is not
1491 * currently asserted.
1492 */
1493 if ((partition->runtime_data.signals & PSA_DOORBELL) == 0) {
1494 tfm_core_panic();
1495 }
1496 partition->runtime_data.signals &= ~PSA_DOORBELL;
1497}
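/*
 * Illustrative sketch (not part of this file): the partition-side counterpart
 * of the doorbell handling above. The partition blocks in psa_wait() until
 * PSA_DOORBELL is asserted, handles the notification, then de-asserts it with
 * psa_clear(), which traps into tfm_spm_psa_clear().
 */
#if 0
static void example_wait_for_doorbell(void)
{
    if (psa_wait(PSA_DOORBELL, PSA_BLOCK) & PSA_DOORBELL) {
        /* Handle the notification here, then clear the doorbell signal. */
        psa_clear();
    }
}
#endif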
1498
1499void tfm_spm_psa_panic(void)
1500{
1501 /*
1502 * PSA FF recommends that the SPM causes the system to restart when a secure
1503 * partition panics.
1504 */
1505 tfm_spm_hal_system_reset();
1506}
1507
1508/**
1509 * \brief Return the IRQ line number associated with a signal
1510 *
1511 * \param[in] partition_id The ID of the partition in which we look for
1512 * the signal.
1513 * \param[in] signal The signal we do the query for.
1514 * \param[out] irq_line The irq line associated with signal
1515 *
1516 * \retval IPC_SUCCESS Execution successful, irq_line contains a valid
1517 * value.
1518 * \retval IPC_ERROR_GENERIC There was an error finding the IRQ line for the
1519 * signal. irq_line is unchanged.
1520 */
1521static int32_t get_irq_line_for_signal(int32_t partition_id,
1522 psa_signal_t signal,
TTornblomfaf74f52020-03-04 17:56:27 +01001523 IRQn_Type *irq_line)
Mingyang Sund44522a2020-01-16 16:48:37 +08001524{
1525 size_t i;
1526
1527 for (i = 0; i < tfm_core_irq_signals_count; ++i) {
1528 if (tfm_core_irq_signals[i].partition_id == partition_id &&
1529 tfm_core_irq_signals[i].signal_value == signal) {
1530 *irq_line = tfm_core_irq_signals[i].irq_line;
1531 return IPC_SUCCESS;
1532 }
1533 }
1534 return IPC_ERROR_GENERIC;
1535}
1536
1537void tfm_spm_psa_eoi(uint32_t *args)
1538{
1539 psa_signal_t irq_signal;
TTornblomfaf74f52020-03-04 17:56:27 +01001540 IRQn_Type irq_line = (IRQn_Type) 0;
Mingyang Sund44522a2020-01-16 16:48:37 +08001541 int32_t ret;
1542 struct spm_partition_desc_t *partition = NULL;
1543
1544 TFM_CORE_ASSERT(args != NULL);
1545 irq_signal = (psa_signal_t)args[0];
1546
1547 /* It is a fatal error if the passed signal indicates more than one signal. */
1548 if (!tfm_is_one_bit_set(irq_signal)) {
1549 tfm_core_panic();
1550 }
1551
1552 partition = tfm_spm_get_running_partition();
1553 if (!partition) {
1554 tfm_core_panic();
1555 }
1556
1557 ret = get_irq_line_for_signal(partition->static_data->partition_id,
1558 irq_signal, &irq_line);
1559 /* It is a fatal error if the passed signal is not an interrupt signal. */
1560 if (ret != IPC_SUCCESS) {
1561 tfm_core_panic();
1562 }
1563
1564 /* It is a fatal error if the passed signal is not currently asserted. */
1565 if ((partition->runtime_data.signals & irq_signal) == 0) {
1566 tfm_core_panic();
1567 }
1568
1569 partition->runtime_data.signals &= ~irq_signal;
1570
1571 tfm_spm_hal_clear_pending_irq(irq_line);
1572 tfm_spm_hal_enable_irq(irq_line);
1573}
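/*
 * Illustrative sketch (not part of this file): the partition-side flow that
 * ends in tfm_spm_psa_eoi(). The partition waits for its interrupt signal,
 * services the peripheral, then calls psa_eoi() so the SPM clears the pending
 * state and re-enables the IRQ line. EXAMPLE_TIMER_IRQ_SIGNAL is a
 * hypothetical manifest-defined signal.
 */
#if 0
static void example_handle_interrupt_signal(void)
{
    if (psa_wait(EXAMPLE_TIMER_IRQ_SIGNAL, PSA_BLOCK) &
        EXAMPLE_TIMER_IRQ_SIGNAL) {
        /* Service the peripheral here, then signal end of interrupt. */
        psa_eoi(EXAMPLE_TIMER_IRQ_SIGNAL);
    }
}
#endif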
1574
1575void tfm_spm_enable_irq(uint32_t *args)
1576{
1577 struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args;
1578 psa_signal_t irq_signal = svc_ctx->r0;
TTornblomfaf74f52020-03-04 17:56:27 +01001579 IRQn_Type irq_line = (IRQn_Type) 0;
Mingyang Sund44522a2020-01-16 16:48:37 +08001580 int32_t ret;
1581 struct spm_partition_desc_t *partition = NULL;
1582
1583 /* It is a fatal error if the passed signal indicates more than one signal. */
1584 if (!tfm_is_one_bit_set(irq_signal)) {
1585 tfm_core_panic();
1586 }
1587
1588 partition = tfm_spm_get_running_partition();
1589 if (!partition) {
1590 tfm_core_panic();
1591 }
1592
1593 ret = get_irq_line_for_signal(partition->static_data->partition_id,
1594 irq_signal, &irq_line);
1595 /* It is a fatal error if the passed signal is not an interrupt signal. */
1596 if (ret != IPC_SUCCESS) {
1597 tfm_core_panic();
1598 }
1599
1600 tfm_spm_hal_enable_irq(irq_line);
1601}
1602
1603void tfm_spm_disable_irq(uint32_t *args)
1604{
1605 struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args;
1606 psa_signal_t irq_signal = svc_ctx->r0;
TTornblomfaf74f52020-03-04 17:56:27 +01001607 IRQn_Type irq_line = (IRQn_Type) 0;
Mingyang Sund44522a2020-01-16 16:48:37 +08001608 int32_t ret;
1609 struct spm_partition_desc_t *partition = NULL;
1610
1611 /* It is a fatal error if the passed signal indicates more than one signal. */
1612 if (!tfm_is_one_bit_set(irq_signal)) {
1613 tfm_core_panic();
1614 }
1615
1616 partition = tfm_spm_get_running_partition();
1617 if (!partition) {
1618 tfm_core_panic();
1619 }
1620
1621 ret = get_irq_line_for_signal(partition->static_data->partition_id,
1622 irq_signal, &irq_line);
1623 /* It is a fatal error if the passed signal is not an interrupt signal. */
1624 if (ret != IPC_SUCCESS) {
1625 tfm_core_panic();
1626 }
1627
1628 tfm_spm_hal_disable_irq(irq_line);
1629}
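/*
 * Illustrative sketch, under the assumption that partition code reaches these
 * two SVC handlers through wrapper functions named tfm_enable_irq() and
 * tfm_disable_irq() taking the interrupt signal; the wrapper and signal names
 * here are assumptions rather than definitions from this file.
 */
#if 0
static void example_mask_irq_during_critical_section(void)
{
    tfm_disable_irq(EXAMPLE_TIMER_IRQ_SIGNAL);
    /* Work that must not be preempted by this peripheral's interrupt. */
    tfm_enable_irq(EXAMPLE_TIMER_IRQ_SIGNAL);
}
#endif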
1630
1631void tfm_spm_validate_caller(struct spm_partition_desc_t *p_cur_sp,
1632 uint32_t *p_ctx, uint32_t exc_return,
1633 bool ns_caller)
1634{
1635 uintptr_t stacked_ctx_pos;
1636
1637 if (ns_caller) {
1638 /*
1639 * A background IRQ cannot be supported here: if an SP is executing,
1640 * the preempted context of the SP can be different from the one
1641 * that preempted the veneer.
1642 */
1643 if (p_cur_sp->static_data->partition_id != TFM_SP_NON_SECURE_ID) {
1644 tfm_core_panic();
1645 }
1646
1647 /*
1648 * The caller is non-secure; check whether the veneer stack
1649 * contains multiple contexts.
1650 */
1651 stacked_ctx_pos = (uintptr_t)p_ctx +
1652 sizeof(struct tfm_state_context_t) +
1653 TFM_VENEER_STACK_GUARD_SIZE;
1654
1655 if (is_stack_alloc_fp_space(exc_return)) {
1656#if defined (__FPU_USED) && (__FPU_USED == 1U)
1657 if (FPU->FPCCR & FPU_FPCCR_TS_Msk) {
1658 stacked_ctx_pos += TFM_ADDTIONAL_FP_CONTEXT_WORDS *
1659 sizeof(uint32_t);
1660 }
1661#endif
1662 stacked_ctx_pos += TFM_BASIC_FP_CONTEXT_WORDS * sizeof(uint32_t);
1663 }
1664
1665 if (stacked_ctx_pos != p_cur_sp->runtime_data.sp_thrd.stk_top) {
1666 tfm_core_panic();
1667 }
1668 } else if (p_cur_sp->static_data->partition_id <= 0) {
1669 tfm_core_panic();
1670 }
1671}
Summer Qin830c5542020-02-14 13:44:20 +08001672
1673void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
1674{
1675 uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
1676 uint32_t running_partition_flags = 0;
1677 const struct spm_partition_desc_t *partition = NULL;
1678
1679 /* Check permissions on request type basis */
1680
1681 switch (svc_ctx->r0) {
1682 case TFM_SPM_REQUEST_RESET_VOTE:
1683 partition = tfm_spm_get_running_partition();
1684 if (!partition) {
1685 tfm_core_panic();
1686 }
1687 running_partition_flags = partition->static_data->partition_flags;
1688
1689 /* Currently only PSA Root of Trust services are allowed to make a reset
1690 * vote request.
1691 */
1692 if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
1693 *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
            break;
1694 }
1695
1696 /* FixMe: this is a placeholder for checks to be performed before
1697 * allowing execution of reset
1698 */
1699 *res_ptr = (uint32_t)TFM_SUCCESS;
1700
1701 break;
1702 default:
1703 *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
1704 }
1705}
Mingyang Sunbd7ceb52020-06-11 16:53:03 +08001706
1707enum spm_err_t tfm_spm_db_init(void)
1708{
1709 uint32_t i;
1710
1711 /* This function initialises partition db */
1712
1713 for (i = 0; i < g_spm_partition_db.partition_count; i++) {
1714 g_spm_partition_db.partitions[i].static_data = &static_data_list[i];
1715 g_spm_partition_db.partitions[i].platform_data_list =
1716 platform_data_list_list[i];
1717 g_spm_partition_db.partitions[i].memory_data = &memory_data_list[i];
1718 }
1719 g_spm_partition_db.is_init = 1;
1720
1721 return SPM_ERR_OK;
1722}