/*
 * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <inttypes.h>
#include <stdbool.h>
#include "psa/client.h"
#include "psa/service.h"
#include "psa/lifecycle.h"
#include "tfm_thread.h"
#include "tfm_wait.h"
#include "utilities.h"
#include "tfm_internal_defines.h"
#include "tfm_message_queue.h"
#include "tfm_spm_hal.h"
#include "tfm_irq_list.h"
#include "tfm_api.h"
#include "tfm_secure_api.h"
#include "tfm_memory_utils.h"
#include "spm_api.h"
#include "tfm_peripherals_def.h"
#include "spm_db.h"
#include "tfm_core_utils.h"
#include "spm_psa_client_call.h"
#include "tfm_rpc.h"
#include "tfm_internal.h"
#include "tfm_core_trustzone.h"
#include "tfm_core_mem_check.h"
#include "tfm_list.h"
#include "tfm_pools.h"
#include "region.h"
#include "region_defs.h"
#include "tfm/tfm_spm_services.h"

#include "secure_fw/partitions/tfm_service_list.inc"
#include "tfm_spm_db_ipc.inc"

/* Extern service variable */
extern struct tfm_spm_service_t service[];
extern const struct tfm_spm_service_db_t service_db[];

/* Pools */
TFM_POOL_DECLARE(conn_handle_pool, sizeof(struct tfm_conn_handle_t),
                 TFM_CONN_HANDLE_MAX_NUM);

void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal,
                     IRQn_Type irq_line);

#include "tfm_secure_irq_handlers_ipc.inc"

/*********************** Connection handle conversion APIs *******************/

/* Set a minimal value here for feature expansion. */
#define CLIENT_HANDLE_VALUE_MIN        32

#define CONVERSION_FACTOR_BITOFFSET    3
#define CONVERSION_FACTOR_VALUE        (1 << CONVERSION_FACTOR_BITOFFSET)
/* Set 32 as the maximum */
#define CONVERSION_FACTOR_VALUE_MAX    0x20

#if CONVERSION_FACTOR_VALUE > CONVERSION_FACTOR_VALUE_MAX
#error "CONVERSION FACTOR OUT OF RANGE"
#endif

static uint32_t loop_index;

/*
 * A handle instance psa_handle_t allocated inside the SPM is actually a memory
 * address within the handle pool. Returning such a handle to the client
 * directly would expose a secure memory address, so the handle is converted
 * into another value that does not reveal the underlying address.
 *
 * This function converts the handle instance into another value by scaling the
 * handle's offset within the pool; the converted value is called a user
 * handle.
 *
 * The formula:
 *  user_handle = (handle_instance - POOL_START) * CONVERSION_FACTOR_VALUE +
 *                CLIENT_HANDLE_VALUE_MIN + loop_index
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 *
 * note:
 *  loop_index is used to guarantee that the same handle instance is converted
 *  into different user handles within a short period of time.
 */
psa_handle_t tfm_spm_to_user_handle(struct tfm_conn_handle_t *handle_instance)
{
    psa_handle_t user_handle;

    loop_index = (loop_index + 1) % CONVERSION_FACTOR_VALUE;
    user_handle = (psa_handle_t)((((uintptr_t)handle_instance -
                  (uintptr_t)conn_handle_pool) << CONVERSION_FACTOR_BITOFFSET) +
                  CLIENT_HANDLE_VALUE_MIN + loop_index);

    return user_handle;
}

/*
 * This function converts a user handle into the corresponding handle instance.
 * The converted value is validated before returning; an invalid handle
 * instance is returned as NULL.
 *
 * The formula:
 *  handle_instance = ((user_handle - CLIENT_HANDLE_VALUE_MIN) /
 *                    CONVERSION_FACTOR_VALUE) + POOL_START
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 */
struct tfm_conn_handle_t *tfm_spm_to_handle_instance(psa_handle_t user_handle)
{
    struct tfm_conn_handle_t *handle_instance;

    if (user_handle == PSA_NULL_HANDLE) {
        return NULL;
    }

    handle_instance = (struct tfm_conn_handle_t *)((((uintptr_t)user_handle -
                      CLIENT_HANDLE_VALUE_MIN) >> CONVERSION_FACTOR_BITOFFSET) +
                      (uintptr_t)conn_handle_pool);

    return handle_instance;
}
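
/*
 * Illustrative example of the conversion above (hypothetical addresses only,
 * assuming conn_handle_pool starts at 0x30001000): a pool entry at 0x30001040
 * has offset 0x40, so tfm_spm_to_user_handle() returns
 * (0x40 << 3) + CLIENT_HANDLE_VALUE_MIN + loop_index = 0x220 + loop_index.
 * Because loop_index is always smaller than CONVERSION_FACTOR_VALUE, the
 * reverse shift in tfm_spm_to_handle_instance() discards it and recovers
 * 0x30001040 exactly.
 */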

/* Service handle management functions */
struct tfm_conn_handle_t *tfm_spm_create_conn_handle(
                                        struct tfm_spm_service_t *service,
                                        int32_t client_id)
{
    struct tfm_conn_handle_t *p_handle;

    TFM_CORE_ASSERT(service);

    /* Get buffer for handle list structure from handle pool */
    p_handle = (struct tfm_conn_handle_t *)tfm_pool_alloc(conn_handle_pool);
    if (!p_handle) {
        return NULL;
    }

    p_handle->service = service;
    p_handle->status = TFM_HANDLE_STATUS_IDLE;
    p_handle->client_id = client_id;

    /* Add the handle node to the list for subsequent PSA APIs */
    tfm_list_add_tail(&service->handle_list, &p_handle->list);

    return p_handle;
}

int32_t tfm_spm_validate_conn_handle(
                                    const struct tfm_conn_handle_t *conn_handle,
                                    int32_t client_id)
{
    /* Check that the handle address is valid */
    if (is_valid_chunk_data_in_pool(conn_handle_pool,
                                    (uint8_t *)conn_handle) != true) {
        return IPC_ERROR_GENERIC;
    }

    /* Check that the caller of the handle is correct */
    if (conn_handle->client_id != client_id) {
        return IPC_ERROR_GENERIC;
    }

    return IPC_SUCCESS;
}

/**
 * \brief Free a connection handle which is not used anymore.
 *
 * \param[in] service            Target service context pointer
 * \param[in] conn_handle        Connection handle created by
 *                               tfm_spm_create_conn_handle()
 *
 * \retval IPC_SUCCESS               Success
 * \retval IPC_ERROR_BAD_PARAMETERS  Bad parameters input
 * \retval "Does not return"         Panic if the service cannot be found by
 *                                   handle
 */
static int32_t tfm_spm_free_conn_handle(struct tfm_spm_service_t *service,
                                        struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(conn_handle != NULL);

    /* Clear magic as the handle is not used anymore */
    conn_handle->internal_msg.magic = 0;

    /* Remove node from handle list */
    tfm_list_del_node(&conn_handle->list);

    /* Return the handle buffer to the pool */
    tfm_pool_free(conn_handle);
    return IPC_SUCCESS;
}

/**
 * \brief Set the reverse handle value for a connection.
 *
 * \param[in] service            Target service context pointer
 * \param[in] conn_handle        Connection handle created by
 *                               tfm_spm_create_conn_handle()
 * \param[in] rhandle            rhandle to be saved
 *
 * \retval IPC_SUCCESS               Success
 * \retval IPC_ERROR_BAD_PARAMETERS  Bad parameters input
 * \retval "Does not return"         Panic if the handle node cannot be found
 */
static int32_t tfm_spm_set_rhandle(struct tfm_spm_service_t *service,
                                   struct tfm_conn_handle_t *conn_handle,
                                   void *rhandle)
{
    TFM_CORE_ASSERT(service);
    /* Setting the reverse handle value is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    conn_handle->rhandle = rhandle;
    return IPC_SUCCESS;
}

/**
 * \brief Get the reverse handle value from a connection handle.
 *
 * \param[in] service            Target service context pointer
 * \param[in] conn_handle        Connection handle created by
 *                               tfm_spm_create_conn_handle()
 *
 * \retval void *                Success
 * \retval "Does not return"     Panic for the following cases:
 *                               the service pointer is NULL,
 *                               the handle is \ref PSA_NULL_HANDLE,
 *                               the handle node cannot be found
 */
static void *tfm_spm_get_rhandle(struct tfm_spm_service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    /* Getting the reverse handle value is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    return conn_handle->rhandle;
}

/* Partition management functions */

/**
 * \brief Get the service context by signal.
 *
 * \param[in] partition          Partition context pointer
 *                               \ref spm_partition_desc_t structures
 * \param[in] signal             Signal associated with inputs to the Secure
 *                               Partition, \ref psa_signal_t
 *
 * \retval NULL                  Failed
 * \retval "Not NULL"            Target service context pointer,
 *                               \ref tfm_spm_service_t structures
 */
static struct tfm_spm_service_t *
    tfm_spm_get_service_by_signal(struct spm_partition_desc_t *partition,
                                  psa_signal_t signal)
{
    struct tfm_list_node_t *node, *head;
    struct tfm_spm_service_t *service;

    TFM_CORE_ASSERT(partition);

    if (tfm_list_is_empty(&partition->runtime_data.service_list)) {
        tfm_core_panic();
    }

    head = &partition->runtime_data.service_list;
    TFM_LIST_FOR_EACH(node, head) {
        service = TFM_GET_CONTAINER_PTR(node, struct tfm_spm_service_t, list);
        if (service->service_db->signal == signal) {
            return service;
        }
    }
    return NULL;
}

/**
 * \brief Returns the index of the partition with the given partition ID.
 *
 * \param[in] partition_id       Partition id
 *
 * \return the partition idx if partition_id is valid,
 *         \ref SPM_INVALID_PARTITION_IDX otherwise
 */
static uint32_t get_partition_idx(uint32_t partition_id)
{
    uint32_t i;

    if (partition_id == INVALID_PARTITION_ID) {
        return SPM_INVALID_PARTITION_IDX;
    }

    for (i = 0; i < g_spm_partition_db.partition_count; ++i) {
        if (g_spm_partition_db.partitions[i].static_data->partition_id ==
            partition_id) {
            return i;
        }
    }
    return SPM_INVALID_PARTITION_IDX;
}

/**
 * \brief Get the flags associated with a partition
 *
 * \param[in] partition_idx      Partition index
 *
 * \return Flags associated with the partition
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_flags(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
           partition_flags;
}

#if TFM_LVL != 1
/**
 * \brief Change the privilege mode for partition thread mode.
 *
 * \param[in] privileged         Privileged mode,
 *                               \ref TFM_PARTITION_PRIVILEGED_MODE
 *                               and \ref TFM_PARTITION_UNPRIVILEGED_MODE
 *
 * \note Barrier instructions are not called by this function, and if
 *       it is called in thread mode, it might be necessary to call
 *       them after this function returns.
 */
static void tfm_spm_partition_change_privilege(uint32_t privileged)
{
    CONTROL_Type ctrl;

    ctrl.w = __get_CONTROL();

    if (privileged == TFM_PARTITION_PRIVILEGED_MODE) {
        ctrl.b.nPRIV = 0;
    } else {
        ctrl.b.nPRIV = 1;
    }

    __set_CONTROL(ctrl.w);
}
#endif /* if(TFM_LVL != 1) */

uint32_t tfm_spm_partition_get_partition_id(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
           partition_id;
}

uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
{
    if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
        return TFM_PARTITION_PRIVILEGED_MODE;
    } else {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
}

bool tfm_is_partition_privileged(uint32_t partition_idx)
{
    uint32_t flags = tfm_spm_partition_get_flags(partition_idx);

    return tfm_spm_partition_get_privileged_mode(flags) ==
           TFM_PARTITION_PRIVILEGED_MODE;
}

struct tfm_spm_service_t *tfm_spm_get_service_by_sid(uint32_t sid)
{
    uint32_t i, num;

    num = sizeof(service) / sizeof(struct tfm_spm_service_t);
    for (i = 0; i < num; i++) {
        if (service[i].service_db->sid == sid) {
            return &service[i];
        }
    }

    return NULL;
}

/**
 * \brief Get the partition context by partition ID.
 *
 * \param[in] partition_id       Partition identity
 *
 * \retval NULL                  Failed
 * \retval "Not NULL"            Target partition context pointer,
 *                               \ref spm_partition_desc_t structures
 */
static struct spm_partition_desc_t *
    tfm_spm_get_partition_by_id(int32_t partition_id)
{
    uint32_t idx = get_partition_idx(partition_id);

    if (idx != SPM_INVALID_PARTITION_IDX) {
        return &(g_spm_partition_db.partitions[idx]);
    }
    return NULL;
}

struct spm_partition_desc_t *tfm_spm_get_running_partition(void)
{
    struct tfm_core_thread_t *pth = tfm_core_thrd_get_curr_thread();
    struct spm_partition_desc_t *partition;
    struct spm_partition_runtime_data_t *rt_data;

    rt_data = TFM_GET_CONTAINER_PTR(pth, struct spm_partition_runtime_data_t,
                                    sp_thrd);
    partition = TFM_GET_CONTAINER_PTR(rt_data, struct spm_partition_desc_t,
                                      runtime_data);
    return partition;
}

int32_t tfm_spm_check_client_version(struct tfm_spm_service_t *service,
                                     uint32_t version)
{
    TFM_CORE_ASSERT(service);

    switch (service->service_db->version_policy) {
    case TFM_VERSION_POLICY_RELAXED:
        if (version > service->service_db->version) {
            return IPC_ERROR_VERSION;
        }
        break;
    case TFM_VERSION_POLICY_STRICT:
        if (version != service->service_db->version) {
            return IPC_ERROR_VERSION;
        }
        break;
    default:
        return IPC_ERROR_VERSION;
    }
    return IPC_SUCCESS;
}
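
/*
 * Worked example of the policy check above (illustrative values only): a
 * service published at version 2 with TFM_VERSION_POLICY_RELAXED accepts
 * clients requesting version 1 or 2 and rejects version 3, while the same
 * service under TFM_VERSION_POLICY_STRICT only accepts clients requesting
 * exactly version 2.
 */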

int32_t tfm_spm_check_authorization(uint32_t sid,
                                    struct tfm_spm_service_t *service,
                                    bool ns_caller)
{
    struct spm_partition_desc_t *partition = NULL;
    int32_t i;

    TFM_CORE_ASSERT(service);

    if (ns_caller) {
        if (!service->service_db->non_secure_client) {
            return IPC_ERROR_GENERIC;
        }
    } else {
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }

        for (i = 0; i < partition->static_data->dependencies_num; i++) {
            if (partition->static_data->p_dependencies[i] == sid) {
                break;
            }
        }

        if (i == partition->static_data->dependencies_num) {
            return IPC_ERROR_GENERIC;
        }
    }
    return IPC_SUCCESS;
}

/* Message functions */

/**
 * \brief Get message context by message handle.
 *
 * \param[in] msg_handle         Message handle which is a reference generated
 *                               by the SPM to a specific message.
 *
 * \return The message body context pointer
 *         \ref tfm_msg_body_t structures
 */
static struct tfm_msg_body_t *
    tfm_spm_get_msg_from_handle(psa_handle_t msg_handle)
{
    /*
     * The message handle passed by the caller is considered invalid in the
     * following cases:
     *   1. Not a valid message handle. (The address of a message is not the
     *      address of a possible handle from the pool.)
     *   2. The handle does not belong to the caller partition. (The handle is
     *      either unused, or owned by another partition.)
     * Check the conditions above.
     */
    struct tfm_msg_body_t *p_msg;
    uint32_t partition_id;
    struct tfm_conn_handle_t *p_conn_handle =
                                     tfm_spm_to_handle_instance(msg_handle);

    if (is_valid_chunk_data_in_pool(
        conn_handle_pool, (uint8_t *)p_conn_handle) != 1) {
        return NULL;
    }

    p_msg = &p_conn_handle->internal_msg;

    /*
     * Check that the magic number is correct. This proves that the message
     * structure contains an active message.
     */
    if (p_msg->magic != TFM_MSG_MAGIC) {
        return NULL;
    }

    /* Check that the running partition owns the message */
    partition_id = tfm_spm_partition_get_running_partition_id();
    if (partition_id != p_msg->service->partition->static_data->partition_id) {
        return NULL;
    }

    return p_msg;
}

struct tfm_msg_body_t *
    tfm_spm_get_msg_buffer_from_conn_handle(struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(conn_handle != NULL);

    return &(conn_handle->internal_msg);
}

void tfm_spm_fill_msg(struct tfm_msg_body_t *msg,
                      struct tfm_spm_service_t *service,
                      psa_handle_t handle,
                      int32_t type, int32_t client_id,
                      psa_invec *invec, size_t in_len,
                      psa_outvec *outvec, size_t out_len,
                      psa_outvec *caller_outvec)
{
    uint32_t i;
    struct tfm_conn_handle_t *conn_handle;

    TFM_CORE_ASSERT(msg);
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(!(invec == NULL && in_len != 0));
    TFM_CORE_ASSERT(!(outvec == NULL && out_len != 0));
    TFM_CORE_ASSERT(in_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(out_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(in_len + out_len <= PSA_MAX_IOVEC);

    /* Clear message buffer before using it */
    tfm_core_util_memset(msg, 0, sizeof(struct tfm_msg_body_t));

    tfm_event_init(&msg->ack_evnt);
    msg->magic = TFM_MSG_MAGIC;
    msg->service = service;
    msg->caller_outvec = caller_outvec;
    msg->msg.client_id = client_id;

    /* Copy contents */
    msg->msg.type = type;

    for (i = 0; i < in_len; i++) {
        msg->msg.in_size[i] = invec[i].len;
        msg->invec[i].base = invec[i].base;
    }

    for (i = 0; i < out_len; i++) {
        msg->msg.out_size[i] = outvec[i].len;
        msg->outvec[i].base = outvec[i].base;
        /* The out len records the number of bytes written; reset it to 0 here */
        msg->outvec[i].len = 0;
    }

    /* Use the user connection handle as the message handle */
    msg->msg.handle = handle;

    conn_handle = tfm_spm_to_handle_instance(handle);
    /* For a connected handle, set rhandle on every message */
    if (conn_handle) {
        msg->msg.rhandle = tfm_spm_get_rhandle(service, conn_handle);
    }

    /* Set the private data of the NSPE client caller in multi-core topology */
    if (TFM_CLIENT_ID_IS_NS(client_id)) {
        tfm_rpc_set_caller_data(msg, client_id);
    }
}

int32_t tfm_spm_send_event(struct tfm_spm_service_t *service,
                           struct tfm_msg_body_t *msg)
{
    struct spm_partition_runtime_data_t *p_runtime_data =
                                            &service->partition->runtime_data;

    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(msg);

    /* Enqueue message to service message queue */
    if (tfm_msg_enqueue(&service->msg_queue, msg) != IPC_SUCCESS) {
        return IPC_ERROR_GENERIC;
    }

    /* Message queued. Update signals */
    p_runtime_data->signals |= service->service_db->signal;

    tfm_event_wake(&p_runtime_data->signal_evnt, (p_runtime_data->signals &
                                                  p_runtime_data->signal_mask));

    /*
     * If it is an NS request via RPC, it is unnecessary to block the current
     * thread.
     */
    if (!is_tfm_rpc_msg(msg)) {
        tfm_event_wait(&msg->ack_evnt);
    }

    return IPC_SUCCESS;
}

uint32_t tfm_spm_partition_get_running_partition_id(void)
{
    struct spm_partition_desc_t *partition;

    partition = tfm_spm_get_running_partition();
    if (partition && partition->static_data) {
        return partition->static_data->partition_id;
    } else {
        return INVALID_PARTITION_ID;
    }
}

int32_t tfm_memory_check(const void *buffer, size_t len, bool ns_caller,
                         enum tfm_memory_access_e access,
                         uint32_t privileged)
{
    enum tfm_status_e err;

    /* If len is zero, this indicates an empty buffer and base is ignored */
    if (len == 0) {
        return IPC_SUCCESS;
    }

    if (!buffer) {
        return IPC_ERROR_BAD_PARAMETERS;
    }

    if ((uintptr_t)buffer > (UINTPTR_MAX - len)) {
        return IPC_ERROR_MEMORY_CHECK;
    }

    if (access == TFM_MEMORY_ACCESS_RW) {
        err = tfm_core_has_write_access_to_region(buffer, len, ns_caller,
                                                  privileged);
    } else {
        err = tfm_core_has_read_access_to_region(buffer, len, ns_caller,
                                                 privileged);
    }
    if (err == TFM_SUCCESS) {
        return IPC_SUCCESS;
    }

    return IPC_ERROR_MEMORY_CHECK;
}
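
/*
 * Note on the checks above: a zero-length buffer always passes and a NULL
 * pointer is rejected as a bad parameter; the wrap-around test rejects
 * buffers whose end would overflow the address space before the region
 * permission checks run. For example (illustrative), a 16-byte check on a
 * buffer at UINTPTR_MAX - 8 fails with IPC_ERROR_MEMORY_CHECK without ever
 * consulting tfm_core_has_read_access_to_region().
 */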

uint32_t tfm_spm_init(void)
{
    uint32_t i, j, num;
    struct spm_partition_desc_t *partition;
    struct tfm_core_thread_t *pth, *p_ns_entry_thread = NULL;
    const struct tfm_spm_partition_platform_data_t **platform_data_p;

    tfm_pool_init(conn_handle_pool,
                  POOL_BUFFER_SIZE(conn_handle_pool),
                  sizeof(struct tfm_conn_handle_t),
                  TFM_CONN_HANDLE_MAX_NUM);

    /* Initialize partitions first, as they are used when initializing services */
    for (i = 0; i < g_spm_partition_db.partition_count; i++) {
        partition = &g_spm_partition_db.partitions[i];

        if (!partition || !partition->memory_data || !partition->static_data) {
            tfm_core_panic();
        }

        if (!(partition->static_data->partition_flags & SPM_PART_FLAG_IPC)) {
            tfm_core_panic();
        }

        /* Check if the PSA framework version matches. */
        if (partition->static_data->psa_framework_version !=
            PSA_FRAMEWORK_VERSION) {
            ERROR_MSG("Warning: PSA Framework version does not match!");
            continue;
        }

        platform_data_p = partition->platform_data_list;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(i,
                    *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    tfm_core_panic();
                }
                ++platform_data_p;
            }
        }

        /* Add PSA_DOORBELL signal to assigned_signals */
        partition->runtime_data.assigned_signals |= PSA_DOORBELL;

        /* TODO: This can be optimized by generating the assigned signal
         * at code generation time.
         */
        for (j = 0; j < tfm_core_irq_signals_count; ++j) {
            if (tfm_core_irq_signals[j].partition_id ==
                partition->static_data->partition_id) {
                partition->runtime_data.assigned_signals |=
                                          tfm_core_irq_signals[j].signal_value;
            }
        }

        tfm_event_init(&partition->runtime_data.signal_evnt);
        tfm_list_init(&partition->runtime_data.service_list);

        pth = &partition->runtime_data.sp_thrd;
        if (!pth) {
            tfm_core_panic();
        }

        tfm_core_thrd_init(pth,
                           (tfm_core_thrd_entry_t)
                           partition->static_data->partition_init,
                           NULL,
                           (uintptr_t)partition->memory_data->stack_top,
                           (uintptr_t)partition->memory_data->stack_bottom);

        pth->prior = partition->static_data->partition_priority;

        if (partition->static_data->partition_id == TFM_SP_NON_SECURE_ID) {
            p_ns_entry_thread = pth;
            pth->param = (void *)tfm_spm_hal_get_ns_entry_point();
        }

        /* Kick off */
        if (tfm_core_thrd_start(pth) != THRD_SUCCESS) {
            tfm_core_panic();
        }
    }

    /* Initialize services */
    num = sizeof(service) / sizeof(struct tfm_spm_service_t);
    for (i = 0; i < num; i++) {
        service[i].service_db = &service_db[i];
        partition =
            tfm_spm_get_partition_by_id(service[i].service_db->partition_id);
        if (!partition) {
            tfm_core_panic();
        }
        service[i].partition = partition;
        partition->runtime_data.assigned_signals |= service[i].service_db->signal;

        tfm_list_init(&service[i].handle_list);
        tfm_list_add_tail(&partition->runtime_data.service_list,
                          &service[i].list);
    }

    /*
     * All threads initialized, start the scheduler.
     *
     * NOTE:
     * It is worthwhile to hand the thread object to the scheduler if the
     * background context belongs to one of the threads. Here the background
     * thread is the initialization thread that calls the SPM SVC, which
     * re-uses the non-secure entry thread's stack. After SPM initialization
     * is done, this stack is cleaned up and the background context never
     * returns. Tell the scheduler that the current thread is the non-secure
     * entry thread.
     */
    tfm_core_thrd_start_scheduler(p_ns_entry_thread);

    return p_ns_entry_thread->arch_ctx.lr;
}

void tfm_pendsv_do_schedule(struct tfm_arch_ctx_t *p_actx)
{
#if TFM_LVL == 2
    struct spm_partition_desc_t *p_next_partition;
    struct spm_partition_runtime_data_t *r_data;
    uint32_t is_privileged;
#endif
    struct tfm_core_thread_t *pth_next = tfm_core_thrd_get_next_thread();
    struct tfm_core_thread_t *pth_curr = tfm_core_thrd_get_curr_thread();

    if (pth_next != NULL && pth_curr != pth_next) {
#if TFM_LVL == 2
        r_data = TFM_GET_CONTAINER_PTR(pth_next,
                                       struct spm_partition_runtime_data_t,
                                       sp_thrd);
        p_next_partition = TFM_GET_CONTAINER_PTR(r_data,
                                                 struct spm_partition_desc_t,
                                                 runtime_data);

        if (p_next_partition->static_data->partition_flags &
            SPM_PART_FLAG_PSA_ROT) {
            is_privileged = TFM_PARTITION_PRIVILEGED_MODE;
        } else {
            is_privileged = TFM_PARTITION_UNPRIVILEGED_MODE;
        }

        tfm_spm_partition_change_privilege(is_privileged);
#endif

        tfm_core_thrd_switch_context(p_actx, pth_curr, pth_next);
    }

    /*
     * Handle pending mailbox messages from NS in multi-core topology.
     * Empty operation on single Armv8-M platforms.
     */
    tfm_rpc_client_call_handler();
}

/*********************** SPM functions for PSA Client APIs *******************/

uint32_t tfm_spm_psa_framework_version(void)
{
    return tfm_spm_client_psa_framework_version();
}

uint32_t tfm_spm_psa_version(uint32_t *args, bool ns_caller)
{
    uint32_t sid;

    TFM_CORE_ASSERT(args != NULL);
    sid = (uint32_t)args[0];

    return tfm_spm_client_psa_version(sid, ns_caller);
}

psa_status_t tfm_spm_psa_connect(uint32_t *args, bool ns_caller)
{
    uint32_t sid;
    uint32_t version;

    TFM_CORE_ASSERT(args != NULL);
    sid = (uint32_t)args[0];
    version = (uint32_t)args[1];

    return tfm_spm_client_psa_connect(sid, version, ns_caller);
}

psa_status_t tfm_spm_psa_call(uint32_t *args, bool ns_caller, uint32_t lr)
{
    psa_handle_t handle;
    psa_invec *inptr;
    psa_outvec *outptr;
    size_t in_num, out_num;
    struct spm_partition_desc_t *partition = NULL;
    uint32_t privileged;
    int32_t type;
    struct tfm_control_parameter_t ctrl_param;

    TFM_CORE_ASSERT(args != NULL);
    handle = (psa_handle_t)args[0];

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
        partition->static_data->partition_flags);

    /*
     * Read parameters from the arguments. It is a fatal error if the
     * memory reference for the buffer is invalid or not readable.
     */
    if (tfm_memory_check((const void *)args[1],
        sizeof(struct tfm_control_parameter_t), ns_caller,
        TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
        tfm_core_panic();
    }

    tfm_core_util_memcpy(&ctrl_param,
                         (const void *)args[1],
                         sizeof(ctrl_param));

    type = ctrl_param.type;
    in_num = ctrl_param.in_len;
    out_num = ctrl_param.out_len;
    inptr = (psa_invec *)args[2];
    outptr = (psa_outvec *)args[3];

    /* The request type must be zero or positive. */
    if (type < 0) {
        tfm_core_panic();
    }

    return tfm_spm_client_psa_call(handle, type, inptr, in_num, outptr, out_num,
                                   ns_caller, privileged);
}

void tfm_spm_psa_close(uint32_t *args, bool ns_caller)
{
    psa_handle_t handle;

    TFM_CORE_ASSERT(args != NULL);
    handle = args[0];

    tfm_spm_client_psa_close(handle, ns_caller);
}

uint32_t tfm_spm_get_lifecycle_state(void)
{
    /*
     * FixMe: return PSA_LIFECYCLE_UNKNOWN to the caller directly. It will be
     * implemented in the future.
     */
    return PSA_LIFECYCLE_UNKNOWN;
}

/********************* SPM functions for PSA Service APIs ********************/

psa_signal_t tfm_spm_psa_wait(uint32_t *args)
{
    psa_signal_t signal_mask;
    uint32_t timeout;
    struct spm_partition_desc_t *partition = NULL;

    TFM_CORE_ASSERT(args != NULL);
    signal_mask = (psa_signal_t)args[0];
    timeout = args[1];

    /*
     * Timeout[30:0] is reserved for future use.
     * The SPM must ignore the value of these RES bits.
     */
    timeout &= PSA_TIMEOUT_MASK;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the signal_mask does not include any assigned
     * signals.
     */
    if ((partition->runtime_data.assigned_signals & signal_mask) == 0) {
        tfm_core_panic();
    }

    /*
     * Expected signals are included in the signal wait mask; ignored signals
     * should not be set and must not affect the caller thread's state. Save
     * this mask for further checking when signals are ready to be set.
     */
    partition->runtime_data.signal_mask = signal_mask;

    /*
     * tfm_event_wait() blocks the caller thread if no signals are available.
     * In this case, the return value of this function is temporarily stored
     * in the runtime context. After new signal(s) become available, the return
     * value is updated with the available signal(s) and the blocked thread
     * gets to run.
     */
    if (timeout == PSA_BLOCK &&
        (partition->runtime_data.signals & signal_mask) == 0) {
        tfm_event_wait(&partition->runtime_data.signal_evnt);
    }

    return partition->runtime_data.signals & signal_mask;
}

psa_status_t tfm_spm_psa_get(uint32_t *args)
{
    psa_signal_t signal;
    psa_msg_t *msg = NULL;
    struct tfm_spm_service_t *service = NULL;
    struct tfm_msg_body_t *tmp_msg = NULL;
    struct spm_partition_desc_t *partition = NULL;
    uint32_t privileged;

    TFM_CORE_ASSERT(args != NULL);
    signal = (psa_signal_t)args[0];
    msg = (psa_msg_t *)args[1];

    /*
     * Only one message can be retrieved at a time by psa_get(). It is a
     * fatal error if the input signal has more than one signal bit set.
     */
    if (!tfm_is_one_bit_set(signal)) {
        tfm_core_panic();
    }

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
        partition->static_data->partition_flags);

    /*
     * Write the message to the service buffer. It is a fatal error if the
     * input msg pointer is not a valid memory reference or not read-write.
     */
    if (tfm_memory_check(msg, sizeof(psa_msg_t), false, TFM_MEMORY_ACCESS_RW,
        privileged) != IPC_SUCCESS) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the caller calls psa_get() when no message has
     * been set. The caller must call this function after an RoT Service signal
     * is returned by psa_wait().
     */
    if (partition->runtime_data.signals == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the RoT Service signal is not currently asserted.
     */
    if ((partition->runtime_data.signals & signal) == 0) {
        tfm_core_panic();
    }

    /*
     * Get the RoT Service by signal from the partition. It is a fatal error if
     * this fails, which means the input signal does not correspond to an RoT
     * Service.
     */
    service = tfm_spm_get_service_by_signal(partition, signal);
    if (!service) {
        tfm_core_panic();
    }

    tmp_msg = tfm_msg_dequeue(&service->msg_queue);
    if (!tmp_msg) {
        return PSA_ERROR_DOES_NOT_EXIST;
    }

    (TFM_GET_CONTAINER_PTR(tmp_msg,
                           struct tfm_conn_handle_t,
                           internal_msg))->status = TFM_HANDLE_STATUS_ACTIVE;

    tfm_core_util_memcpy(msg, &tmp_msg->msg, sizeof(psa_msg_t));

    /*
     * There may be multiple messages for this RoT Service signal; do not clear
     * its signal bit until no message remains.
     */
    if (tfm_msg_queue_is_empty(&service->msg_queue)) {
        partition->runtime_data.signals &= ~signal;
    }

    return PSA_SUCCESS;
}

void tfm_spm_psa_set_rhandle(uint32_t *args)
{
    psa_handle_t msg_handle;
    void *rhandle = NULL;
    struct tfm_msg_body_t *msg = NULL;
    struct tfm_conn_handle_t *conn_handle;

    TFM_CORE_ASSERT(args != NULL);
    msg_handle = (psa_handle_t)args[0];
    rhandle = (void *)args[1];

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    msg->msg.rhandle = rhandle;
    conn_handle = tfm_spm_to_handle_instance(msg_handle);

    /* Store the reverse handle for following client calls. */
    tfm_spm_set_rhandle(msg->service, conn_handle, rhandle);
}
1092
1093size_t tfm_spm_psa_read(uint32_t *args)
1094{
1095 psa_handle_t msg_handle;
1096 uint32_t invec_idx;
1097 void *buffer = NULL;
1098 size_t num_bytes;
1099 size_t bytes;
1100 struct tfm_msg_body_t *msg = NULL;
1101 uint32_t privileged;
1102 struct spm_partition_desc_t *partition = NULL;
1103
1104 TFM_CORE_ASSERT(args != NULL);
1105 msg_handle = (psa_handle_t)args[0];
1106 invec_idx = args[1];
1107 buffer = (void *)args[2];
1108 num_bytes = (size_t)args[3];
1109
1110 /* It is a fatal error if message handle is invalid */
1111 msg = tfm_spm_get_msg_from_handle(msg_handle);
1112 if (!msg) {
1113 tfm_core_panic();
1114 }
1115
1116 partition = msg->service->partition;
1117 privileged = tfm_spm_partition_get_privileged_mode(
1118 partition->static_data->partition_flags);
1119
1120 /*
1121 * It is a fatal error if message handle does not refer to a request
1122 * message
1123 */
1124 if (msg->msg.type < PSA_IPC_CALL) {
1125 tfm_core_panic();
1126 }
1127
1128 /*
1129 * It is a fatal error if invec_idx is equal to or greater than
1130 * PSA_MAX_IOVEC
1131 */
1132 if (invec_idx >= PSA_MAX_IOVEC) {
1133 tfm_core_panic();
1134 }
1135
1136 /* There was no remaining data in this input vector */
1137 if (msg->msg.in_size[invec_idx] == 0) {
1138 return 0;
1139 }
1140
1141 /*
1142 * Copy the client data to the service buffer. It is a fatal error
1143 * if the memory reference for buffer is invalid or not read-write.
1144 */
1145 if (tfm_memory_check(buffer, num_bytes, false,
1146 TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
1147 tfm_core_panic();
1148 }
1149
1150 bytes = num_bytes > msg->msg.in_size[invec_idx] ?
1151 msg->msg.in_size[invec_idx] : num_bytes;
1152
1153 tfm_core_util_memcpy(buffer, msg->invec[invec_idx].base, bytes);
1154
1155 /* There maybe some remaining data */
1156 msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + bytes;
1157 msg->msg.in_size[invec_idx] -= bytes;
1158
1159 return bytes;
1160}
1161
1162size_t tfm_spm_psa_skip(uint32_t *args)
1163{
1164 psa_handle_t msg_handle;
1165 uint32_t invec_idx;
1166 size_t num_bytes;
1167 struct tfm_msg_body_t *msg = NULL;
1168
1169 TFM_CORE_ASSERT(args != NULL);
1170 msg_handle = (psa_handle_t)args[0];
1171 invec_idx = args[1];
1172 num_bytes = (size_t)args[2];
1173
1174 /* It is a fatal error if message handle is invalid */
1175 msg = tfm_spm_get_msg_from_handle(msg_handle);
1176 if (!msg) {
1177 tfm_core_panic();
1178 }
1179
1180 /*
1181 * It is a fatal error if message handle does not refer to a request
1182 * message
1183 */
1184 if (msg->msg.type < PSA_IPC_CALL) {
1185 tfm_core_panic();
1186 }
1187
1188 /*
1189 * It is a fatal error if invec_idx is equal to or greater than
1190 * PSA_MAX_IOVEC
1191 */
1192 if (invec_idx >= PSA_MAX_IOVEC) {
1193 tfm_core_panic();
1194 }
1195
1196 /* There was no remaining data in this input vector */
1197 if (msg->msg.in_size[invec_idx] == 0) {
1198 return 0;
1199 }
1200
1201 /*
1202 * If num_bytes is greater than the remaining size of the input vector then
1203 * the remaining size of the input vector is used.
1204 */
1205 if (num_bytes > msg->msg.in_size[invec_idx]) {
1206 num_bytes = msg->msg.in_size[invec_idx];
1207 }
1208
1209 /* There maybe some remaining data */
1210 msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base +
1211 num_bytes;
1212 msg->msg.in_size[invec_idx] -= num_bytes;
1213
1214 return num_bytes;
1215}
1216
1217void tfm_spm_psa_write(uint32_t *args)
1218{
1219 psa_handle_t msg_handle;
1220 uint32_t outvec_idx;
1221 void *buffer = NULL;
1222 size_t num_bytes;
1223 struct tfm_msg_body_t *msg = NULL;
1224 uint32_t privileged;
1225 struct spm_partition_desc_t *partition = NULL;
1226
1227 TFM_CORE_ASSERT(args != NULL);
1228 msg_handle = (psa_handle_t)args[0];
1229 outvec_idx = args[1];
1230 buffer = (void *)args[2];
1231 num_bytes = (size_t)args[3];
1232
1233 /* It is a fatal error if message handle is invalid */
1234 msg = tfm_spm_get_msg_from_handle(msg_handle);
1235 if (!msg) {
1236 tfm_core_panic();
1237 }
1238
1239 partition = msg->service->partition;
1240 privileged = tfm_spm_partition_get_privileged_mode(
1241 partition->static_data->partition_flags);
1242
1243 /*
1244 * It is a fatal error if message handle does not refer to a request
1245 * message
1246 */
1247 if (msg->msg.type < PSA_IPC_CALL) {
1248 tfm_core_panic();
1249 }
1250
1251 /*
1252 * It is a fatal error if outvec_idx is equal to or greater than
1253 * PSA_MAX_IOVEC
1254 */
1255 if (outvec_idx >= PSA_MAX_IOVEC) {
1256 tfm_core_panic();
1257 }
1258
1259 /*
1260 * It is a fatal error if the call attempts to write data past the end of
1261 * the client output vector
1262 */
1263 if (num_bytes > msg->msg.out_size[outvec_idx] -
1264 msg->outvec[outvec_idx].len) {
1265 tfm_core_panic();
1266 }
1267
1268 /*
1269 * Copy the service buffer to client outvecs. It is a fatal error
1270 * if the memory reference for buffer is invalid or not readable.
1271 */
1272 if (tfm_memory_check(buffer, num_bytes, false,
1273 TFM_MEMORY_ACCESS_RO, privileged) != IPC_SUCCESS) {
1274 tfm_core_panic();
1275 }
1276
1277 tfm_core_util_memcpy((char *)msg->outvec[outvec_idx].base +
1278 msg->outvec[outvec_idx].len, buffer, num_bytes);
1279
1280 /* Update the write number */
1281 msg->outvec[outvec_idx].len += num_bytes;
1282}
1283
1284static void update_caller_outvec_len(struct tfm_msg_body_t *msg)
1285{
1286 uint32_t i;
1287
1288 /*
1289 * FixeMe: abstract these part into dedicated functions to avoid
1290 * accessing thread context in psa layer
1291 */
1292 /* If it is a NS request via RPC, the owner of this message is not set */
1293 if (!is_tfm_rpc_msg(msg)) {
1294 TFM_CORE_ASSERT(msg->ack_evnt.owner->state == THRD_STATE_BLOCK);
1295 }
1296
1297 for (i = 0; i < PSA_MAX_IOVEC; i++) {
1298 if (msg->msg.out_size[i] == 0) {
1299 continue;
1300 }
1301
1302 TFM_CORE_ASSERT(msg->caller_outvec[i].base == msg->outvec[i].base);
1303
1304 msg->caller_outvec[i].len = msg->outvec[i].len;
1305 }
1306}
1307
1308void tfm_spm_psa_reply(uint32_t *args)
1309{
1310 psa_handle_t msg_handle;
1311 psa_status_t status;
1312 struct tfm_spm_service_t *service = NULL;
1313 struct tfm_msg_body_t *msg = NULL;
1314 int32_t ret = PSA_SUCCESS;
Ken Liu505b1702020-05-29 13:19:58 +08001315 struct tfm_conn_handle_t *conn_handle;
Mingyang Sund44522a2020-01-16 16:48:37 +08001316
1317 TFM_CORE_ASSERT(args != NULL);
1318 msg_handle = (psa_handle_t)args[0];
1319 status = (psa_status_t)args[1];
1320
1321 /* It is a fatal error if message handle is invalid */
1322 msg = tfm_spm_get_msg_from_handle(msg_handle);
1323 if (!msg) {
1324 tfm_core_panic();
1325 }
1326
1327 /*
1328 * RoT Service information is needed in this function, stored it in message
1329 * body structure. Only two parameters are passed in this function: handle
1330 * and status, so it is useful and simply to do like this.
1331 */
1332 service = msg->service;
1333 if (!service) {
1334 tfm_core_panic();
1335 }
1336
1337 /*
1338 * Three type of message are passed in this function: CONNECTION, REQUEST,
1339 * DISCONNECTION. It needs to process differently for each type.
1340 */
Ken Liu505b1702020-05-29 13:19:58 +08001341 conn_handle = tfm_spm_to_handle_instance(msg_handle);
Mingyang Sund44522a2020-01-16 16:48:37 +08001342 switch (msg->msg.type) {
1343 case PSA_IPC_CONNECT:
1344 /*
1345 * Reply to PSA_IPC_CONNECT message. Connect handle is returned if the
1346 * input status is PSA_SUCCESS. Others return values are based on the
1347 * input status.
1348 */
1349 if (status == PSA_SUCCESS) {
Ken Liu505b1702020-05-29 13:19:58 +08001350 ret = msg_handle;
Mingyang Sund44522a2020-01-16 16:48:37 +08001351 } else if (status == PSA_ERROR_CONNECTION_REFUSED) {
1352 /* Refuse the client connection, indicating a permanent error. */
Ken Liu505b1702020-05-29 13:19:58 +08001353 tfm_spm_free_conn_handle(service, conn_handle);
Mingyang Sund44522a2020-01-16 16:48:37 +08001354 ret = PSA_ERROR_CONNECTION_REFUSED;
1355 } else if (status == PSA_ERROR_CONNECTION_BUSY) {
1356 /* Fail the client connection, indicating a transient error. */
1357 ret = PSA_ERROR_CONNECTION_BUSY;
1358 } else {
1359 tfm_core_panic();
1360 }
1361 break;
1362 case PSA_IPC_DISCONNECT:
1363 /* Service handle is not used anymore */
Ken Liu505b1702020-05-29 13:19:58 +08001364 tfm_spm_free_conn_handle(service, conn_handle);
Mingyang Sund44522a2020-01-16 16:48:37 +08001365
1366 /*
1367 * If the message type is PSA_IPC_DISCONNECT, then the status code is
1368 * ignored
1369 */
1370 break;
1371 default:
1372 if (msg->msg.type >= PSA_IPC_CALL) {
1373 /* Reply to a request message. Return values are based on status */
1374 ret = status;
1375 /*
1376 * The total number of bytes written to a single parameter must be
1377 * reported to the client by updating the len member of the
1378 * psa_outvec structure for the parameter before returning from
1379 * psa_call().
1380 */
1381 update_caller_outvec_len(msg);
1382 } else {
1383 tfm_core_panic();
1384 }
1385 }
1386
1387 if (ret == PSA_ERROR_PROGRAMMER_ERROR) {
1388 /*
1389 * If the source of the programmer error is a Secure Partition, the SPM
1390 * must panic the Secure Partition in response to a PROGRAMMER ERROR.
1391 */
1392 if (TFM_CLIENT_ID_IS_NS(msg->msg.client_id)) {
Ken Liu505b1702020-05-29 13:19:58 +08001393 conn_handle->status = TFM_HANDLE_STATUS_CONNECT_ERROR;
Mingyang Sund44522a2020-01-16 16:48:37 +08001394 } else {
1395 tfm_core_panic();
1396 }
1397 } else {
Ken Liu505b1702020-05-29 13:19:58 +08001398 conn_handle->status = TFM_HANDLE_STATUS_IDLE;
Mingyang Sund44522a2020-01-16 16:48:37 +08001399 }
1400
1401 if (is_tfm_rpc_msg(msg)) {
1402 tfm_rpc_client_call_reply(msg, ret);
1403 } else {
1404 tfm_event_wake(&msg->ack_evnt, ret);
1405 }
1406}
1407
1408/**
1409 * \brief notify the partition with the signal.
1410 *
1411 * \param[in] partition_id The ID of the partition to be notified.
1412 * \param[in] signal The signal that the partition is to be notified
1413 * with.
1414 *
1415 * \retval void Success.
1416 * \retval "Does not return" If partition_id is invalid.
1417 */
1418static void notify_with_signal(int32_t partition_id, psa_signal_t signal)
1419{
1420 struct spm_partition_desc_t *partition = NULL;
1421
1422 /*
1423 * The value of partition_id must be greater than zero as the target of
1424 * notification must be a Secure Partition, providing a Non-secure
1425 * Partition ID is a fatal error.
1426 */
1427 if (!TFM_CLIENT_ID_IS_S(partition_id)) {
1428 tfm_core_panic();
1429 }
1430
1431 /*
1432 * It is a fatal error if partition_id does not correspond to a Secure
1433 * Partition.
1434 */
1435 partition = tfm_spm_get_partition_by_id(partition_id);
1436 if (!partition) {
1437 tfm_core_panic();
1438 }
1439
1440 partition->runtime_data.signals |= signal;
1441
1442 /*
1443 * The target partition may be blocked with waiting for signals after
1444 * called psa_wait(). Set the return value with the available signals
1445 * before wake it up with tfm_event_signal().
1446 */
1447 tfm_event_wake(&partition->runtime_data.signal_evnt,
1448 partition->runtime_data.signals &
1449 partition->runtime_data.signal_mask);
1450}
1451
1452void tfm_spm_psa_notify(uint32_t *args)
1453{
1454 int32_t partition_id;
1455
1456 TFM_CORE_ASSERT(args != NULL);
1457 partition_id = (int32_t)args[0];
1458
1459 notify_with_signal(partition_id, PSA_DOORBELL);
1460}
1461
1462/**
1463 * \brief Assert a signal for a given IRQ line.
1464 *
1465 * \param[in] partition_id The ID of the partition which handles this IRQ
1466 * \param[in] signal The signal associated with this IRQ
1467 * \param[in] irq_line The number of the IRQ line
1468 *
1469 * \retval void Success.
1470 * \retval "Does not return" Partition ID is invalid
1471 */
1472void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal,
TTornblomfaf74f52020-03-04 17:56:27 +01001473 IRQn_Type irq_line)
Mingyang Sund44522a2020-01-16 16:48:37 +08001474{
1475 tfm_spm_hal_disable_irq(irq_line);
1476 notify_with_signal(partition_id, signal);
1477}
1478
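/**
 * \brief Handle a psa_clear() request: clear the PSA_DOORBELL signal
 * of the running partition.
 *
 * \retval void Success.
 * \retval "Does not return" If no partition is running or the
 * doorbell signal is not currently asserted.
 */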
1479void tfm_spm_psa_clear(void)
1480{
1481 struct spm_partition_desc_t *partition = NULL;
1482
1483 partition = tfm_spm_get_running_partition();
1484 if (!partition) {
1485 tfm_core_panic();
1486 }
1487
1488 /*
1489 * It is a fatal error if the Secure Partition's doorbell signal is not
1490 * currently asserted.
1491 */
1492 if ((partition->runtime_data.signals & PSA_DOORBELL) == 0) {
1493 tfm_core_panic();
1494 }
1495 partition->runtime_data.signals &= ~PSA_DOORBELL;
1496}
1497
1498void tfm_spm_psa_panic(void)
1499{
1500 /*
1501 * PSA FF recommends that the SPM causes the system to restart when a secure
1502 * partition panics.
1503 */
1504 tfm_spm_hal_system_reset();
1505}
1506
1507/**
1508 * \brief Return the IRQ line number associated with a signal
1509 *
1510 * \param[in] partition_id The ID of the partition in which we look for
1511 * the signal.
1512 * \param[in] signal The signal to query for.
1513 * \param[out] irq_line The IRQ line associated with the signal.
1514 *
1515 * \retval IPC_SUCCESS Execution successful, irq_line contains a valid
1516 * value.
1517 * \retval IPC_ERROR_GENERIC There was an error finding the IRQ line for the
1518 * signal. irq_line is unchanged.
1519 */
1520static int32_t get_irq_line_for_signal(int32_t partition_id,
1521 psa_signal_t signal,
TTornblomfaf74f52020-03-04 17:56:27 +01001522 IRQn_Type *irq_line)
Mingyang Sund44522a2020-01-16 16:48:37 +08001523{
1524 size_t i;
1525
1526 for (i = 0; i < tfm_core_irq_signals_count; ++i) {
1527 if (tfm_core_irq_signals[i].partition_id == partition_id &&
1528 tfm_core_irq_signals[i].signal_value == signal) {
1529 *irq_line = tfm_core_irq_signals[i].irq_line;
1530 return IPC_SUCCESS;
1531 }
1532 }
1533 return IPC_ERROR_GENERIC;
1534}
1535
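/**
 * \brief Handle a psa_eoi() request.
 *
 * \param[in] args Request arguments; args[0] holds the handled
 * interrupt signal.
 *
 * \retval void Success; the signal is cleared and its IRQ line is
 * re-enabled.
 * \retval "Does not return" If the signal is not a single, currently
 * asserted interrupt signal of the running partition.
 */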
1536void tfm_spm_psa_eoi(uint32_t *args)
1537{
1538 psa_signal_t irq_signal;
TTornblomfaf74f52020-03-04 17:56:27 +01001539 IRQn_Type irq_line = (IRQn_Type) 0;
Mingyang Sund44522a2020-01-16 16:48:37 +08001540 int32_t ret;
1541 struct spm_partition_desc_t *partition = NULL;
1542
1543 TFM_CORE_ASSERT(args != NULL);
1544 irq_signal = (psa_signal_t)args[0];
1545
1546 /* It is a fatal error if the passed signal has more than one bit set. */
1547 if (!tfm_is_one_bit_set(irq_signal)) {
1548 tfm_core_panic();
1549 }
1550
1551 partition = tfm_spm_get_running_partition();
1552 if (!partition) {
1553 tfm_core_panic();
1554 }
1555
1556 ret = get_irq_line_for_signal(partition->static_data->partition_id,
1557 irq_signal, &irq_line);
1558 /* It is a fatal error if the passed signal is not an interrupt signal. */
1559 if (ret != IPC_SUCCESS) {
1560 tfm_core_panic();
1561 }
1562
1563 /* It is a fatal error if the passed signal is not currently asserted. */
1564 if ((partition->runtime_data.signals & irq_signal) == 0) {
1565 tfm_core_panic();
1566 }
1567
1568 partition->runtime_data.signals &= ~irq_signal;
1569
1570 tfm_spm_hal_clear_pending_irq(irq_line);
1571 tfm_spm_hal_enable_irq(irq_line);
1572}
1573
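/**
 * \brief Enable the IRQ line associated with an interrupt signal of
 * the running partition.
 *
 * \param[in] args Pointer to the saved exception context; r0 holds
 * the interrupt signal.
 *
 * \retval void Success.
 * \retval "Does not return" If the signal is not a single interrupt
 * signal of the running partition.
 */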
1574void tfm_spm_enable_irq(uint32_t *args)
1575{
1576 struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args;
1577 psa_signal_t irq_signal = svc_ctx->r0;
TTornblomfaf74f52020-03-04 17:56:27 +01001578 IRQn_Type irq_line = (IRQn_Type) 0;
Mingyang Sund44522a2020-01-16 16:48:37 +08001579 int32_t ret;
1580 struct spm_partition_desc_t *partition = NULL;
1581
1582 /* It is a fatal error if the passed signal has more than one bit set. */
1583 if (!tfm_is_one_bit_set(irq_signal)) {
1584 tfm_core_panic();
1585 }
1586
1587 partition = tfm_spm_get_running_partition();
1588 if (!partition) {
1589 tfm_core_panic();
1590 }
1591
1592 ret = get_irq_line_for_signal(partition->static_data->partition_id,
1593 irq_signal, &irq_line);
1594 /* It is a fatal error if the passed signal is not an interrupt signal. */
1595 if (ret != IPC_SUCCESS) {
1596 tfm_core_panic();
1597 }
1598
1599 tfm_spm_hal_enable_irq(irq_line);
1600}
1601
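/**
 * \brief Disable the IRQ line associated with an interrupt signal of
 * the running partition.
 *
 * \param[in] args Pointer to the saved exception context; r0 holds
 * the interrupt signal.
 *
 * \retval void Success.
 * \retval "Does not return" If the signal is not a single interrupt
 * signal of the running partition.
 */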
1602void tfm_spm_disable_irq(uint32_t *args)
1603{
1604 struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args;
1605 psa_signal_t irq_signal = svc_ctx->r0;
TTornblomfaf74f52020-03-04 17:56:27 +01001606 IRQn_Type irq_line = (IRQn_Type) 0;
Mingyang Sund44522a2020-01-16 16:48:37 +08001607 int32_t ret;
1608 struct spm_partition_desc_t *partition = NULL;
1609
1610 /* It is a fatal error if the passed signal has more than one bit set. */
1611 if (!tfm_is_one_bit_set(irq_signal)) {
1612 tfm_core_panic();
1613 }
1614
1615 partition = tfm_spm_get_running_partition();
1616 if (!partition) {
1617 tfm_core_panic();
1618 }
1619
1620 ret = get_irq_line_for_signal(partition->static_data->partition_id,
1621 irq_signal, &irq_line);
1622 /* It is a fatal error if the passed signal is not an interrupt signal. */
1623 if (ret != IPC_SUCCESS) {
1624 tfm_core_panic();
1625 }
1626
1627 tfm_spm_hal_disable_irq(irq_line);
1628}
1629
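/**
 * \brief Validate the caller that entered the SPM.
 *
 * For a non-secure caller, check that the running partition is the
 * non-secure partition and that exactly one exception context is
 * stacked on the veneer stack; for a secure caller, check that the
 * partition ID is valid. Panics on any violation.
 *
 * \param[in] p_cur_sp Descriptor of the running partition.
 * \param[in] p_ctx Pointer to the stacked exception context.
 * \param[in] exc_return EXC_RETURN value of the exception.
 * \param[in] ns_caller True if the caller is non-secure.
 */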
1630void tfm_spm_validate_caller(struct spm_partition_desc_t *p_cur_sp,
1631 uint32_t *p_ctx, uint32_t exc_return,
1632 bool ns_caller)
1633{
1634 uintptr_t stacked_ctx_pos;
1635
1636 if (ns_caller) {
1637 /*
1638 * A background IRQ can't be supported: if an SP is executing, the
1639 * preempted context of the SP can differ from the one that
1640 * preempted the veneer.
1641 */
1642 if (p_cur_sp->static_data->partition_id != TFM_SP_NON_SECURE_ID) {
1643 tfm_core_panic();
1644 }
1645
1646 /*
1647 * It is a non-secure caller; check whether the veneer stack
1648 * contains multiple contexts.
1649 */
1650 stacked_ctx_pos = (uintptr_t)p_ctx +
1651 sizeof(struct tfm_state_context_t) +
1652 TFM_VENEER_STACK_GUARD_SIZE;
1653
1654 if (is_stack_alloc_fp_space(exc_return)) {
1655#if defined (__FPU_USED) && (__FPU_USED == 1U)
1656 if (FPU->FPCCR & FPU_FPCCR_TS_Msk) {
1657 stacked_ctx_pos += TFM_ADDTIONAL_FP_CONTEXT_WORDS *
1658 sizeof(uint32_t);
1659 }
1660#endif
1661 stacked_ctx_pos += TFM_BASIC_FP_CONTEXT_WORDS * sizeof(uint32_t);
1662 }
1663
1664 if (stacked_ctx_pos != p_cur_sp->runtime_data.sp_thrd.stk_top) {
1665 tfm_core_panic();
1666 }
1667 } else if (p_cur_sp->static_data->partition_id <= 0) {
1668 tfm_core_panic();
1669 }
1670}
Summer Qin830c5542020-02-14 13:44:20 +08001671
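/**
 * \brief Handle a TF-M SPM request.
 *
 * The request type is read from r0 of the saved context and the
 * result is written back to r0. Only the reset vote request is
 * currently supported, and only PSA Root of Trust partitions may
 * issue it.
 *
 * \param[in] svc_ctx Pointer to the saved context of the caller.
 */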
1672void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
1673{
1674 uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
1675 uint32_t running_partition_flags = 0;
1676 const struct spm_partition_desc_t *partition = NULL;
1677
1678 /* Check permissions on request type basis */
1679
1680 switch (svc_ctx->r0) {
1681 case TFM_SPM_REQUEST_RESET_VOTE:
1682 partition = tfm_spm_get_running_partition();
1683 if (!partition) {
1684 tfm_core_panic();
1685 }
1686 running_partition_flags = partition->static_data->partition_flags;
1687
1688 /* Currently only PSA Root of Trust services are allowed to make a reset
1689 * vote request.
1690 */
1691 if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
1692 *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
 break; /* Do not fall through and overwrite the error result. */
1693 }
1694
1695 /* FixMe: this is a placeholder for checks to be performed before
1696 * allowing execution of reset
1697 */
1698 *res_ptr = (uint32_t)TFM_SUCCESS;
1699
1700 break;
1701 default:
1702 *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
1703 }
1704}
Mingyang Sunbd7ceb52020-06-11 16:53:03 +08001705
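/**
 * \brief Initialise the partition database.
 *
 * Link each partition entry to its static data, platform data and
 * memory data, then mark the database as initialised.
 *
 * \retval SPM_ERR_OK Initialisation succeeded.
 */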
1706enum spm_err_t tfm_spm_db_init(void)
1707{
1708 uint32_t i;
1709
1710 /* This function initialises partition db */
1711
1712 for (i = 0; i < g_spm_partition_db.partition_count; i++) {
1713 g_spm_partition_db.partitions[i].static_data = &static_data_list[i];
1714 g_spm_partition_db.partitions[i].platform_data_list =
1715 platform_data_list_list[i];
1716 g_spm_partition_db.partitions[i].memory_data = &memory_data_list[i];
1717 }
1718 g_spm_partition_db.is_init = 1;
1719
1720 return SPM_ERR_OK;
1721}