/*
 * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <inttypes.h>
#include <stdbool.h>
#include "psa/client.h"
#include "psa/service.h"
#include "psa/lifecycle.h"
#include "tfm_thread.h"
#include "tfm_wait.h"
#include "utilities.h"
#include "tfm_internal_defines.h"
#include "tfm_message_queue.h"
#include "tfm_spm_hal.h"
#include "tfm_irq_list.h"
#include "tfm_api.h"
#include "tfm_secure_api.h"
#include "tfm_memory_utils.h"
#include "spm_ipc.h"
#include "tfm_peripherals_def.h"
#include "spm_db.h"
#include "tfm_core_utils.h"
#include "spm_psa_client_call.h"
#include "tfm_rpc.h"
#include "tfm_internal.h"
#include "tfm_core_trustzone.h"
#include "tfm_core_mem_check.h"
#include "tfm_list.h"
#include "tfm_pools.h"
#include "region.h"
#include "region_defs.h"
#include "spm_partition_defs.h"
#include "psa_manifest/pid.h"
#include "tfm/tfm_spm_services.h"

#include "secure_fw/partitions/tfm_service_list.inc"
#include "tfm_spm_db_ipc.inc"

/* Extern service variable */
extern struct tfm_spm_service_t service[];
extern const struct tfm_spm_service_db_t service_db[];

/* Pools */
TFM_POOL_DECLARE(conn_handle_pool, sizeof(struct tfm_conn_handle_t),
                 TFM_CONN_HANDLE_MAX_NUM);

void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal,
                     IRQn_Type irq_line);

#include "tfm_secure_irq_handlers_ipc.inc"

/*********************** Connection handle conversion APIs *******************/

/* Set a minimal value here for feature expansion. */
#define CLIENT_HANDLE_VALUE_MIN        32

#define CONVERSION_FACTOR_BITOFFSET    3
#define CONVERSION_FACTOR_VALUE        (1 << CONVERSION_FACTOR_BITOFFSET)
/* Set 32 as the maximum */
#define CONVERSION_FACTOR_VALUE_MAX    0x20

#if CONVERSION_FACTOR_VALUE > CONVERSION_FACTOR_VALUE_MAX
#error "CONVERSION FACTOR OUT OF RANGE"
#endif

static uint32_t loop_index;

/*
 * A handle instance (psa_handle_t) allocated inside the SPM is actually a
 * memory address within the handle pool. Returning this handle to the client
 * directly would expose a secure memory address. Therefore the handle is
 * converted into another value that does not represent the memory address
 * before it is handed to clients.
 *
 * This function converts the handle instance into another value by scaling
 * the handle's offset in the pool; the converted value is called a user
 * handle.
 *
 * The formula:
 *  user_handle = (handle_instance - POOL_START) * CONVERSION_FACTOR_VALUE +
 *                CLIENT_HANDLE_VALUE_MIN + loop_index
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 *
 * note:
 *  loop_index ensures that the same handle instance is converted into
 *  different user handles within a short time.
 */
psa_handle_t tfm_spm_to_user_handle(struct tfm_conn_handle_t *handle_instance)
{
    psa_handle_t user_handle;

    loop_index = (loop_index + 1) % CONVERSION_FACTOR_VALUE;
    user_handle = (psa_handle_t)((((uintptr_t)handle_instance -
                  (uintptr_t)conn_handle_pool) << CONVERSION_FACTOR_BITOFFSET) +
                  CLIENT_HANDLE_VALUE_MIN + loop_index);

    return user_handle;
}
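
/*
 * Illustrative worked example of the conversion above, assuming a
 * hypothetical pool start address and the factors defined in this file
 * (CONVERSION_FACTOR_BITOFFSET == 3, CLIENT_HANDLE_VALUE_MIN == 32):
 *
 *   pool start (POOL_START)        : 0x30001000
 *   handle_instance                : 0x30001040  (offset 0x40 in the pool)
 *   loop_index after increment     : 1
 *
 *   user_handle = ((0x30001040 - 0x30001000) << 3) + 32 + 1
 *               = (0x40 << 3) + 33
 *               = 0x200 + 33 = 545
 *
 * Converting back (see tfm_spm_to_handle_instance() below):
 *
 *   handle_instance = ((545 - 32) >> 3) + 0x30001000
 *                   = (513 >> 3) + 0x30001000
 *                   = 0x40 + 0x30001000 = 0x30001040
 *
 * The loop_index bits are discarded by the right shift, so any of the eight
 * user handles generated for the same instance map back to the same address.
 */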

/*
 * This function converts a user handle into its corresponding handle
 * instance. The converted value is validated before it is returned; an
 * invalid handle instance is returned as NULL.
 *
 * The formula:
 *  handle_instance = ((user_handle - CLIENT_HANDLE_VALUE_MIN) /
 *                    CONVERSION_FACTOR_VALUE) + POOL_START
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 */
struct tfm_conn_handle_t *tfm_spm_to_handle_instance(psa_handle_t user_handle)
{
    struct tfm_conn_handle_t *handle_instance;

    if (user_handle == PSA_NULL_HANDLE) {
        return NULL;
    }

    handle_instance = (struct tfm_conn_handle_t *)((((uintptr_t)user_handle -
                      CLIENT_HANDLE_VALUE_MIN) >> CONVERSION_FACTOR_BITOFFSET) +
                      (uintptr_t)conn_handle_pool);

    return handle_instance;
}

/* Service handle management functions */
struct tfm_conn_handle_t *tfm_spm_create_conn_handle(
                                        struct tfm_spm_service_t *service,
                                        int32_t client_id)
{
    struct tfm_conn_handle_t *p_handle;

    TFM_CORE_ASSERT(service);

    /* Get a buffer for the handle node structure from the handle pool */
    p_handle = (struct tfm_conn_handle_t *)tfm_pool_alloc(conn_handle_pool);
    if (!p_handle) {
        return NULL;
    }

    p_handle->service = service;
    p_handle->status = TFM_HANDLE_STATUS_IDLE;
    p_handle->client_id = client_id;

    /* Add the handle node to the list for subsequent PSA functions */
    tfm_list_add_tail(&service->handle_list, &p_handle->list);

    return p_handle;
}

int32_t tfm_spm_validate_conn_handle(
                                    const struct tfm_conn_handle_t *conn_handle,
                                    int32_t client_id)
{
    /* Check that the handle address is valid */
    if (is_valid_chunk_data_in_pool(conn_handle_pool,
                                    (uint8_t *)conn_handle) != true) {
        return IPC_ERROR_GENERIC;
    }

    /* Check that the handle belongs to the caller */
    if (conn_handle->client_id != client_id) {
        return IPC_ERROR_GENERIC;
    }

    return IPC_SUCCESS;
}
182
Mingyang Sund44522a2020-01-16 16:48:37 +0800183/**
184 * \brief Free connection handle which not used anymore.
185 *
186 * \param[in] service Target service context pointer
187 * \param[in] conn_handle Connection handle created by
Summer Qin630c76b2020-05-20 10:32:58 +0800188 * tfm_spm_create_conn_handle()
Mingyang Sund44522a2020-01-16 16:48:37 +0800189 *
190 * \retval IPC_SUCCESS Success
191 * \retval IPC_ERROR_BAD_PARAMETERS Bad parameters input
192 * \retval "Does not return" Panic for not find service by handle
193 */
194static int32_t tfm_spm_free_conn_handle(struct tfm_spm_service_t *service,
Summer Qin630c76b2020-05-20 10:32:58 +0800195 struct tfm_conn_handle_t *conn_handle)
Edison Ai764d41f2018-09-21 15:56:36 +0800196{
Ken Liuf250b8b2019-12-27 16:31:24 +0800197 TFM_CORE_ASSERT(service);
Summer Qin630c76b2020-05-20 10:32:58 +0800198 TFM_CORE_ASSERT(conn_handle != NULL);
Edison Ai764d41f2018-09-21 15:56:36 +0800199
Mate Toth-Pala4b5d242019-09-23 09:14:47 +0200200 /* Clear magic as the handler is not used anymore */
Summer Qin630c76b2020-05-20 10:32:58 +0800201 conn_handle->internal_msg.magic = 0;
Mate Toth-Pala4b5d242019-09-23 09:14:47 +0200202
Edison Ai764d41f2018-09-21 15:56:36 +0800203 /* Remove node from handle list */
Summer Qin630c76b2020-05-20 10:32:58 +0800204 tfm_list_del_node(&conn_handle->list);
Edison Ai764d41f2018-09-21 15:56:36 +0800205
206 /* Back handle buffer to pool */
Summer Qin630c76b2020-05-20 10:32:58 +0800207 tfm_pool_free(conn_handle);
Edison Ai764d41f2018-09-21 15:56:36 +0800208 return IPC_SUCCESS;
209}

/**
 * \brief Set the reverse handle value for a connection.
 *
 * \param[in] service           Target service context pointer
 * \param[in] conn_handle       Connection handle created by
 *                              tfm_spm_create_conn_handle()
 * \param[in] rhandle           rhandle value to be saved
 *
 * \retval IPC_SUCCESS          Success
 * \retval IPC_ERROR_BAD_PARAMETERS  Bad parameters input
 * \retval "Does not return"    Panic when the handle node cannot be found
 */
static int32_t tfm_spm_set_rhandle(struct tfm_spm_service_t *service,
                                   struct tfm_conn_handle_t *conn_handle,
                                   void *rhandle)
{
    TFM_CORE_ASSERT(service);
    /* Setting the rhandle is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    conn_handle->rhandle = rhandle;
    return IPC_SUCCESS;
}

/**
 * \brief Get the reverse handle value from a connection handle.
 *
 * \param[in] service           Target service context pointer
 * \param[in] conn_handle       Connection handle created by
 *                              tfm_spm_create_conn_handle()
 *
 * \retval void *               Success
 * \retval "Does not return"    Panic when:
 *                              the service pointer is NULL,
 *                              the handle is \ref PSA_NULL_HANDLE, or
 *                              the handle node cannot be found
 */
static void *tfm_spm_get_rhandle(struct tfm_spm_service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    /* Getting the rhandle is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    return conn_handle->rhandle;
}
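
/*
 * Illustrative sketch (not built): how the connection-handle helpers above
 * fit together over the lifetime of one connection. The client ID used here
 * is hypothetical.
 */
#if 0
static void conn_handle_lifecycle_sketch(struct tfm_spm_service_t *svc)
{
    struct tfm_conn_handle_t *conn;
    int32_t client_id = 100;                /* hypothetical secure client ID */

    /* Allocate a handle node from the pool and bind it to the service */
    conn = tfm_spm_create_conn_handle(svc, client_id);
    if (!conn) {
        return;                             /* pool exhausted */
    }

    /* Later requests must come from the same client */
    if (tfm_spm_validate_conn_handle(conn, client_id) != IPC_SUCCESS) {
        return;
    }

    /* The service may attach per-connection data (rhandle) */
    (void)tfm_spm_set_rhandle(svc, conn, NULL);
    (void)tfm_spm_get_rhandle(svc, conn);

    /* On disconnection the node is unlinked and returned to the pool */
    (void)tfm_spm_free_conn_handle(svc, conn);
}
#endif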

/* Partition management functions */

/**
 * \brief Get the service context by signal.
 *
 * \param[in] partition         Partition context pointer
 *                              \ref spm_partition_desc_t structures
 * \param[in] signal            Signal associated with inputs to the Secure
 *                              Partition, \ref psa_signal_t
 *
 * \retval NULL                 Failed
 * \retval "Not NULL"           Target service context pointer,
 *                              \ref tfm_spm_service_t structures
 */
static struct tfm_spm_service_t *
    tfm_spm_get_service_by_signal(struct spm_partition_desc_t *partition,
                                  psa_signal_t signal)
{
    struct tfm_list_node_t *node, *head;
    struct tfm_spm_service_t *service;

    TFM_CORE_ASSERT(partition);

    if (tfm_list_is_empty(&partition->runtime_data.service_list)) {
        tfm_core_panic();
    }

    head = &partition->runtime_data.service_list;
    TFM_LIST_FOR_EACH(node, head) {
        service = TFM_GET_CONTAINER_PTR(node, struct tfm_spm_service_t, list);
        if (service->service_db->signal == signal) {
            return service;
        }
    }
    return NULL;
}

/**
 * \brief Returns the index of the partition with the given partition ID.
 *
 * \param[in] partition_id      Partition ID
 *
 * \return The partition index if partition_id is valid,
 *         \ref SPM_INVALID_PARTITION_IDX otherwise
 */
static uint32_t get_partition_idx(uint32_t partition_id)
{
    uint32_t i;

    if (partition_id == INVALID_PARTITION_ID) {
        return SPM_INVALID_PARTITION_IDX;
    }

    for (i = 0; i < g_spm_partition_db.partition_count; ++i) {
        if (g_spm_partition_db.partitions[i].static_data->partition_id ==
            partition_id) {
            return i;
        }
    }
    return SPM_INVALID_PARTITION_IDX;
}

/**
 * \brief Get the flags associated with a partition
 *
 * \param[in] partition_idx     Partition index
 *
 * \return Flags associated with the partition
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_flags(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
           partition_flags;
}

#if TFM_LVL != 1
/**
 * \brief Change the privilege mode for partition thread mode.
 *
 * \param[in] privileged        Privileged mode,
 *                              \ref TFM_PARTITION_PRIVILEGED_MODE
 *                              and \ref TFM_PARTITION_UNPRIVILEGED_MODE
 *
 * \note Barrier instructions are not called by this function, and if
 *       it is called in thread mode, it might be necessary to call
 *       them after this function returns.
 */
static void tfm_spm_partition_change_privilege(uint32_t privileged)
{
    CONTROL_Type ctrl;

    ctrl.w = __get_CONTROL();

    if (privileged == TFM_PARTITION_PRIVILEGED_MODE) {
        ctrl.b.nPRIV = 0;
    } else {
        ctrl.b.nPRIV = 1;
    }

    __set_CONTROL(ctrl.w);
}
#endif /* if(TFM_LVL != 1) */
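
/*
 * Illustrative sketch (not built): if the privilege mode is changed from
 * thread mode, the note above applies and an ISB is needed before the new
 * CONTROL value is guaranteed to take effect for subsequent instructions.
 */
#if 0
static void drop_to_unprivileged_sketch(void)
{
    tfm_spm_partition_change_privilege(TFM_PARTITION_UNPRIVILEGED_MODE);
    __ISB();    /* ensure the CONTROL.nPRIV update is visible */
}
#endif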

uint32_t tfm_spm_partition_get_partition_id(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
           partition_id;
}

uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
{
    if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
        return TFM_PARTITION_PRIVILEGED_MODE;
    } else {
        return TFM_PARTITION_UNPRIVILEGED_MODE;
    }
}

bool tfm_is_partition_privileged(uint32_t partition_idx)
{
    uint32_t flags = tfm_spm_partition_get_flags(partition_idx);

    return tfm_spm_partition_get_privileged_mode(flags) ==
           TFM_PARTITION_PRIVILEGED_MODE;
}

struct tfm_spm_service_t *tfm_spm_get_service_by_sid(uint32_t sid)
{
    uint32_t i, num;

    num = sizeof(service) / sizeof(struct tfm_spm_service_t);
    for (i = 0; i < num; i++) {
        if (service[i].service_db->sid == sid) {
            return &service[i];
        }
    }

    return NULL;
}

/**
 * \brief Get the partition context by partition ID.
 *
 * \param[in] partition_id      Partition identity
 *
 * \retval NULL                 Failed
 * \retval "Not NULL"           Target partition context pointer,
 *                              \ref spm_partition_desc_t structures
 */
static struct spm_partition_desc_t *
    tfm_spm_get_partition_by_id(int32_t partition_id)
{
    uint32_t idx = get_partition_idx(partition_id);

    if (idx != SPM_INVALID_PARTITION_IDX) {
        return &(g_spm_partition_db.partitions[idx]);
    }
    return NULL;
}

struct spm_partition_desc_t *tfm_spm_get_running_partition(void)
{
    struct tfm_core_thread_t *pth = tfm_core_thrd_get_curr_thread();
    struct spm_partition_desc_t *partition;
    struct spm_partition_runtime_data_t *rt_data;

    rt_data = TFM_GET_CONTAINER_PTR(pth, struct spm_partition_runtime_data_t,
                                    sp_thrd);
    partition = TFM_GET_CONTAINER_PTR(rt_data, struct spm_partition_desc_t,
                                      runtime_data);
    return partition;
}

int32_t tfm_spm_check_client_version(struct tfm_spm_service_t *service,
                                     uint32_t version)
{
    TFM_CORE_ASSERT(service);

    switch (service->service_db->version_policy) {
    case TFM_VERSION_POLICY_RELAXED:
        if (version > service->service_db->version) {
            return IPC_ERROR_VERSION;
        }
        break;
    case TFM_VERSION_POLICY_STRICT:
        if (version != service->service_db->version) {
            return IPC_ERROR_VERSION;
        }
        break;
    default:
        return IPC_ERROR_VERSION;
    }
    return IPC_SUCCESS;
}
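
/*
 * Illustrative example of the version policy check above, assuming a service
 * whose manifest declares version 2:
 *
 *   TFM_VERSION_POLICY_RELAXED: requests for version 1 or 2 are accepted
 *   (requested version <= declared version); a request for version 3 is
 *   rejected with IPC_ERROR_VERSION.
 *
 *   TFM_VERSION_POLICY_STRICT: only a request for exactly version 2 is
 *   accepted.
 */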

int32_t tfm_spm_check_authorization(uint32_t sid,
                                    struct tfm_spm_service_t *service,
                                    bool ns_caller)
{
    struct spm_partition_desc_t *partition = NULL;
    int32_t i;

    TFM_CORE_ASSERT(service);

    if (ns_caller) {
        if (!service->service_db->non_secure_client) {
            return IPC_ERROR_GENERIC;
        }
    } else {
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }

        for (i = 0; i < partition->static_data->dependencies_num; i++) {
            if (partition->static_data->p_dependencies[i] == sid) {
                break;
            }
        }

        if (i == partition->static_data->dependencies_num) {
            return IPC_ERROR_GENERIC;
        }
    }
    return IPC_SUCCESS;
}

/* Message functions */

/**
 * \brief Get the message context by message handle.
 *
 * \param[in] msg_handle        Message handle which is a reference generated
 *                              by the SPM to a specific message.
 *
 * \return The message body context pointer
 *         \ref tfm_msg_body_t structures
 */
static struct tfm_msg_body_t *
    tfm_spm_get_msg_from_handle(psa_handle_t msg_handle)
{
    /*
     * The message handle passed by the caller is considered invalid in the
     * following cases:
     *   1. It is not a valid message handle. (The address of the message is
     *      not the address of a possible handle from the pool.)
     *   2. The handle does not belong to the caller partition. (The handle is
     *      either unused, or owned by another partition.)
     * Check these conditions below.
     */
    struct tfm_msg_body_t *p_msg;
    uint32_t partition_id;
    struct tfm_conn_handle_t *p_conn_handle =
                                     tfm_spm_to_handle_instance(msg_handle);

    if (is_valid_chunk_data_in_pool(
        conn_handle_pool, (uint8_t *)p_conn_handle) != 1) {
        return NULL;
    }

    p_msg = &p_conn_handle->internal_msg;

    /*
     * Check that the magic number is correct. This proves that the message
     * structure contains an active message.
     */
    if (p_msg->magic != TFM_MSG_MAGIC) {
        return NULL;
    }

    /* Check that the running partition owns the message */
    partition_id = tfm_spm_partition_get_running_partition_id();
    if (partition_id != p_msg->service->partition->static_data->partition_id) {
        return NULL;
    }

    return p_msg;
}

struct tfm_msg_body_t *
    tfm_spm_get_msg_buffer_from_conn_handle(struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(conn_handle != NULL);

    return &(conn_handle->internal_msg);
}

void tfm_spm_fill_msg(struct tfm_msg_body_t *msg,
                      struct tfm_spm_service_t *service,
                      psa_handle_t handle,
                      int32_t type, int32_t client_id,
                      psa_invec *invec, size_t in_len,
                      psa_outvec *outvec, size_t out_len,
                      psa_outvec *caller_outvec)
{
    uint32_t i;
    struct tfm_conn_handle_t *conn_handle;

    TFM_CORE_ASSERT(msg);
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(!(invec == NULL && in_len != 0));
    TFM_CORE_ASSERT(!(outvec == NULL && out_len != 0));
    TFM_CORE_ASSERT(in_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(out_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(in_len + out_len <= PSA_MAX_IOVEC);

    /* Clear the message buffer before using it */
    tfm_core_util_memset(msg, 0, sizeof(struct tfm_msg_body_t));

    tfm_event_init(&msg->ack_evnt);
    msg->magic = TFM_MSG_MAGIC;
    msg->service = service;
    msg->caller_outvec = caller_outvec;
    msg->msg.client_id = client_id;

    /* Copy contents */
    msg->msg.type = type;

    for (i = 0; i < in_len; i++) {
        msg->msg.in_size[i] = invec[i].len;
        msg->invec[i].base = invec[i].base;
    }

    for (i = 0; i < out_len; i++) {
        msg->msg.out_size[i] = outvec[i].len;
        msg->outvec[i].base = outvec[i].base;
        /* outvec len records the number of bytes written, so reset it to 0 */
        msg->outvec[i].len = 0;
    }

    /* Use the user connection handle as the message handle */
    msg->msg.handle = handle;

    conn_handle = tfm_spm_to_handle_instance(handle);
    /* For a connected handle, set rhandle on every message */
    if (conn_handle) {
        msg->msg.rhandle = tfm_spm_get_rhandle(service, conn_handle);
    }

    /* Set the private data of the NSPE client caller in multi-core topology */
    if (TFM_CLIENT_ID_IS_NS(client_id)) {
        tfm_rpc_set_caller_data(msg, client_id);
    }
}

int32_t tfm_spm_send_event(struct tfm_spm_service_t *service,
                           struct tfm_msg_body_t *msg)
{
    struct spm_partition_runtime_data_t *p_runtime_data =
                                            &service->partition->runtime_data;

    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(msg);

    /* Enqueue the message to the service message queue */
    if (tfm_msg_enqueue(&service->msg_queue, msg) != IPC_SUCCESS) {
        return IPC_ERROR_GENERIC;
    }

    /* The message is queued. Update signals */
    p_runtime_data->signals |= service->service_db->signal;

    tfm_event_wake(&p_runtime_data->signal_evnt, (p_runtime_data->signals &
                                                  p_runtime_data->signal_mask));

    /*
     * If it is a NS request via RPC, it is unnecessary to block the current
     * thread.
     */
    if (!is_tfm_rpc_msg(msg)) {
        tfm_event_wait(&msg->ack_evnt);
    }

    return IPC_SUCCESS;
}

uint32_t tfm_spm_partition_get_running_partition_id(void)
{
    struct spm_partition_desc_t *partition;

    partition = tfm_spm_get_running_partition();
    if (partition && partition->static_data) {
        return partition->static_data->partition_id;
    } else {
        return INVALID_PARTITION_ID;
    }
}

int32_t tfm_memory_check(const void *buffer, size_t len, bool ns_caller,
                         enum tfm_memory_access_e access,
                         uint32_t privileged)
{
    enum tfm_status_e err;

    /* If len is zero, this indicates an empty buffer and base is ignored */
    if (len == 0) {
        return IPC_SUCCESS;
    }

    if (!buffer) {
        return IPC_ERROR_BAD_PARAMETERS;
    }

    if ((uintptr_t)buffer > (UINTPTR_MAX - len)) {
        return IPC_ERROR_MEMORY_CHECK;
    }

    if (access == TFM_MEMORY_ACCESS_RW) {
        err = tfm_core_has_write_access_to_region(buffer, len, ns_caller,
                                                  privileged);
    } else {
        err = tfm_core_has_read_access_to_region(buffer, len, ns_caller,
                                                 privileged);
    }
    if (err == TFM_SUCCESS) {
        return IPC_SUCCESS;
    }

    return IPC_ERROR_MEMORY_CHECK;
}
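
/*
 * Illustrative sketch (not built): a typical caller validates a client buffer
 * with tfm_memory_check() before touching it, as the SVC handlers below do.
 * The buffer and privilege level here are hypothetical.
 */
#if 0
static int32_t copy_from_client_sketch(const void *client_buf, size_t len,
                                       bool ns_caller, uint32_t privileged)
{
    if (tfm_memory_check(client_buf, len, ns_caller,
                         TFM_MEMORY_ACCESS_RO, privileged) != IPC_SUCCESS) {
        return IPC_ERROR_MEMORY_CHECK;
    }

    /* It is now safe to read len bytes from client_buf. */
    return IPC_SUCCESS;
}
#endif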

uint32_t tfm_spm_init(void)
{
    uint32_t i, j, num;
    struct spm_partition_desc_t *partition;
    struct tfm_core_thread_t *pth, *p_ns_entry_thread = NULL;
    const struct tfm_spm_partition_platform_data_t **platform_data_p;

    tfm_pool_init(conn_handle_pool,
                  POOL_BUFFER_SIZE(conn_handle_pool),
                  sizeof(struct tfm_conn_handle_t),
                  TFM_CONN_HANDLE_MAX_NUM);

    /* Initialize partitions first; they are used when initializing services */
    for (i = 0; i < g_spm_partition_db.partition_count; i++) {
        partition = &g_spm_partition_db.partitions[i];

        if (!partition || !partition->memory_data || !partition->static_data) {
            tfm_core_panic();
        }

        if (!(partition->static_data->partition_flags & SPM_PART_FLAG_IPC)) {
            tfm_core_panic();
        }

        /* Check if the PSA framework version matches. */
        if (partition->static_data->psa_framework_version !=
            PSA_FRAMEWORK_VERSION) {
            ERROR_MSG("Warning: PSA Framework Version does not match!");
            continue;
        }

        platform_data_p = partition->platform_data_list;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(i,
                    *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    tfm_core_panic();
                }
                ++platform_data_p;
            }
        }

        /* Add PSA_DOORBELL signal to assigned_signals */
        partition->runtime_data.assigned_signals |= PSA_DOORBELL;

        /* TODO: This can be optimized by generating the assigned signal
         * in code generation time.
         */
        for (j = 0; j < tfm_core_irq_signals_count; ++j) {
            if (tfm_core_irq_signals[j].partition_id ==
                partition->static_data->partition_id) {
                partition->runtime_data.assigned_signals |=
                    tfm_core_irq_signals[j].signal_value;
            }
        }

        tfm_event_init(&partition->runtime_data.signal_evnt);
        tfm_list_init(&partition->runtime_data.service_list);

        pth = &partition->runtime_data.sp_thrd;
        if (!pth) {
            tfm_core_panic();
        }

        tfm_core_thrd_init(pth,
                           (tfm_core_thrd_entry_t)
                           partition->static_data->partition_init,
                           NULL,
                           (uintptr_t)partition->memory_data->stack_top,
                           (uintptr_t)partition->memory_data->stack_bottom);

        pth->prior = partition->static_data->partition_priority;

        if (partition->static_data->partition_id == TFM_SP_NON_SECURE_ID) {
            p_ns_entry_thread = pth;
            pth->param = (void *)tfm_spm_hal_get_ns_entry_point();
        }

        /* Kick off */
        if (tfm_core_thrd_start(pth) != THRD_SUCCESS) {
            tfm_core_panic();
        }
    }

    /* Init Service */
    num = sizeof(service) / sizeof(struct tfm_spm_service_t);
    for (i = 0; i < num; i++) {
        service[i].service_db = &service_db[i];
        partition =
            tfm_spm_get_partition_by_id(service[i].service_db->partition_id);
        if (!partition) {
            tfm_core_panic();
        }
        service[i].partition = partition;
        partition->runtime_data.assigned_signals |= service[i].service_db->signal;

        tfm_list_init(&service[i].handle_list);
        tfm_list_add_tail(&partition->runtime_data.service_list,
                          &service[i].list);
    }

    /*
     * All threads initialized, start the scheduler.
     *
     * NOTE:
     * It is worthwhile to give the thread object to the scheduler if the
     * background context belongs to one of the threads. Here the background
     * thread is the initialization thread which calls the SPM SVC, and which
     * re-uses the non-secure entry thread's stack. After SPM initialization is
     * done, this stack is cleaned up and the background context is never going
     * to return. Tell the scheduler that the current thread is the non-secure
     * entry thread.
     */
    tfm_core_thrd_start_scheduler(p_ns_entry_thread);

    return p_ns_entry_thread->arch_ctx.lr;
}

void tfm_pendsv_do_schedule(struct tfm_arch_ctx_t *p_actx)
{
#if TFM_LVL == 2
    struct spm_partition_desc_t *p_next_partition;
    struct spm_partition_runtime_data_t *r_data;
    uint32_t is_privileged;
#endif
    struct tfm_core_thread_t *pth_next = tfm_core_thrd_get_next_thread();
    struct tfm_core_thread_t *pth_curr = tfm_core_thrd_get_curr_thread();

    if (pth_next != NULL && pth_curr != pth_next) {
#if TFM_LVL == 2
        r_data = TFM_GET_CONTAINER_PTR(pth_next,
                                       struct spm_partition_runtime_data_t,
                                       sp_thrd);
        p_next_partition = TFM_GET_CONTAINER_PTR(r_data,
                                                 struct spm_partition_desc_t,
                                                 runtime_data);

        if (p_next_partition->static_data->partition_flags &
            SPM_PART_FLAG_PSA_ROT) {
            is_privileged = TFM_PARTITION_PRIVILEGED_MODE;
        } else {
            is_privileged = TFM_PARTITION_UNPRIVILEGED_MODE;
        }

        tfm_spm_partition_change_privilege(is_privileged);
#endif

        tfm_core_thrd_switch_context(p_actx, pth_curr, pth_next);
    }

    /*
     * Handle pending mailbox message from NS in multi-core topology.
     * Empty operation on single Armv8-M platform.
     */
    tfm_rpc_client_call_handler();
}

/*********************** SPM functions for PSA Client APIs *******************/

uint32_t tfm_spm_psa_framework_version(void)
{
    return tfm_spm_client_psa_framework_version();
}

uint32_t tfm_spm_psa_version(uint32_t *args, bool ns_caller)
{
    uint32_t sid;

    TFM_CORE_ASSERT(args != NULL);
    sid = (uint32_t)args[0];

    return tfm_spm_client_psa_version(sid, ns_caller);
}

psa_status_t tfm_spm_psa_connect(uint32_t *args, bool ns_caller)
{
    uint32_t sid;
    uint32_t version;

    TFM_CORE_ASSERT(args != NULL);
    sid = (uint32_t)args[0];
    version = (uint32_t)args[1];

    return tfm_spm_client_psa_connect(sid, version, ns_caller);
}

psa_status_t tfm_spm_psa_call(uint32_t *args, bool ns_caller, uint32_t lr)
{
    psa_handle_t handle;
    psa_invec *inptr;
    psa_outvec *outptr;
    size_t in_num, out_num;
    struct spm_partition_desc_t *partition = NULL;
    uint32_t privileged;
    int32_t type;
    struct tfm_control_parameter_t ctrl_param;

    TFM_CORE_ASSERT(args != NULL);
    handle = (psa_handle_t)args[0];

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
                                      partition->static_data->partition_flags);

    /*
     * Read parameters from the arguments. It is a fatal error if the
     * memory reference for buffer is invalid or not readable.
     */
    if (tfm_memory_check((const void *)args[1],
        sizeof(struct tfm_control_parameter_t), ns_caller,
        TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
        tfm_core_panic();
    }

    tfm_core_util_memcpy(&ctrl_param,
                         (const void *)args[1],
                         sizeof(ctrl_param));

    type = ctrl_param.type;
    in_num = ctrl_param.in_len;
    out_num = ctrl_param.out_len;
    inptr = (psa_invec *)args[2];
    outptr = (psa_outvec *)args[3];

    /* The request type must be zero or positive. */
    if (type < 0) {
        tfm_core_panic();
    }

    return tfm_spm_client_psa_call(handle, type, inptr, in_num, outptr, out_num,
                                   ns_caller, privileged);
}

void tfm_spm_psa_close(uint32_t *args, bool ns_caller)
{
    psa_handle_t handle;

    TFM_CORE_ASSERT(args != NULL);
    handle = args[0];

    tfm_spm_client_psa_close(handle, ns_caller);
}
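
/*
 * Illustrative sketch (not built): the client-side PSA calls that eventually
 * land in the SVC handlers above. The SID, version and buffer contents used
 * here are hypothetical.
 */
#if 0
static psa_status_t client_call_sketch(void)
{
    psa_handle_t handle;
    psa_status_t status;
    uint8_t req[4] = {0};
    uint8_t rsp[4];
    psa_invec in_vec[] = { { req, sizeof(req) } };
    psa_outvec out_vec[] = { { rsp, sizeof(rsp) } };

    handle = psa_connect(0x00000100, 1);    /* hypothetical SID and version */
    if (handle <= 0) {
        return PSA_ERROR_CONNECTION_REFUSED;
    }

    status = psa_call(handle, PSA_IPC_CALL, in_vec, 1, out_vec, 1);

    psa_close(handle);
    return status;
}
#endif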

uint32_t tfm_spm_get_lifecycle_state(void)
{
    /*
     * FixMe: return PSA_LIFECYCLE_UNKNOWN to the caller directly. It will be
     * implemented in the future.
     */
    return PSA_LIFECYCLE_UNKNOWN;
}

/********************* SPM functions for PSA Service APIs ********************/

psa_signal_t tfm_spm_psa_wait(uint32_t *args)
{
    psa_signal_t signal_mask;
    uint32_t timeout;
    struct spm_partition_desc_t *partition = NULL;

    TFM_CORE_ASSERT(args != NULL);
    signal_mask = (psa_signal_t)args[0];
    timeout = args[1];

    /*
     * Timeout[30:0] are reserved for future use.
     * SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the signal_mask does not include any assigned
     * signals.
     */
    if ((partition->runtime_data.assigned_signals & signal_mask) == 0) {
        tfm_core_panic();
    }

    /*
     * Expected signals are included in the signal wait mask; ignored signals
     * must not be set and must not affect the caller thread state. Save this
     * mask for further checking when signals are ready to be set.
     */
    partition->runtime_data.signal_mask = signal_mask;

    /*
     * tfm_event_wait() blocks the caller thread if no signals are available.
     * In this case, the return value of this function is temporarily set in
     * the runtime context. After new signal(s) become available, the return
     * value is updated with the available signal(s) and the blocked thread
     * gets to run.
     */
    if (timeout == PSA_BLOCK &&
        (partition->runtime_data.signals & signal_mask) == 0) {
        tfm_event_wait(&partition->runtime_data.signal_evnt);
    }

    return partition->runtime_data.signals & signal_mask;
}
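
/*
 * Illustrative sketch (not built): the service-side loop that drives the
 * psa_wait()/psa_get() handlers above. The signal name is hypothetical and
 * would normally come from the partition's generated manifest header.
 */
#if 0
static void partition_main_loop_sketch(void)
{
    psa_signal_t signals;
    psa_msg_t msg;

    while (1) {
        signals = psa_wait(PSA_WAIT_ANY, PSA_BLOCK);
        if (signals & HYPOTHETICAL_SERVICE_SIGNAL) {
            if (psa_get(HYPOTHETICAL_SERVICE_SIGNAL, &msg) != PSA_SUCCESS) {
                continue;
            }
            switch (msg.type) {
            case PSA_IPC_CONNECT:
            case PSA_IPC_DISCONNECT:
                psa_reply(msg.handle, PSA_SUCCESS);
                break;
            default:
                /* Handle request messages (type >= PSA_IPC_CALL) here. */
                psa_reply(msg.handle, PSA_SUCCESS);
                break;
            }
        }
    }
}
#endif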

psa_status_t tfm_spm_psa_get(uint32_t *args)
{
    psa_signal_t signal;
    psa_msg_t *msg = NULL;
    struct tfm_spm_service_t *service = NULL;
    struct tfm_msg_body_t *tmp_msg = NULL;
    struct spm_partition_desc_t *partition = NULL;
    uint32_t privileged;

    TFM_CORE_ASSERT(args != NULL);
    signal = (psa_signal_t)args[0];
    msg = (psa_msg_t *)args[1];

    /*
     * Only one message can be retrieved per psa_get() call. It is a fatal
     * error if the input signal has more than one signal bit set.
     */
    if (!tfm_is_one_bit_set(signal)) {
        tfm_core_panic();
    }

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
                                      partition->static_data->partition_flags);

    /*
     * Write the message to the service buffer. It is a fatal error if the
     * input msg pointer is not a valid memory reference or not read-write.
     */
    if (tfm_memory_check(msg, sizeof(psa_msg_t), false, TFM_MEMORY_ACCESS_RW,
        privileged) != IPC_SUCCESS) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the caller calls psa_get() when no message has
     * been set. The caller must call this function after an RoT Service signal
     * is returned by psa_wait().
     */
    if (partition->runtime_data.signals == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the RoT Service signal is not currently asserted.
     */
    if ((partition->runtime_data.signals & signal) == 0) {
        tfm_core_panic();
    }

    /*
     * Get the RoT service by signal from the partition. It is a fatal error if
     * this fails, which means the input signal does not correspond to an RoT
     * service.
     */
    service = tfm_spm_get_service_by_signal(partition, signal);
    if (!service) {
        tfm_core_panic();
    }

    tmp_msg = tfm_msg_dequeue(&service->msg_queue);
    if (!tmp_msg) {
        return PSA_ERROR_DOES_NOT_EXIST;
    }

    (TFM_GET_CONTAINER_PTR(tmp_msg,
                           struct tfm_conn_handle_t,
                           internal_msg))->status = TFM_HANDLE_STATUS_ACTIVE;

    tfm_core_util_memcpy(msg, &tmp_msg->msg, sizeof(psa_msg_t));

    /*
     * There may be multiple messages for this RoT Service signal, so do not
     * clear its mask until no message remains.
     */
    if (tfm_msg_queue_is_empty(&service->msg_queue)) {
        partition->runtime_data.signals &= ~signal;
    }

    return PSA_SUCCESS;
}

void tfm_spm_psa_set_rhandle(uint32_t *args)
{
    psa_handle_t msg_handle;
    void *rhandle = NULL;
    struct tfm_msg_body_t *msg = NULL;
    struct tfm_conn_handle_t *conn_handle;

    TFM_CORE_ASSERT(args != NULL);
    msg_handle = (psa_handle_t)args[0];
    rhandle = (void *)args[1];

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    msg->msg.rhandle = rhandle;
    conn_handle = tfm_spm_to_handle_instance(msg_handle);

    /* Store reverse handle for following client calls. */
    tfm_spm_set_rhandle(msg->service, conn_handle, rhandle);
}

size_t tfm_spm_psa_read(uint32_t *args)
{
    psa_handle_t msg_handle;
    uint32_t invec_idx;
    void *buffer = NULL;
    size_t num_bytes;
    size_t bytes;
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct spm_partition_desc_t *partition = NULL;

    TFM_CORE_ASSERT(args != NULL);
    msg_handle = (psa_handle_t)args[0];
    invec_idx = args[1];
    buffer = (void *)args[2];
    num_bytes = (size_t)args[3];

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                      partition->static_data->partition_flags);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There is no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

    /*
     * Copy the client data to the service buffer. It is a fatal error
     * if the memory reference for buffer is invalid or not read-write.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
        TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
        tfm_core_panic();
    }

    bytes = num_bytes > msg->msg.in_size[invec_idx] ?
            msg->msg.in_size[invec_idx] : num_bytes;

    tfm_core_util_memcpy(buffer, msg->invec[invec_idx].base, bytes);

    /* There may be some data remaining */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + bytes;
    msg->msg.in_size[invec_idx] -= bytes;

    return bytes;
}

size_t tfm_spm_psa_skip(uint32_t *args)
{
    psa_handle_t msg_handle;
    uint32_t invec_idx;
    size_t num_bytes;
    struct tfm_msg_body_t *msg = NULL;

    TFM_CORE_ASSERT(args != NULL);
    msg_handle = (psa_handle_t)args[0];
    invec_idx = args[1];
    num_bytes = (size_t)args[2];

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There is no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

    /*
     * If num_bytes is greater than the remaining size of the input vector then
     * the remaining size of the input vector is used.
     */
    if (num_bytes > msg->msg.in_size[invec_idx]) {
        num_bytes = msg->msg.in_size[invec_idx];
    }

    /* There may be some data remaining */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base +
                                 num_bytes;
    msg->msg.in_size[invec_idx] -= num_bytes;

    return num_bytes;
}

void tfm_spm_psa_write(uint32_t *args)
{
    psa_handle_t msg_handle;
    uint32_t outvec_idx;
    void *buffer = NULL;
    size_t num_bytes;
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct spm_partition_desc_t *partition = NULL;

    TFM_CORE_ASSERT(args != NULL);
    msg_handle = (psa_handle_t)args[0];
    outvec_idx = args[1];
    buffer = (void *)args[2];
    num_bytes = (size_t)args[3];

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                      partition->static_data->partition_flags);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the call attempts to write data past the end of
     * the client output vector
     */
    if (num_bytes > msg->msg.out_size[outvec_idx] -
        msg->outvec[outvec_idx].len) {
        tfm_core_panic();
    }

    /*
     * Copy the service buffer to the client outvec. It is a fatal error
     * if the memory reference for buffer is invalid or not readable.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
        TFM_MEMORY_ACCESS_RO, privileged) != IPC_SUCCESS) {
        tfm_core_panic();
    }

    tfm_core_util_memcpy((char *)msg->outvec[outvec_idx].base +
                         msg->outvec[outvec_idx].len, buffer, num_bytes);

    /* Update the number of bytes written */
    msg->outvec[outvec_idx].len += num_bytes;
}
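
/*
 * Illustrative sketch (not built): how a service uses psa_read()/psa_write(),
 * which are backed by the handlers above. Chunked reads work because
 * tfm_spm_psa_read() advances the input vector base and shrinks in_size. The
 * output vector is assumed here to be at least as large as the input.
 */
#if 0
static void echo_request_sketch(const psa_msg_t *msg)
{
    uint8_t chunk[16];
    size_t n;

    /* Drain input vector 0 in small chunks */
    while ((n = psa_read(msg->handle, 0, chunk, sizeof(chunk))) > 0) {
        /* Echo the data back through output vector 0 */
        psa_write(msg->handle, 0, chunk, n);
    }
}
#endif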

static void update_caller_outvec_len(struct tfm_msg_body_t *msg)
{
    uint32_t i;

    /*
     * FixMe: abstract these parts into dedicated functions to avoid
     * accessing the thread context in the PSA layer
     */
    /* If it is a NS request via RPC, the owner of this message is not set */
    if (!is_tfm_rpc_msg(msg)) {
        TFM_CORE_ASSERT(msg->ack_evnt.owner->state == THRD_STATE_BLOCK);
    }

    for (i = 0; i < PSA_MAX_IOVEC; i++) {
        if (msg->msg.out_size[i] == 0) {
            continue;
        }

        TFM_CORE_ASSERT(msg->caller_outvec[i].base == msg->outvec[i].base);

        msg->caller_outvec[i].len = msg->outvec[i].len;
    }
}

void tfm_spm_psa_reply(uint32_t *args)
{
    psa_handle_t msg_handle;
    psa_status_t status;
    struct tfm_spm_service_t *service = NULL;
    struct tfm_msg_body_t *msg = NULL;
    int32_t ret = PSA_SUCCESS;
    struct tfm_conn_handle_t *conn_handle;

    TFM_CORE_ASSERT(args != NULL);
    msg_handle = (psa_handle_t)args[0];
    status = (psa_status_t)args[1];

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * The RoT Service information is needed in this function; it is stored in
     * the message body structure. Only two parameters (handle and status) are
     * passed to this function, so this is a simple and convenient way to
     * retrieve it.
     */
    service = msg->service;
    if (!service) {
        tfm_core_panic();
    }

    /*
     * Three types of message are passed to this function: CONNECTION, REQUEST,
     * DISCONNECTION. Each type needs to be processed differently.
     */
    conn_handle = tfm_spm_to_handle_instance(msg_handle);
    switch (msg->msg.type) {
    case PSA_IPC_CONNECT:
        /*
         * Reply to a PSA_IPC_CONNECT message. The connection handle is
         * returned if the input status is PSA_SUCCESS. Other return values
         * are based on the input status.
         */
        if (status == PSA_SUCCESS) {
            ret = msg_handle;
        } else if (status == PSA_ERROR_CONNECTION_REFUSED) {
            /* Refuse the client connection, indicating a permanent error. */
            tfm_spm_free_conn_handle(service, conn_handle);
            ret = PSA_ERROR_CONNECTION_REFUSED;
        } else if (status == PSA_ERROR_CONNECTION_BUSY) {
            /* Fail the client connection, indicating a transient error. */
            ret = PSA_ERROR_CONNECTION_BUSY;
        } else {
            tfm_core_panic();
        }
        break;
    case PSA_IPC_DISCONNECT:
        /* The service handle is not used anymore */
        tfm_spm_free_conn_handle(service, conn_handle);

        /*
         * If the message type is PSA_IPC_DISCONNECT, then the status code is
         * ignored
         */
        break;
    default:
        if (msg->msg.type >= PSA_IPC_CALL) {
            /* Reply to a request message. Return values are based on status */
            ret = status;
            /*
             * The total number of bytes written to a single parameter must be
             * reported to the client by updating the len member of the
             * psa_outvec structure for the parameter before returning from
             * psa_call().
             */
            update_caller_outvec_len(msg);
        } else {
            tfm_core_panic();
        }
    }

    if (ret == PSA_ERROR_PROGRAMMER_ERROR) {
        /*
         * If the source of the programmer error is a Secure Partition, the SPM
         * must panic the Secure Partition in response to a PROGRAMMER ERROR.
         */
        if (TFM_CLIENT_ID_IS_NS(msg->msg.client_id)) {
            conn_handle->status = TFM_HANDLE_STATUS_CONNECT_ERROR;
        } else {
            tfm_core_panic();
        }
    } else {
        conn_handle->status = TFM_HANDLE_STATUS_IDLE;
    }

    if (is_tfm_rpc_msg(msg)) {
        tfm_rpc_client_call_reply(msg, ret);
    } else {
        tfm_event_wake(&msg->ack_evnt, ret);
    }
}

/**
 * \brief Notify the partition with the signal.
 *
 * \param[in] partition_id      The ID of the partition to be notified.
 * \param[in] signal            The signal that the partition is to be
 *                              notified with.
 *
 * \retval void                 Success.
 * \retval "Does not return"    If partition_id is invalid.
 */
static void notify_with_signal(int32_t partition_id, psa_signal_t signal)
{
    struct spm_partition_desc_t *partition = NULL;

    /*
     * The value of partition_id must be greater than zero as the target of
     * notification must be a Secure Partition. Providing a Non-secure
     * Partition ID is a fatal error.
     */
    if (!TFM_CLIENT_ID_IS_S(partition_id)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if partition_id does not correspond to a Secure
     * Partition.
     */
    partition = tfm_spm_get_partition_by_id(partition_id);
    if (!partition) {
        tfm_core_panic();
    }

    partition->runtime_data.signals |= signal;

    /*
     * The target partition may be blocked waiting for signals after calling
     * psa_wait(). Set the wake-up return value to the asserted signals that
     * match its signal mask before waking it up with tfm_event_wake().
     */
    tfm_event_wake(&partition->runtime_data.signal_evnt,
                   partition->runtime_data.signals &
                   partition->runtime_data.signal_mask);
}

void tfm_spm_psa_notify(uint32_t *args)
{
    int32_t partition_id;

    TFM_CORE_ASSERT(args != NULL);
    partition_id = (int32_t)args[0];

    notify_with_signal(partition_id, PSA_DOORBELL);
}
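
/*
 * Illustrative sketch (not part of the build): the partition-side pair of the
 * SVC handler above. psa_notify() and psa_wait() are the standard PSA FF
 * APIs; EXAMPLE_PARTITION_ID is a placeholder.
 *
 *   psa_notify(EXAMPLE_PARTITION_ID);   // asserts the target's PSA_DOORBELL
 *
 *   // ...while the target partition typically blocks on:
 *   psa_signal_t signals = psa_wait(PSA_DOORBELL, PSA_BLOCK);
 */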

/**
 * \brief Assert the signal associated with a given IRQ line.
 *
 * \param[in] partition_id      The ID of the partition which handles this IRQ
 * \param[in] signal            The signal associated with this IRQ
 * \param[in] irq_line          The number of the IRQ line
 *
 * \retval void                 Success.
 * \retval "Does not return"    Partition ID is invalid
 */
void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal,
                     IRQn_Type irq_line)
{
    tfm_spm_hal_disable_irq(irq_line);
    notify_with_signal(partition_id, signal);
}
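
/*
 * Illustrative sketch (not part of the build): the generated handlers in
 * tfm_secure_irq_handlers_ipc.inc are expected to forward into
 * tfm_irq_handler() roughly as below; the handler name, partition ID, signal
 * and IRQ number are placeholders.
 *
 *   void EXAMPLE_IRQHandler(void)
 *   {
 *       tfm_irq_handler(EXAMPLE_PARTITION_ID, EXAMPLE_IRQ_SIGNAL,
 *                       EXAMPLE_IRQn);
 *   }
 */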

void tfm_spm_psa_clear(void)
{
    struct spm_partition_desc_t *partition = NULL;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the Secure Partition's doorbell signal is not
     * currently asserted.
     */
    if ((partition->runtime_data.signals & PSA_DOORBELL) == 0) {
        tfm_core_panic();
    }
    partition->runtime_data.signals &= ~PSA_DOORBELL;
}
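
/*
 * Illustrative sketch (not part of the build): the partition-side call that
 * reaches the SVC handler above. psa_clear() must only be called while
 * PSA_DOORBELL is asserted, mirroring the fatal-error check here.
 *
 *   if (psa_wait(PSA_DOORBELL, PSA_BLOCK) & PSA_DOORBELL) {
 *       ...handle the doorbell event...
 *       psa_clear();
 *   }
 */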

void tfm_spm_psa_panic(void)
{
    /*
     * PSA FF recommends that the SPM causes the system to restart when a
     * Secure Partition panics.
     */
    tfm_spm_hal_system_reset();
}
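
/*
 * Illustrative sketch (not part of the build): a Secure Partition triggers
 * this path with the standard PSA FF call below when it detects an
 * unrecoverable error.
 *
 *   psa_panic();   // does not return; ends in tfm_spm_hal_system_reset()
 */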

/**
 * \brief Return the IRQ line number associated with a signal.
 *
 * \param[in]  partition_id     The ID of the partition in which to look for
 *                              the signal.
 * \param[in]  signal           The signal to query for.
 * \param[out] irq_line         The IRQ line associated with the signal.
 *
 * \retval IPC_SUCCESS          Execution successful, irq_line contains a
 *                              valid value.
 * \retval IPC_ERROR_GENERIC    There was an error finding the IRQ line for
 *                              the signal. irq_line is unchanged.
 */
static int32_t get_irq_line_for_signal(int32_t partition_id,
                                       psa_signal_t signal,
                                       IRQn_Type *irq_line)
{
    size_t i;

    for (i = 0; i < tfm_core_irq_signals_count; ++i) {
        if (tfm_core_irq_signals[i].partition_id == partition_id &&
            tfm_core_irq_signals[i].signal_value == signal) {
            *irq_line = tfm_core_irq_signals[i].irq_line;
            return IPC_SUCCESS;
        }
    }
    return IPC_ERROR_GENERIC;
}
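
/*
 * Illustrative sketch (not part of the build): the lookup above assumes that
 * tfm_core_irq_signals[] (generated from the partition manifests, see
 * tfm_irq_list.h) carries at least a (partition_id, signal_value, irq_line)
 * triple per secure interrupt, conceptually:
 *
 *   { .partition_id = EXAMPLE_PARTITION_ID,
 *     .signal_value = EXAMPLE_IRQ_SIGNAL,
 *     .irq_line     = EXAMPLE_IRQn },
 *
 * The exact element type and any additional fields are defined by the
 * generated list, not here.
 */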

void tfm_spm_psa_eoi(uint32_t *args)
{
    psa_signal_t irq_signal;
    IRQn_Type irq_line = (IRQn_Type) 0;
    int32_t ret;
    struct spm_partition_desc_t *partition = NULL;

    TFM_CORE_ASSERT(args != NULL);
    irq_signal = (psa_signal_t)args[0];

    /* It is a fatal error if the passed signal has more than one bit set. */
    if (!tfm_is_one_bit_set(irq_signal)) {
        tfm_core_panic();
    }

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    ret = get_irq_line_for_signal(partition->static_data->partition_id,
                                  irq_signal, &irq_line);
    /* It is a fatal error if the passed signal is not an interrupt signal. */
    if (ret != IPC_SUCCESS) {
        tfm_core_panic();
    }

    /* It is a fatal error if the passed signal is not currently asserted. */
    if ((partition->runtime_data.signals & irq_signal) == 0) {
        tfm_core_panic();
    }

    partition->runtime_data.signals &= ~irq_signal;

    tfm_spm_hal_clear_pending_irq(irq_line);
    tfm_spm_hal_enable_irq(irq_line);
}
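
/*
 * Illustrative sketch (not part of the build): the partition-side interrupt
 * handling loop that pairs with the SVC handler above. psa_wait() and
 * psa_eoi() are the standard PSA FF APIs; EXAMPLE_IRQ_SIGNAL is a
 * placeholder.
 *
 *   psa_signal_t signals = psa_wait(EXAMPLE_IRQ_SIGNAL, PSA_BLOCK);
 *   if (signals & EXAMPLE_IRQ_SIGNAL) {
 *       ...service the device...
 *       psa_eoi(EXAMPLE_IRQ_SIGNAL);   // clears pending, re-enables the line
 *   }
 */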

void tfm_spm_enable_irq(uint32_t *args)
{
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args;
    psa_signal_t irq_signal = svc_ctx->r0;
    IRQn_Type irq_line = (IRQn_Type) 0;
    int32_t ret;
    struct spm_partition_desc_t *partition = NULL;

    /* It is a fatal error if the passed signal has more than one bit set. */
    if (!tfm_is_one_bit_set(irq_signal)) {
        tfm_core_panic();
    }

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    ret = get_irq_line_for_signal(partition->static_data->partition_id,
                                  irq_signal, &irq_line);
    /* It is a fatal error if the passed signal is not an interrupt signal. */
    if (ret != IPC_SUCCESS) {
        tfm_core_panic();
    }

    tfm_spm_hal_enable_irq(irq_line);
}

void tfm_spm_disable_irq(uint32_t *args)
{
    struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args;
    psa_signal_t irq_signal = svc_ctx->r0;
    IRQn_Type irq_line = (IRQn_Type) 0;
    int32_t ret;
    struct spm_partition_desc_t *partition = NULL;

    /* It is a fatal error if the passed signal has more than one bit set. */
    if (!tfm_is_one_bit_set(irq_signal)) {
        tfm_core_panic();
    }

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    ret = get_irq_line_for_signal(partition->static_data->partition_id,
                                  irq_signal, &irq_line);
    /* It is a fatal error if the passed signal is not an interrupt signal. */
    if (ret != IPC_SUCCESS) {
        tfm_core_panic();
    }

    tfm_spm_hal_disable_irq(irq_line);
}
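
/*
 * Illustrative sketch (not part of the build): a Secure Partition is expected
 * to reach the two SVC handlers above through TF-M's partition-level wrappers
 * (assumed here to be tfm_enable_irq()/tfm_disable_irq(), taking the IRQ
 * signal rather than the IRQ number):
 *
 *   tfm_disable_irq(EXAMPLE_IRQ_SIGNAL);
 *   ...critical section with the device interrupt masked...
 *   tfm_enable_irq(EXAMPLE_IRQ_SIGNAL);
 */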

void tfm_spm_validate_caller(struct spm_partition_desc_t *p_cur_sp,
                             uint32_t *p_ctx, uint32_t exc_return,
                             bool ns_caller)
{
    uintptr_t stacked_ctx_pos;

    if (ns_caller) {
        /*
         * A background IRQ cannot be supported: if a Secure Partition were
         * executing, the preempted SP context could differ from the context
         * of the caller that preempted the veneer.
         */
        if (p_cur_sp->static_data->partition_id != TFM_SP_NON_SECURE_ID) {
            tfm_core_panic();
        }

        /*
         * The caller is non-secure; check whether the veneer stack contains
         * multiple contexts.
         */
        stacked_ctx_pos = (uintptr_t)p_ctx +
                          sizeof(struct tfm_state_context_t) +
                          TFM_VENEER_STACK_GUARD_SIZE;

        if (is_stack_alloc_fp_space(exc_return)) {
#if defined (__FPU_USED) && (__FPU_USED == 1U)
            if (FPU->FPCCR & FPU_FPCCR_TS_Msk) {
                stacked_ctx_pos += TFM_ADDTIONAL_FP_CONTEXT_WORDS *
                                   sizeof(uint32_t);
            }
#endif
            stacked_ctx_pos += TFM_BASIC_FP_CONTEXT_WORDS * sizeof(uint32_t);
        }

        if (stacked_ctx_pos != p_cur_sp->runtime_data.sp_thrd.stk_top) {
            tfm_core_panic();
        }
    } else if (p_cur_sp->static_data->partition_id <= 0) {
        tfm_core_panic();
    }
}
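
/*
 * Note on the non-secure caller check above: for a single stacked context,
 * the distance from the stacked frame pointer p_ctx up to the thread's
 * stk_top is expected to be exactly
 *
 *   sizeof(struct tfm_state_context_t)
 *     + TFM_VENEER_STACK_GUARD_SIZE
 *     + (FP context words, if the EXC_RETURN indicates FP state was stacked),
 *
 * so any additional context on the veneer stack makes stacked_ctx_pos differ
 * from stk_top and is treated as a fatal error.
 */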

void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
{
    uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
    uint32_t running_partition_flags = 0;
    const struct spm_partition_desc_t *partition = NULL;

    /* Check permissions on a request type basis */

    switch (svc_ctx->r0) {
    case TFM_SPM_REQUEST_RESET_VOTE:
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }
        running_partition_flags = partition->static_data->partition_flags;

        /* Currently only PSA Root of Trust services are allowed to make a
         * reset vote request.
         */
        if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
            /* Reject the request; do not fall through to the success path. */
            *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
            break;
        }

        /* FixMe: this is a placeholder for checks to be performed before
         * allowing execution of reset
         */
        *res_ptr = (uint32_t)TFM_SUCCESS;

        break;
    default:
        *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
    }
}
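
/*
 * Illustrative sketch (not part of the build): a PSA RoT partition reaches
 * this handler via an SVC with TFM_SPM_REQUEST_RESET_VOTE in r0. The helper
 * name below is assumed for illustration rather than confirmed by this file.
 *
 *   if (tfm_spm_request_reset_vote() != TFM_SUCCESS) {
 *       ...the vote was rejected (caller is not a PSA RoT partition)...
 *   }
 */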

enum spm_err_t tfm_spm_db_init(void)
{
    uint32_t i;

    /* This function initialises the partition db */

    for (i = 0; i < g_spm_partition_db.partition_count; i++) {
        g_spm_partition_db.partitions[i].static_data = &static_data_list[i];
        g_spm_partition_db.partitions[i].platform_data_list =
                                                   platform_data_list_list[i];
        g_spm_partition_db.partitions[i].memory_data = &memory_data_list[i];
    }
    g_spm_partition_db.is_init = 1;

    return SPM_ERR_OK;
}
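
/*
 * Illustrative sketch (not part of the build): once tfm_spm_db_init() has
 * linked the static, platform and memory data, other SPM code can index the
 * database directly, conceptually:
 *
 *   for (uint32_t i = 0; i < g_spm_partition_db.partition_count; i++) {
 *       const struct spm_partition_desc_t *p =
 *           &g_spm_partition_db.partitions[i];
 *       if (p->static_data->partition_id == EXAMPLE_PARTITION_ID) {
 *           ...found the partition descriptor...
 *       }
 *   }
 *
 * EXAMPLE_PARTITION_ID is a placeholder for a generated partition ID.
 */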