/*
 * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <inttypes.h>
#include <stdbool.h>
#include "psa/client.h"
#include "psa/service.h"
#include "psa/lifecycle.h"
#include "tfm_thread.h"
#include "tfm_wait.h"
#include "tfm_utils.h"
#include "tfm_internal_defines.h"
#include "tfm_message_queue.h"
#include "tfm_spm_hal.h"
#include "tfm_irq_list.h"
#include "tfm_api.h"
#include "tfm_secure_api.h"
#include "tfm_memory_utils.h"
#include "spm_api.h"
#include "tfm_peripherals_def.h"
#include "spm_db.h"
#include "tfm_core_utils.h"
#include "spm_psa_client_call.h"
#include "tfm_rpc.h"
#include "tfm_internal.h"
#include "tfm_core_trustzone.h"
#include "tfm_core_mem_check.h"
#include "tfm_list.h"
#include "tfm_pools.h"
#include "region.h"
#include "region_defs.h"
#include "tfm/tfm_spm_services_api.h"

#include "secure_fw/partitions/tfm_service_list.inc"
#include "tfm_spm_db_ipc.inc"

/* Extern service variables */
extern struct tfm_spm_service_t service[];
extern const struct tfm_spm_service_db_t service_db[];

/* Pools */
TFM_POOL_DECLARE(conn_handle_pool, sizeof(struct tfm_conn_handle_t),
                 TFM_CONN_HANDLE_MAX_NUM);

void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal,
                     IRQn_Type irq_line);

#include "tfm_secure_irq_handlers_ipc.inc"

/*********************** Connection handle conversion APIs *******************/

/* Set a minimal value here to leave room for feature expansion. */
#define CLIENT_HANDLE_VALUE_MIN        32

#define CONVERSION_FACTOR_BITOFFSET    3
#define CONVERSION_FACTOR_VALUE        (1 << CONVERSION_FACTOR_BITOFFSET)
/* Set 32 as the maximum */
#define CONVERSION_FACTOR_VALUE_MAX    0x20

#if CONVERSION_FACTOR_VALUE > CONVERSION_FACTOR_VALUE_MAX
#error "CONVERSION FACTOR OUT OF RANGE"
#endif

static uint32_t loop_index;

/*
 * A handle instance psa_handle_t allocated inside SPM is actually a memory
 * address within the handle pool. Returning this handle to the client
 * directly exposes information about secure memory addresses. Therefore the
 * handle is converted into another value that does not represent the memory
 * address, to avoid exposing secure memory directly to clients.
 *
 * This function converts the handle instance into another value by scaling
 * its offset in the pool; the converted value is called a user handle.
 *
 * The formula:
 *  user_handle = (handle_instance - POOL_START) * CONVERSION_FACTOR_VALUE +
 *                CLIENT_HANDLE_VALUE_MIN + loop_index
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 *
 * note:
 *  loop_index is used to ensure that the same handle instance is converted
 *  into different user handles over a short period of time.
 */
static psa_handle_t tfm_spm_to_user_handle(
                                    struct tfm_conn_handle_t *handle_instance)
{
    psa_handle_t user_handle;

    loop_index = (loop_index + 1) % CONVERSION_FACTOR_VALUE;
    user_handle = (psa_handle_t)((((uintptr_t)handle_instance -
                  (uintptr_t)conn_handle_pool) << CONVERSION_FACTOR_BITOFFSET) +
                  CLIENT_HANDLE_VALUE_MIN + loop_index);

    return user_handle;
}

/*
 * This function converts a user handle into its corresponding handle
 * instance. The converted value is validated before returning; an invalid
 * handle instance is returned as NULL.
 *
 * The formula:
 *  handle_instance = ((user_handle - CLIENT_HANDLE_VALUE_MIN) /
 *                    CONVERSION_FACTOR_VALUE) + POOL_START
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 */
struct tfm_conn_handle_t *tfm_spm_to_handle_instance(psa_handle_t user_handle)
{
    struct tfm_conn_handle_t *handle_instance;

    if (user_handle == PSA_NULL_HANDLE) {
        return NULL;
    }

    handle_instance = (struct tfm_conn_handle_t *)((((uintptr_t)user_handle -
                      CLIENT_HANDLE_VALUE_MIN) >> CONVERSION_FACTOR_BITOFFSET) +
                      (uintptr_t)conn_handle_pool);

    return handle_instance;
}
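
/*
 * Worked example of the two conversions above (illustrative only; the byte
 * offset is hypothetical and the values assume CONVERSION_FACTOR_BITOFFSET
 * == 3 and CLIENT_HANDLE_VALUE_MIN == 32 as defined in this file): a handle
 * instance located 0x40 bytes after the start of conn_handle_pool converts
 * to (0x40 << 3) + 32 + loop_index = 544 + loop_index, so repeated
 * conversions of the same instance yield user handles 544..551. The reverse
 * conversion right-shifts by the same amount, which discards the loop_index
 * bits, so all of these user handles map back to the same pool instance.
 */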

/* Service handle management functions */
struct tfm_conn_handle_t *tfm_spm_create_conn_handle(
                                        struct tfm_spm_service_t *service,
                                        int32_t client_id)
{
    struct tfm_conn_handle_t *p_handle;

    TFM_CORE_ASSERT(service);

    /* Get buffer for handle list structure from handle pool */
    p_handle = (struct tfm_conn_handle_t *)tfm_pool_alloc(conn_handle_pool);
    if (!p_handle) {
        return NULL;
    }

    p_handle->service = service;
    p_handle->status = TFM_HANDLE_STATUS_IDLE;
    p_handle->client_id = client_id;

    /* Add handle node to the list for subsequent PSA functions */
    tfm_list_add_tail(&service->handle_list, &p_handle->list);

    return p_handle;
}

int32_t tfm_spm_validate_conn_handle(
                                    const struct tfm_conn_handle_t *conn_handle,
                                    int32_t client_id)
{
    /* Check that the handle address is valid */
    if (is_valid_chunk_data_in_pool(conn_handle_pool,
                                    (uint8_t *)conn_handle) != true) {
        return IPC_ERROR_GENERIC;
    }

    /* Check that the handle belongs to the caller */
    if (conn_handle->client_id != client_id) {
        return IPC_ERROR_GENERIC;
    }

    return IPC_SUCCESS;
}
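
/*
 * Typical usage sketch (illustrative only, not a requirement of this API): a
 * client-supplied psa_handle_t is first converted back to a pool instance
 * with tfm_spm_to_handle_instance() and then checked against the caller's
 * client ID before being used:
 *
 *   struct tfm_conn_handle_t *h = tfm_spm_to_handle_instance(user_handle);
 *   if (tfm_spm_validate_conn_handle(h, client_id) != IPC_SUCCESS) {
 *       tfm_core_panic();
 *   }
 */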

/**
 * \brief Free a connection handle that is no longer used.
 *
 * \param[in] service           Target service context pointer
 * \param[in] conn_handle       Connection handle created by
 *                              tfm_spm_create_conn_handle()
 *
 * \retval IPC_SUCCESS               Success
 * \retval IPC_ERROR_BAD_PARAMETERS  Bad parameters input
 * \retval "Does not return"         Panic if the service for the handle
 *                                   cannot be found
 */
static int32_t tfm_spm_free_conn_handle(struct tfm_spm_service_t *service,
                                        struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(conn_handle != NULL);

    /* Clear magic as the handle is not used anymore */
    conn_handle->internal_msg.magic = 0;

    /* Remove node from handle list */
    tfm_list_del_node(&conn_handle->list);

    /* Return the handle buffer to the pool */
    tfm_pool_free(conn_handle);
    return IPC_SUCCESS;
}

/**
 * \brief Set the reverse handle value for a connection.
 *
 * \param[in] service           Target service context pointer
 * \param[in] conn_handle       Connection handle created by
 *                              tfm_spm_create_conn_handle()
 * \param[in] rhandle           Reverse handle value to be saved
 *
 * \retval IPC_SUCCESS               Success
 * \retval IPC_ERROR_BAD_PARAMETERS  Bad parameters input
 * \retval "Does not return"         Panic if the handle node is not found
 */
static int32_t tfm_spm_set_rhandle(struct tfm_spm_service_t *service,
                                   struct tfm_conn_handle_t *conn_handle,
                                   void *rhandle)
{
    TFM_CORE_ASSERT(service);
    /* Set the reverse handle value only for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    conn_handle->rhandle = rhandle;
    return IPC_SUCCESS;
}

/**
 * \brief Get the reverse handle value from a connection handle.
 *
 * \param[in] service           Target service context pointer
 * \param[in] conn_handle       Connection handle created by
 *                              tfm_spm_create_conn_handle()
 *
 * \retval void *               Success
 * \retval "Does not return"    Panic if:
 *                              the service pointer is NULL,
 *                              the handle is \ref PSA_NULL_HANDLE,
 *                              or the handle node is not found
 */
static void *tfm_spm_get_rhandle(struct tfm_spm_service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    /* Get the reverse handle value only for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    return conn_handle->rhandle;
}

/* Partition management functions */

/**
 * \brief Get the service context by signal.
 *
 * \param[in] partition     Partition context pointer
 *                          \ref spm_partition_desc_t structures
 * \param[in] signal        Signal associated with inputs to the Secure
 *                          Partition, \ref psa_signal_t
 *
 * \retval NULL             Failed
 * \retval "Not NULL"       Target service context pointer,
 *                          \ref tfm_spm_service_t structures
 */
static struct tfm_spm_service_t *
    tfm_spm_get_service_by_signal(struct spm_partition_desc_t *partition,
                                  psa_signal_t signal)
{
    struct tfm_list_node_t *node, *head;
    struct tfm_spm_service_t *service;

    TFM_CORE_ASSERT(partition);

    if (tfm_list_is_empty(&partition->runtime_data.service_list)) {
        tfm_core_panic();
    }

    head = &partition->runtime_data.service_list;
    TFM_LIST_FOR_EACH(node, head) {
        service = TFM_GET_CONTAINER_PTR(node, struct tfm_spm_service_t, list);
        if (service->service_db->signal == signal) {
            return service;
        }
    }
    return NULL;
}

struct tfm_spm_service_t *tfm_spm_get_service_by_sid(uint32_t sid)
{
    uint32_t i, num;

    num = sizeof(service) / sizeof(struct tfm_spm_service_t);
    for (i = 0; i < num; i++) {
        if (service[i].service_db->sid == sid) {
            return &service[i];
        }
    }

    return NULL;
}

/**
 * \brief Get the partition context by partition ID.
 *
 * \param[in] partition_id      Partition identity
 *
 * \retval NULL                 Failed
 * \retval "Not NULL"           Target partition context pointer,
 *                              \ref spm_partition_desc_t structures
 */
static struct spm_partition_desc_t *
    tfm_spm_get_partition_by_id(int32_t partition_id)
{
    uint32_t idx = get_partition_idx(partition_id);

    if (idx != SPM_INVALID_PARTITION_IDX) {
        return &(g_spm_partition_db.partitions[idx]);
    }
    return NULL;
}

struct spm_partition_desc_t *tfm_spm_get_running_partition(void)
{
    uint32_t spid;

    spid = tfm_spm_partition_get_running_partition_id();

    return tfm_spm_get_partition_by_id(spid);
}

int32_t tfm_spm_check_client_version(struct tfm_spm_service_t *service,
                                     uint32_t version)
{
    TFM_CORE_ASSERT(service);

    switch (service->service_db->version_policy) {
    case TFM_VERSION_POLICY_RELAXED:
        if (version > service->service_db->version) {
            return IPC_ERROR_VERSION;
        }
        break;
    case TFM_VERSION_POLICY_STRICT:
        if (version != service->service_db->version) {
            return IPC_ERROR_VERSION;
        }
        break;
    default:
        return IPC_ERROR_VERSION;
    }
    return IPC_SUCCESS;
}
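
/*
 * Example of the version policy check above (illustrative values): for a
 * service declared with version 2, TFM_VERSION_POLICY_RELAXED accepts client
 * versions 1 and 2 but rejects 3, while TFM_VERSION_POLICY_STRICT accepts
 * only an exact match of 2.
 */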

int32_t tfm_spm_check_authorization(uint32_t sid,
                                    struct tfm_spm_service_t *service,
                                    bool ns_caller)
{
    struct spm_partition_desc_t *partition = NULL;
    int32_t i;

    TFM_CORE_ASSERT(service);

    if (ns_caller) {
        if (!service->service_db->non_secure_client) {
            return IPC_ERROR_GENERIC;
        }
    } else {
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }

        for (i = 0; i < partition->static_data->dependencies_num; i++) {
            if (partition->static_data->p_dependencies[i] == sid) {
                break;
            }
        }

        if (i == partition->static_data->dependencies_num) {
            return IPC_ERROR_GENERIC;
        }
    }
    return IPC_SUCCESS;
}

/* Message functions */

/**
 * \brief Get message context by message handle.
 *
 * \param[in] msg_handle        Message handle which is a reference generated
 *                              by the SPM to a specific message.
 *
 * \return The message body context pointer
 *         \ref tfm_msg_body_t structures
 */
static struct tfm_msg_body_t *
    tfm_spm_get_msg_from_handle(psa_handle_t msg_handle)
{
    /*
     * The message handle passed by the caller is considered invalid in the
     * following cases:
     *   1. It is not a valid message handle (the address of the message is
     *      not the address of a possible handle from the pool).
     *   2. The handle does not belong to the caller partition (the handle is
     *      either unused, or owned by another partition).
     * Check the conditions above.
     */
    struct tfm_conn_handle_t *connection_handle_address;
    struct tfm_msg_body_t *msg;
    uint32_t partition_id;

    msg = (struct tfm_msg_body_t *)msg_handle;

    connection_handle_address =
        TFM_GET_CONTAINER_PTR(msg, struct tfm_conn_handle_t, internal_msg);

    if (is_valid_chunk_data_in_pool(
        conn_handle_pool, (uint8_t *)connection_handle_address) != 1) {
        return NULL;
    }

    /*
     * Check that the magic number is correct. This proves that the message
     * structure contains an active message.
     */
    if (msg->magic != TFM_MSG_MAGIC) {
        return NULL;
    }

    /* Check that the running partition owns the message */
    partition_id = tfm_spm_partition_get_running_partition_id();
    if (partition_id != msg->service->partition->static_data->partition_id) {
        return NULL;
    }

    /*
     * FixMe: For condition 1 it should be checked whether the message belongs
     * to the service. Skipping this check isn't a security risk as even if the
     * message belongs to another service, the handle belongs to the calling
     * partition.
     */

    return msg;
}

struct tfm_msg_body_t *
    tfm_spm_get_msg_buffer_from_conn_handle(struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(conn_handle != NULL);

    return &(conn_handle->internal_msg);
}

void tfm_spm_fill_msg(struct tfm_msg_body_t *msg,
                      struct tfm_spm_service_t *service,
                      struct tfm_conn_handle_t *handle,
                      int32_t type, int32_t client_id,
                      psa_invec *invec, size_t in_len,
                      psa_outvec *outvec, size_t out_len,
                      psa_outvec *caller_outvec)
{
    uint32_t i;

    TFM_CORE_ASSERT(msg);
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(!(invec == NULL && in_len != 0));
    TFM_CORE_ASSERT(!(outvec == NULL && out_len != 0));
    TFM_CORE_ASSERT(in_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(out_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(in_len + out_len <= PSA_MAX_IOVEC);

    /* Clear the message buffer before using it */
    tfm_core_util_memset(msg, 0, sizeof(struct tfm_msg_body_t));

    tfm_event_init(&msg->ack_evnt);
    msg->magic = TFM_MSG_MAGIC;
    msg->service = service;
    msg->handle = handle;
    msg->caller_outvec = caller_outvec;
    msg->msg.client_id = client_id;

    /* Copy contents */
    msg->msg.type = type;

    for (i = 0; i < in_len; i++) {
        msg->msg.in_size[i] = invec[i].len;
        msg->invec[i].base = invec[i].base;
    }

    for (i = 0; i < out_len; i++) {
        msg->msg.out_size[i] = outvec[i].len;
        msg->outvec[i].base = outvec[i].base;
        /* Out len records the number of bytes written, so set it to 0 here */
        msg->outvec[i].len = 0;
    }

    /* Use the message address as the handle */
    msg->msg.handle = (psa_handle_t)msg;

    /* For a connected handle, set rhandle on every message */
    if (handle) {
        msg->msg.rhandle = tfm_spm_get_rhandle(service, handle);
    }

    /* Set the private data of the NSPE client caller in multi-core topology */
    if (TFM_CLIENT_ID_IS_NS(client_id)) {
        tfm_rpc_set_caller_data(msg, client_id);
    }
}

int32_t tfm_spm_send_event(struct tfm_spm_service_t *service,
                           struct tfm_msg_body_t *msg)
{
    struct spm_partition_runtime_data_t *p_runtime_data =
                                            &service->partition->runtime_data;

    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(msg);

    /* Enqueue message to the service message queue */
    if (tfm_msg_enqueue(&service->msg_queue, msg) != IPC_SUCCESS) {
        return IPC_ERROR_GENERIC;
    }

    /* Message is queued. Update signals */
    p_runtime_data->signals |= service->service_db->signal;

    tfm_event_wake(&p_runtime_data->signal_evnt, (p_runtime_data->signals &
                                                  p_runtime_data->signal_mask));

    /*
     * If it is an NS request via RPC, it is unnecessary to block the current
     * thread.
     */
    if (!is_tfm_rpc_msg(msg)) {
        tfm_event_wait(&msg->ack_evnt);
    }

    return IPC_SUCCESS;
}

/**
 * \brief Get bottom of stack region for a partition
 *
 * \param[in] partition_idx     Partition index
 *
 * \return Stack region bottom value
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_stack_bottom(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].
            memory_data->stack_bottom;
}

/**
 * \brief Get top of stack region for a partition
 *
 * \param[in] partition_idx     Partition index
 *
 * \return Stack region top value
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_stack_top(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].memory_data->stack_top;
}

uint32_t tfm_spm_partition_get_running_partition_id(void)
{
    struct tfm_core_thread_t *pth = tfm_core_thrd_get_curr_thread();
    struct spm_partition_desc_t *partition;
    struct spm_partition_runtime_data_t *r_data;

    r_data = TFM_GET_CONTAINER_PTR(pth, struct spm_partition_runtime_data_t,
                                   sp_thrd);
    partition = TFM_GET_CONTAINER_PTR(r_data, struct spm_partition_desc_t,
                                      runtime_data);
    return partition->static_data->partition_id;
}

static struct tfm_core_thread_t *
    tfm_spm_partition_get_thread_info(uint32_t partition_idx)
{
    return &g_spm_partition_db.partitions[partition_idx].runtime_data.sp_thrd;
}

static tfm_core_thrd_entry_t
    tfm_spm_partition_get_init_func(uint32_t partition_idx)
{
    return (tfm_core_thrd_entry_t)(g_spm_partition_db.partitions[partition_idx].
                                   static_data->partition_init);
}

static uint32_t tfm_spm_partition_get_priority(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
            partition_priority;
}

int32_t tfm_memory_check(const void *buffer, size_t len, bool ns_caller,
                         enum tfm_memory_access_e access,
                         uint32_t privileged)
{
    enum tfm_status_e err;

    /* If len is zero, this indicates an empty buffer and base is ignored */
    if (len == 0) {
        return IPC_SUCCESS;
    }

    if (!buffer) {
        return IPC_ERROR_BAD_PARAMETERS;
    }

    if ((uintptr_t)buffer > (UINTPTR_MAX - len)) {
        return IPC_ERROR_MEMORY_CHECK;
    }

    if (access == TFM_MEMORY_ACCESS_RW) {
        err = tfm_core_has_write_access_to_region(buffer, len, ns_caller,
                                                  privileged);
    } else {
        err = tfm_core_has_read_access_to_region(buffer, len, ns_caller,
                                                 privileged);
    }
    if (err == TFM_SUCCESS) {
        return IPC_SUCCESS;
    }

    return IPC_ERROR_MEMORY_CHECK;
}
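
/*
 * Usage sketch for tfm_memory_check() (illustrative only, mirroring how the
 * SVC handlers below validate caller-supplied buffers; 'privileged' is
 * assumed to have been derived from the caller partition flags):
 *
 *   if (tfm_memory_check(outvec->base, outvec->len, ns_caller,
 *                        TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
 *       tfm_core_panic();
 *   }
 */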

uint32_t tfm_spm_init(void)
{
    uint32_t i, j, num;
    struct spm_partition_desc_t *partition;
    struct tfm_core_thread_t *pth, *p_ns_entry_thread = NULL;
    const struct tfm_spm_partition_platform_data_t **platform_data_p;

    tfm_pool_init(conn_handle_pool,
                  POOL_BUFFER_SIZE(conn_handle_pool),
                  sizeof(struct tfm_conn_handle_t),
                  TFM_CONN_HANDLE_MAX_NUM);

    /* Init partitions first as they are used when initializing services */
    for (i = 0; i < g_spm_partition_db.partition_count; i++) {
        partition = &g_spm_partition_db.partitions[i];

        /* Check if the PSA framework version matches. */
        if (partition->static_data->psa_framework_version !=
            PSA_FRAMEWORK_VERSION) {
            ERROR_MSG("Warning: PSA Framework Version does not match!");
            continue;
        }

        platform_data_p = partition->platform_data_list;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(i,
                    *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    tfm_core_panic();
                }
                ++platform_data_p;
            }
        }

        if ((tfm_spm_partition_get_flags(i) & SPM_PART_FLAG_IPC) == 0) {
            continue;
        }

        /* Add PSA_DOORBELL signal to assigned_signals */
        partition->runtime_data.assigned_signals |= PSA_DOORBELL;

        /* TODO: This can be optimized by generating the assigned signal
         * at code generation time.
         */
        for (j = 0; j < tfm_core_irq_signals_count; ++j) {
            if (tfm_core_irq_signals[j].partition_id ==
                partition->static_data->partition_id) {
                partition->runtime_data.assigned_signals |=
                    tfm_core_irq_signals[j].signal_value;
            }
        }

        tfm_event_init(&partition->runtime_data.signal_evnt);
        tfm_list_init(&partition->runtime_data.service_list);

        pth = tfm_spm_partition_get_thread_info(i);
        if (!pth) {
            tfm_core_panic();
        }

        tfm_core_thrd_init(pth,
                           tfm_spm_partition_get_init_func(i),
                           NULL,
                           (uintptr_t)tfm_spm_partition_get_stack_top(i),
                           (uintptr_t)tfm_spm_partition_get_stack_bottom(i));

        pth->prior = tfm_spm_partition_get_priority(i);

        if (partition->static_data->partition_id == TFM_SP_NON_SECURE_ID) {
            p_ns_entry_thread = pth;
            pth->param = (void *)tfm_spm_hal_get_ns_entry_point();
        }

        /* Kick off */
        if (tfm_core_thrd_start(pth) != THRD_SUCCESS) {
            tfm_core_panic();
        }
    }

    /* Init services */
    num = sizeof(service) / sizeof(struct tfm_spm_service_t);
    for (i = 0; i < num; i++) {
        service[i].service_db = &service_db[i];
        partition =
            tfm_spm_get_partition_by_id(service[i].service_db->partition_id);
        if (!partition) {
            tfm_core_panic();
        }
        service[i].partition = partition;
        partition->runtime_data.assigned_signals |= service[i].service_db->signal;

        tfm_list_init(&service[i].handle_list);
        tfm_list_add_tail(&partition->runtime_data.service_list,
                          &service[i].list);
    }

    /*
     * All threads are initialized; start the scheduler.
     *
     * NOTE:
     * It is worthwhile to give the thread object to the scheduler if the
     * background context belongs to one of the threads. Here the background
     * thread is the initialization thread that calls the SPM SVC, which
     * re-uses the non-secure entry thread's stack. After SPM initialization
     * is done, this stack is cleaned up and the background context is never
     * going to return. Tell the scheduler that the current thread is the
     * non-secure entry thread.
     */
    tfm_core_thrd_start_scheduler(p_ns_entry_thread);

    return p_ns_entry_thread->arch_ctx.lr;
}

void tfm_pendsv_do_schedule(struct tfm_arch_ctx_t *p_actx)
{
#if TFM_LVL == 2
    struct spm_partition_desc_t *p_next_partition;
    struct spm_partition_runtime_data_t *r_data;
    uint32_t is_privileged;
#endif
    struct tfm_core_thread_t *pth_next = tfm_core_thrd_get_next_thread();
    struct tfm_core_thread_t *pth_curr = tfm_core_thrd_get_curr_thread();

    if (pth_next != NULL && pth_curr != pth_next) {
#if TFM_LVL == 2
        r_data = TFM_GET_CONTAINER_PTR(pth_next,
                                       struct spm_partition_runtime_data_t,
                                       sp_thrd);
        p_next_partition = TFM_GET_CONTAINER_PTR(r_data,
                                                 struct spm_partition_desc_t,
                                                 runtime_data);

        if (p_next_partition->static_data->partition_flags &
            SPM_PART_FLAG_PSA_ROT) {
            is_privileged = TFM_PARTITION_PRIVILEGED_MODE;
        } else {
            is_privileged = TFM_PARTITION_UNPRIVILEGED_MODE;
        }

        tfm_spm_partition_change_privilege(is_privileged);
#endif

        tfm_core_thrd_switch_context(p_actx, pth_curr, pth_next);
    }

    /*
     * Handle pending mailbox message from NS in multi-core topology.
     * Empty operation on single Armv8-M platform.
     */
    tfm_rpc_client_call_handler();
}

/*********************** SPM functions for PSA Client APIs *******************/

uint32_t tfm_spm_psa_framework_version(void)
{
    return tfm_spm_client_psa_framework_version();
}

uint32_t tfm_spm_psa_version(uint32_t *args, bool ns_caller)
{
    uint32_t sid;

    TFM_CORE_ASSERT(args != NULL);
    sid = (uint32_t)args[0];

    return tfm_spm_client_psa_version(sid, ns_caller);
}

psa_status_t tfm_spm_psa_connect(uint32_t *args, bool ns_caller)
{
    uint32_t sid;
    uint32_t version;

    TFM_CORE_ASSERT(args != NULL);
    sid = (uint32_t)args[0];
    version = (uint32_t)args[1];

    return tfm_spm_client_psa_connect(sid, version, ns_caller);
}

psa_status_t tfm_spm_psa_call(uint32_t *args, bool ns_caller, uint32_t lr)
{
    psa_handle_t handle;
    psa_invec *inptr;
    psa_outvec *outptr;
    size_t in_num, out_num;
    struct spm_partition_desc_t *partition = NULL;
    uint32_t privileged;
    int32_t type;
    struct tfm_control_parameter_t ctrl_param;

    TFM_CORE_ASSERT(args != NULL);
    handle = (psa_handle_t)args[0];

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
        partition->static_data->partition_flags);

    /*
     * Read parameters from the arguments. It is a fatal error if the
     * memory reference for buffer is invalid or not readable.
     */
    if (tfm_memory_check((const void *)args[1],
        sizeof(struct tfm_control_parameter_t), ns_caller,
        TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
        tfm_core_panic();
    }

    tfm_core_util_memcpy(&ctrl_param,
                         (const void *)args[1],
                         sizeof(ctrl_param));

    type = ctrl_param.type;
    in_num = ctrl_param.in_len;
    out_num = ctrl_param.out_len;
    inptr = (psa_invec *)args[2];
    outptr = (psa_outvec *)args[3];

    /* The request type must be zero or positive. */
    if (type < 0) {
        tfm_core_panic();
    }

    return tfm_spm_client_psa_call(handle, type, inptr, in_num, outptr, out_num,
                                   ns_caller, privileged);
}

void tfm_spm_psa_close(uint32_t *args, bool ns_caller)
{
    psa_handle_t handle;

    TFM_CORE_ASSERT(args != NULL);
    handle = args[0];

    tfm_spm_client_psa_close(handle, ns_caller);
}

uint32_t tfm_spm_get_lifecycle_state(void)
{
    /*
     * FixMe: return PSA_LIFECYCLE_UNKNOWN to the caller directly. It will be
     * implemented in the future.
     */
    return PSA_LIFECYCLE_UNKNOWN;
}

/********************* SPM functions for PSA Service APIs ********************/

psa_signal_t tfm_spm_psa_wait(uint32_t *args)
{
    psa_signal_t signal_mask;
    uint32_t timeout;
    struct spm_partition_desc_t *partition = NULL;

    TFM_CORE_ASSERT(args != NULL);
    signal_mask = (psa_signal_t)args[0];
    timeout = args[1];

    /*
     * Timeout[30:0] are reserved for future use.
     * SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the signal_mask does not include any
     * assigned signals.
     */
    if ((partition->runtime_data.assigned_signals & signal_mask) == 0) {
        tfm_core_panic();
    }

    /*
     * Expected signals are included in the signal wait mask; ignored signals
     * should not be set and affect the caller thread state. Save this mask
     * for further checking when signals are ready to be set.
     */
    partition->runtime_data.signal_mask = signal_mask;

    /*
     * tfm_event_wait() blocks the caller thread if no signals are available.
     * In this case, the return value of this function is temporarily stored
     * in the runtime context. After new signal(s) become available, the
     * return value is updated with the available signal(s) and the blocked
     * thread gets to run.
     */
    if (timeout == PSA_BLOCK &&
        (partition->runtime_data.signals & signal_mask) == 0) {
        tfm_event_wait(&partition->runtime_data.signal_evnt);
    }

    return partition->runtime_data.signals & signal_mask;
}
| 942 | |
| 943 | psa_status_t tfm_spm_psa_get(uint32_t *args) |
| 944 | { |
| 945 | psa_signal_t signal; |
| 946 | psa_msg_t *msg = NULL; |
| 947 | struct tfm_spm_service_t *service = NULL; |
| 948 | struct tfm_msg_body_t *tmp_msg = NULL; |
| 949 | struct spm_partition_desc_t *partition = NULL; |
| 950 | uint32_t privileged; |
| 951 | |
| 952 | TFM_CORE_ASSERT(args != NULL); |
| 953 | signal = (psa_signal_t)args[0]; |
| 954 | msg = (psa_msg_t *)args[1]; |
| 955 | |
| 956 | /* |
| 957 | * Only one message could be retrieved every time for psa_get(). It is a |
| 958 | * fatal error if the input signal has more than a signal bit set. |
| 959 | */ |
Ken Liu | 410ada5 | 2020-01-08 11:37:27 +0800 | [diff] [blame] | 960 | if (!tfm_is_one_bit_set(signal)) { |
Mingyang Sun | d44522a | 2020-01-16 16:48:37 +0800 | [diff] [blame] | 961 | tfm_core_panic(); |
| 962 | } |
| 963 | |
| 964 | partition = tfm_spm_get_running_partition(); |
| 965 | if (!partition) { |
| 966 | tfm_core_panic(); |
| 967 | } |
| 968 | privileged = tfm_spm_partition_get_privileged_mode( |
| 969 | partition->static_data->partition_flags); |
| 970 | |
| 971 | /* |
| 972 | * Write the message to the service buffer. It is a fatal error if the |
| 973 | * input msg pointer is not a valid memory reference or not read-write. |
| 974 | */ |
| 975 | if (tfm_memory_check(msg, sizeof(psa_msg_t), false, TFM_MEMORY_ACCESS_RW, |
| 976 | privileged) != IPC_SUCCESS) { |
| 977 | tfm_core_panic(); |
| 978 | } |
| 979 | |
| 980 | /* |
| 981 | * It is a fatal error if the caller call psa_get() when no message has |
| 982 | * been set. The caller must call this function after an RoT Service signal |
| 983 | * is returned by psa_wait(). |
| 984 | */ |
| 985 | if (partition->runtime_data.signals == 0) { |
| 986 | tfm_core_panic(); |
| 987 | } |
| 988 | |
| 989 | /* |
| 990 | * It is a fatal error if the RoT Service signal is not currently asserted. |
| 991 | */ |
| 992 | if ((partition->runtime_data.signals & signal) == 0) { |
| 993 | tfm_core_panic(); |
| 994 | } |
| 995 | |
| 996 | /* |
| 997 | * Get RoT service by signal from partition. It is a fatal error if getting |
| 998 | * failed, which means the input signal is not correspond to an RoT service. |
| 999 | */ |
| 1000 | service = tfm_spm_get_service_by_signal(partition, signal); |
| 1001 | if (!service) { |
| 1002 | tfm_core_panic(); |
| 1003 | } |
| 1004 | |
| 1005 | tmp_msg = tfm_msg_dequeue(&service->msg_queue); |
| 1006 | if (!tmp_msg) { |
| 1007 | return PSA_ERROR_DOES_NOT_EXIST; |
| 1008 | } |
| 1009 | |
| 1010 | ((struct tfm_conn_handle_t *)(tmp_msg->handle))->status = |
| 1011 | TFM_HANDLE_STATUS_ACTIVE; |
| 1012 | |
| 1013 | tfm_core_util_memcpy(msg, &tmp_msg->msg, sizeof(psa_msg_t)); |
| 1014 | |
| 1015 | /* |
| 1016 | * There may be multiple messages for this RoT Service signal; do not clear |
| 1017 | * its signal bit until no messages remain. |
| 1018 | */ |
| 1019 | if (tfm_msg_queue_is_empty(&service->msg_queue)) { |
| 1020 | partition->runtime_data.signals &= ~signal; |
| 1021 | } |
| 1022 | |
| 1023 | return PSA_SUCCESS; |
| 1024 | } |
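/*
 * Illustrative sketch (guarded out, never compiled): retrieving one queued
 * message for an asserted RoT Service signal with psa_get(). The signal name
 * and the handling below are hypothetical placeholders.
 */
#if 0
#include "psa/service.h"

#define EXAMPLE_SERVICE_SIGNAL (1U << 4)    /* hypothetical manifest signal */

void example_get_one_message(void)
{
    psa_msg_t msg;

    /* Exactly one message is delivered per successful psa_get(); the copy in
     * 'msg' carries the message type, client ID and the iovec sizes. */
    if (psa_get(EXAMPLE_SERVICE_SIGNAL, &msg) != PSA_SUCCESS) {
        return;
    }

    int from_nonsecure = (msg.client_id < 0);   /* negative IDs: NS callers */
    (void)from_nonsecure;

    /* Every retrieved message must eventually be completed with psa_reply().*/
    psa_reply(msg.handle, PSA_SUCCESS);
}
#endif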
| 1025 | |
| 1026 | void tfm_spm_psa_set_rhandle(uint32_t *args) |
| 1027 | { |
| 1028 | psa_handle_t msg_handle; |
| 1029 | void *rhandle = NULL; |
| 1030 | struct tfm_msg_body_t *msg = NULL; |
| 1031 | |
| 1032 | TFM_CORE_ASSERT(args != NULL); |
| 1033 | msg_handle = (psa_handle_t)args[0]; |
| 1034 | rhandle = (void *)args[1]; |
| 1035 | |
| 1036 | /* It is a fatal error if message handle is invalid */ |
| 1037 | msg = tfm_spm_get_msg_from_handle(msg_handle); |
| 1038 | if (!msg) { |
| 1039 | tfm_core_panic(); |
| 1040 | } |
| 1041 | |
| 1042 | msg->msg.rhandle = rhandle; |
| 1043 | |
| 1044 | /* Store reverse handle for following client calls. */ |
| 1045 | tfm_spm_set_rhandle(msg->service, msg->handle, rhandle); |
| 1046 | } |
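/*
 * Illustrative sketch (guarded out, never compiled): binding per-connection
 * state with psa_set_rhandle() while accepting a connection; the rhandle is
 * then delivered back in every later message on that handle. The session
 * allocator is a hypothetical placeholder.
 */
#if 0
#include <stdint.h>
#include <stdlib.h>
#include "psa/service.h"

struct example_session {
    uint32_t request_count;
};

static void example_accept_connection(const psa_msg_t *msg)
{
    struct example_session *s = calloc(1, sizeof(*s));

    if (!s) {
        psa_reply(msg->handle, PSA_ERROR_CONNECTION_REFUSED);
        return;
    }

    /* Attach the session to the connection; later messages expose it again
     * through msg->rhandle. */
    psa_set_rhandle(msg->handle, s);
    psa_reply(msg->handle, PSA_SUCCESS);
}
#endif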
| 1047 | |
| 1048 | size_t tfm_spm_psa_read(uint32_t *args) |
| 1049 | { |
| 1050 | psa_handle_t msg_handle; |
| 1051 | uint32_t invec_idx; |
| 1052 | void *buffer = NULL; |
| 1053 | size_t num_bytes; |
| 1054 | size_t bytes; |
| 1055 | struct tfm_msg_body_t *msg = NULL; |
| 1056 | uint32_t privileged; |
| 1057 | struct spm_partition_desc_t *partition = NULL; |
| 1058 | |
| 1059 | TFM_CORE_ASSERT(args != NULL); |
| 1060 | msg_handle = (psa_handle_t)args[0]; |
| 1061 | invec_idx = args[1]; |
| 1062 | buffer = (void *)args[2]; |
| 1063 | num_bytes = (size_t)args[3]; |
| 1064 | |
| 1065 | /* It is a fatal error if message handle is invalid */ |
| 1066 | msg = tfm_spm_get_msg_from_handle(msg_handle); |
| 1067 | if (!msg) { |
| 1068 | tfm_core_panic(); |
| 1069 | } |
| 1070 | |
| 1071 | partition = msg->service->partition; |
| 1072 | privileged = tfm_spm_partition_get_privileged_mode( |
| 1073 | partition->static_data->partition_flags); |
| 1074 | |
| 1075 | /* |
| 1076 | * It is a fatal error if message handle does not refer to a request |
| 1077 | * message |
| 1078 | */ |
| 1079 | if (msg->msg.type < PSA_IPC_CALL) { |
| 1080 | tfm_core_panic(); |
| 1081 | } |
| 1082 | |
| 1083 | /* |
| 1084 | * It is a fatal error if invec_idx is equal to or greater than |
| 1085 | * PSA_MAX_IOVEC |
| 1086 | */ |
| 1087 | if (invec_idx >= PSA_MAX_IOVEC) { |
| 1088 | tfm_core_panic(); |
| 1089 | } |
| 1090 | |
| 1091 | /* There is no remaining data in this input vector */ |
| 1092 | if (msg->msg.in_size[invec_idx] == 0) { |
| 1093 | return 0; |
| 1094 | } |
| 1095 | |
| 1096 | /* |
| 1097 | * Copy the client data to the service buffer. It is a fatal error |
| 1098 | * if the memory reference for buffer is invalid or not read-write. |
| 1099 | */ |
| 1100 | if (tfm_memory_check(buffer, num_bytes, false, |
| 1101 | TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) { |
| 1102 | tfm_core_panic(); |
| 1103 | } |
| 1104 | |
| 1105 | bytes = num_bytes > msg->msg.in_size[invec_idx] ? |
| 1106 | msg->msg.in_size[invec_idx] : num_bytes; |
| 1107 | |
| 1108 | tfm_core_util_memcpy(buffer, msg->invec[invec_idx].base, bytes); |
| 1109 | |
| 1110 | /* There may be some remaining data */ |
| 1111 | msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + bytes; |
| 1112 | msg->msg.in_size[invec_idx] -= bytes; |
| 1113 | |
| 1114 | return bytes; |
| 1115 | } |
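/*
 * Illustrative sketch (guarded out, never compiled): consuming a client input
 * vector in chunks with psa_read(), matching the partial-read behaviour
 * implemented above. The chunk size and the consumer are hypothetical.
 */
#if 0
#include <stdint.h>
#include "psa/service.h"

void example_consume(const uint8_t *data, size_t len);   /* hypothetical */

static void example_read_invec0(const psa_msg_t *msg)
{
    uint8_t chunk[64];
    size_t n;

    /* msg->in_size[0] holds the total bytes supplied by the client in
     * invec 0; psa_read() returns the bytes actually copied and 0 once the
     * vector is exhausted. */
    while ((n = psa_read(msg->handle, 0, chunk, sizeof(chunk))) != 0) {
        example_consume(chunk, n);
    }
}
#endif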
| 1116 | |
| 1117 | size_t tfm_spm_psa_skip(uint32_t *args) |
| 1118 | { |
| 1119 | psa_handle_t msg_handle; |
| 1120 | uint32_t invec_idx; |
| 1121 | size_t num_bytes; |
| 1122 | struct tfm_msg_body_t *msg = NULL; |
| 1123 | |
| 1124 | TFM_CORE_ASSERT(args != NULL); |
| 1125 | msg_handle = (psa_handle_t)args[0]; |
| 1126 | invec_idx = args[1]; |
| 1127 | num_bytes = (size_t)args[2]; |
| 1128 | |
| 1129 | /* It is a fatal error if message handle is invalid */ |
| 1130 | msg = tfm_spm_get_msg_from_handle(msg_handle); |
| 1131 | if (!msg) { |
| 1132 | tfm_core_panic(); |
| 1133 | } |
| 1134 | |
| 1135 | /* |
| 1136 | * It is a fatal error if message handle does not refer to a request |
| 1137 | * message |
| 1138 | */ |
| 1139 | if (msg->msg.type < PSA_IPC_CALL) { |
| 1140 | tfm_core_panic(); |
| 1141 | } |
| 1142 | |
| 1143 | /* |
| 1144 | * It is a fatal error if invec_idx is equal to or greater than |
| 1145 | * PSA_MAX_IOVEC |
| 1146 | */ |
| 1147 | if (invec_idx >= PSA_MAX_IOVEC) { |
| 1148 | tfm_core_panic(); |
| 1149 | } |
| 1150 | |
| 1151 | /* There is no remaining data in this input vector */ |
| 1152 | if (msg->msg.in_size[invec_idx] == 0) { |
| 1153 | return 0; |
| 1154 | } |
| 1155 | |
| 1156 | /* |
| 1157 | * If num_bytes is greater than the remaining size of the input vector then |
| 1158 | * the remaining size of the input vector is used. |
| 1159 | */ |
| 1160 | if (num_bytes > msg->msg.in_size[invec_idx]) { |
| 1161 | num_bytes = msg->msg.in_size[invec_idx]; |
| 1162 | } |
| 1163 | |
| 1164 | /* There may be some remaining data */ |
| 1165 | msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + |
| 1166 | num_bytes; |
| 1167 | msg->msg.in_size[invec_idx] -= num_bytes; |
| 1168 | |
| 1169 | return num_bytes; |
| 1170 | } |
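/*
 * Illustrative sketch (guarded out, never compiled): dropping the leading
 * bytes of an input vector with psa_skip() before reading the payload. The
 * 16-byte header size is a hypothetical value.
 */
#if 0
#include "psa/service.h"

static void example_skip_header(const psa_msg_t *msg)
{
    /* At most 16 bytes are skipped; fewer if the client supplied less. The
     * return value is the number of bytes actually skipped. */
    size_t skipped = psa_skip(msg->handle, 1, 16);

    (void)skipped;
}
#endif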
| 1171 | |
| 1172 | void tfm_spm_psa_write(uint32_t *args) |
| 1173 | { |
| 1174 | psa_handle_t msg_handle; |
| 1175 | uint32_t outvec_idx; |
| 1176 | void *buffer = NULL; |
| 1177 | size_t num_bytes; |
| 1178 | struct tfm_msg_body_t *msg = NULL; |
| 1179 | uint32_t privileged; |
| 1180 | struct spm_partition_desc_t *partition = NULL; |
| 1181 | |
| 1182 | TFM_CORE_ASSERT(args != NULL); |
| 1183 | msg_handle = (psa_handle_t)args[0]; |
| 1184 | outvec_idx = args[1]; |
| 1185 | buffer = (void *)args[2]; |
| 1186 | num_bytes = (size_t)args[3]; |
| 1187 | |
| 1188 | /* It is a fatal error if message handle is invalid */ |
| 1189 | msg = tfm_spm_get_msg_from_handle(msg_handle); |
| 1190 | if (!msg) { |
| 1191 | tfm_core_panic(); |
| 1192 | } |
| 1193 | |
| 1194 | partition = msg->service->partition; |
| 1195 | privileged = tfm_spm_partition_get_privileged_mode( |
| 1196 | partition->static_data->partition_flags); |
| 1197 | |
| 1198 | /* |
| 1199 | * It is a fatal error if message handle does not refer to a request |
| 1200 | * message |
| 1201 | */ |
| 1202 | if (msg->msg.type < PSA_IPC_CALL) { |
| 1203 | tfm_core_panic(); |
| 1204 | } |
| 1205 | |
| 1206 | /* |
| 1207 | * It is a fatal error if outvec_idx is equal to or greater than |
| 1208 | * PSA_MAX_IOVEC |
| 1209 | */ |
| 1210 | if (outvec_idx >= PSA_MAX_IOVEC) { |
| 1211 | tfm_core_panic(); |
| 1212 | } |
| 1213 | |
| 1214 | /* |
| 1215 | * It is a fatal error if the call attempts to write data past the end of |
| 1216 | * the client output vector |
| 1217 | */ |
| 1218 | if (num_bytes > msg->msg.out_size[outvec_idx] - |
| 1219 | msg->outvec[outvec_idx].len) { |
| 1220 | tfm_core_panic(); |
| 1221 | } |
| 1222 | |
| 1223 | /* |
| 1224 | * Copy the service buffer to client outvecs. It is a fatal error |
| 1225 | * if the memory reference for buffer is invalid or not readable. |
| 1226 | */ |
| 1227 | if (tfm_memory_check(buffer, num_bytes, false, |
| 1228 | TFM_MEMORY_ACCESS_RO, privileged) != IPC_SUCCESS) { |
| 1229 | tfm_core_panic(); |
| 1230 | } |
| 1231 | |
| 1232 | tfm_core_util_memcpy((char *)msg->outvec[outvec_idx].base + |
| 1233 | msg->outvec[outvec_idx].len, buffer, num_bytes); |
| 1234 | |
| 1235 | /* Update the number of bytes written */ |
| 1236 | msg->outvec[outvec_idx].len += num_bytes; |
| 1237 | } |
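/*
 * Illustrative sketch (guarded out, never compiled): producing output for the
 * client with psa_write(). Writes accumulate in the output vector, and the
 * SPM reports the final length back through the caller's psa_outvec. The
 * payload is a hypothetical value.
 */
#if 0
#include <stdint.h>
#include "psa/service.h"

static void example_write_version(const psa_msg_t *msg)
{
    static const uint8_t version[2] = { 1, 0 };

    /* Writing more than the remaining space in outvec 0 is a fatal error,
     * so check the size the client provided first. */
    if (msg->out_size[0] >= sizeof(version)) {
        psa_write(msg->handle, 0, version, sizeof(version));
    }
}
#endif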
| 1238 | |
| 1239 | static void update_caller_outvec_len(struct tfm_msg_body_t *msg) |
| 1240 | { |
| 1241 | uint32_t i; |
| 1242 | |
| 1243 | /* |
| 1244 | * FixMe: abstract this part into dedicated functions to avoid |
| 1245 | * accessing the thread context in the PSA layer |
| 1246 | */ |
| 1247 | /* If it is an NS request via RPC, the owner of this message is not set */ |
| 1248 | if (!is_tfm_rpc_msg(msg)) { |
| 1249 | TFM_CORE_ASSERT(msg->ack_evnt.owner->state == THRD_STATE_BLOCK); |
| 1250 | } |
| 1251 | |
| 1252 | for (i = 0; i < PSA_MAX_IOVEC; i++) { |
| 1253 | if (msg->msg.out_size[i] == 0) { |
| 1254 | continue; |
| 1255 | } |
| 1256 | |
| 1257 | TFM_CORE_ASSERT(msg->caller_outvec[i].base == msg->outvec[i].base); |
| 1258 | |
| 1259 | msg->caller_outvec[i].len = msg->outvec[i].len; |
| 1260 | } |
| 1261 | } |
| 1262 | |
| 1263 | void tfm_spm_psa_reply(uint32_t *args) |
| 1264 | { |
| 1265 | psa_handle_t msg_handle; |
| 1266 | psa_status_t status; |
| 1267 | struct tfm_spm_service_t *service = NULL; |
| 1268 | struct tfm_msg_body_t *msg = NULL; |
| 1269 | int32_t ret = PSA_SUCCESS; |
| 1270 | |
| 1271 | TFM_CORE_ASSERT(args != NULL); |
| 1272 | msg_handle = (psa_handle_t)args[0]; |
| 1273 | status = (psa_status_t)args[1]; |
| 1274 | |
| 1275 | /* It is a fatal error if message handle is invalid */ |
| 1276 | msg = tfm_spm_get_msg_from_handle(msg_handle); |
| 1277 | if (!msg) { |
| 1278 | tfm_core_panic(); |
| 1279 | } |
| 1280 | |
| 1281 | /* |
| 1282 | * The RoT Service information is needed in this function, so it is stored |
| 1283 | * in the message body structure. Only two parameters (handle and status) |
| 1284 | * are passed to this function, so this keeps the implementation simple. |
| 1285 | */ |
| 1286 | service = msg->service; |
| 1287 | if (!service) { |
| 1288 | tfm_core_panic(); |
| 1289 | } |
| 1290 | |
| 1291 | /* |
| 1292 | * Three types of message are handled by this function: CONNECTION, REQUEST |
| 1293 | * and DISCONNECTION. Each type needs to be processed differently. |
| 1294 | */ |
| 1295 | switch (msg->msg.type) { |
| 1296 | case PSA_IPC_CONNECT: |
| 1297 | /* |
| 1298 | * Reply to a PSA_IPC_CONNECT message. The connection handle is returned |
| 1299 | * if the input status is PSA_SUCCESS; other return values are based on |
| 1300 | * the input status. |
| 1301 | */ |
| 1302 | if (status == PSA_SUCCESS) { |
Summer Qin | 373feb1 | 2020-03-27 15:35:33 +0800 | [diff] [blame] | 1303 | ret = tfm_spm_to_user_handle(msg->handle); |
Mingyang Sun | d44522a | 2020-01-16 16:48:37 +0800 | [diff] [blame] | 1304 | } else if (status == PSA_ERROR_CONNECTION_REFUSED) { |
| 1305 | /* Refuse the client connection, indicating a permanent error. */ |
| 1306 | tfm_spm_free_conn_handle(service, msg->handle); |
| 1307 | ret = PSA_ERROR_CONNECTION_REFUSED; |
| 1308 | } else if (status == PSA_ERROR_CONNECTION_BUSY) { |
| 1309 | /* Fail the client connection, indicating a transient error. */ |
| 1310 | ret = PSA_ERROR_CONNECTION_BUSY; |
| 1311 | } else { |
| 1312 | tfm_core_panic(); |
| 1313 | } |
| 1314 | break; |
| 1315 | case PSA_IPC_DISCONNECT: |
| 1316 | /* The connection handle is not used anymore */ |
| 1317 | tfm_spm_free_conn_handle(service, msg->handle); |
| 1318 | |
| 1319 | /* |
| 1320 | * If the message type is PSA_IPC_DISCONNECT, then the status code is |
| 1321 | * ignored |
| 1322 | */ |
| 1323 | break; |
| 1324 | default: |
| 1325 | if (msg->msg.type >= PSA_IPC_CALL) { |
| 1326 | /* Reply to a request message. Return values are based on status */ |
| 1327 | ret = status; |
| 1328 | /* |
| 1329 | * The total number of bytes written to a single parameter must be |
| 1330 | * reported to the client by updating the len member of the |
| 1331 | * psa_outvec structure for the parameter before returning from |
| 1332 | * psa_call(). |
| 1333 | */ |
| 1334 | update_caller_outvec_len(msg); |
| 1335 | } else { |
| 1336 | tfm_core_panic(); |
| 1337 | } |
| 1338 | } |
| 1339 | |
| 1340 | if (ret == PSA_ERROR_PROGRAMMER_ERROR) { |
| 1341 | /* |
| 1342 | * If the source of the programmer error is a Secure Partition, the SPM |
| 1343 | * must panic the Secure Partition in response to a PROGRAMMER ERROR. |
| 1344 | */ |
| 1345 | if (TFM_CLIENT_ID_IS_NS(msg->msg.client_id)) { |
| 1346 | ((struct tfm_conn_handle_t *)(msg->handle))->status = |
| 1347 | TFM_HANDLE_STATUS_CONNECT_ERROR; |
| 1348 | } else { |
| 1349 | tfm_core_panic(); |
| 1350 | } |
| 1351 | } else { |
| 1352 | ((struct tfm_conn_handle_t *)(msg->handle))->status = |
| 1353 | TFM_HANDLE_STATUS_IDLE; |
| 1354 | } |
| 1355 | |
| 1356 | if (is_tfm_rpc_msg(msg)) { |
| 1357 | tfm_rpc_client_call_reply(msg, ret); |
| 1358 | } else { |
| 1359 | tfm_event_wake(&msg->ack_evnt, ret); |
| 1360 | } |
| 1361 | } |
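/*
 * Illustrative sketch (guarded out, never compiled): the partition-side
 * dispatch that ends every message with psa_reply(), covering the three
 * message types handled above. The signal name and the request processing
 * are hypothetical placeholders.
 */
#if 0
#include "psa/service.h"

#define EXAMPLE_SERVICE_SIGNAL (1U << 4)                    /* hypothetical */

psa_status_t example_process_request(const psa_msg_t *msg); /* hypothetical */

void example_handle_service(void)
{
    psa_msg_t msg;

    if (psa_get(EXAMPLE_SERVICE_SIGNAL, &msg) != PSA_SUCCESS) {
        return;
    }

    switch (msg.type) {
    case PSA_IPC_CONNECT:
        /* PSA_ERROR_CONNECTION_REFUSED or PSA_ERROR_CONNECTION_BUSY could be
         * returned here instead to reject the connection. */
        psa_reply(msg.handle, PSA_SUCCESS);
        break;
    case PSA_IPC_DISCONNECT:
        /* The status code is ignored for disconnection messages. */
        psa_reply(msg.handle, PSA_SUCCESS);
        break;
    default:
        /* msg.type >= PSA_IPC_CALL identifies a request message. */
        psa_reply(msg.handle, example_process_request(&msg));
        break;
    }
}
#endif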
| 1362 | |
| 1363 | /** |
| 1364 | * \brief Notify the partition with the given signal. |
| 1365 | * |
| 1366 | * \param[in] partition_id The ID of the partition to be notified. |
| 1367 | * \param[in] signal The signal that the partition is to be notified |
| 1368 | * with. |
| 1369 | * |
| 1370 | * \retval void Success. |
| 1371 | * \retval "Does not return" If partition_id is invalid. |
| 1372 | */ |
| 1373 | static void notify_with_signal(int32_t partition_id, psa_signal_t signal) |
| 1374 | { |
| 1375 | struct spm_partition_desc_t *partition = NULL; |
| 1376 | |
| 1377 | /* |
| 1378 | * The value of partition_id must be greater than zero as the target of |
| 1379 | * notification must be a Secure Partition; providing a Non-secure |
| 1380 | * Partition ID is a fatal error. |
| 1381 | */ |
| 1382 | if (!TFM_CLIENT_ID_IS_S(partition_id)) { |
| 1383 | tfm_core_panic(); |
| 1384 | } |
| 1385 | |
| 1386 | /* |
| 1387 | * It is a fatal error if partition_id does not correspond to a Secure |
| 1388 | * Partition. |
| 1389 | */ |
| 1390 | partition = tfm_spm_get_partition_by_id(partition_id); |
| 1391 | if (!partition) { |
| 1392 | tfm_core_panic(); |
| 1393 | } |
| 1394 | |
| 1395 | partition->runtime_data.signals |= signal; |
| 1396 | |
| 1397 | /* |
| 1398 | * The target partition may be blocked waiting for signals after having |
| 1399 | * called psa_wait(). Set the return value to the available signals |
| 1400 | * before waking it up with tfm_event_wake(). |
| 1401 | */ |
| 1402 | tfm_event_wake(&partition->runtime_data.signal_evnt, |
| 1403 | partition->runtime_data.signals & |
| 1404 | partition->runtime_data.signal_mask); |
| 1405 | } |
| 1406 | |
| 1407 | void tfm_spm_psa_notify(uint32_t *args) |
| 1408 | { |
| 1409 | int32_t partition_id; |
| 1410 | |
| 1411 | TFM_CORE_ASSERT(args != NULL); |
| 1412 | partition_id = (int32_t)args[0]; |
| 1413 | |
| 1414 | notify_with_signal(partition_id, PSA_DOORBELL); |
| 1415 | } |
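/*
 * Illustrative sketch (guarded out, never compiled): the doorbell flow built
 * on psa_notify()/psa_wait()/psa_clear(). EXAMPLE_PARTITION_ID is a
 * hypothetical Secure Partition ID taken from the manifests.
 */
#if 0
#include "psa/service.h"

#define EXAMPLE_PARTITION_ID 0x100          /* hypothetical partition ID */

/* In the signalling partition: assert the target's doorbell signal. */
void example_ring_doorbell(void)
{
    psa_notify(EXAMPLE_PARTITION_ID);
}

/* In the target partition: wait for the doorbell, then clear it so it can be
 * asserted again. */
void example_wait_doorbell(void)
{
    (void)psa_wait(PSA_DOORBELL, PSA_BLOCK);
    psa_clear();
}
#endif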
| 1416 | |
| 1417 | /** |
| 1418 | * \brief Assert a signal for a given IRQ line. |
| 1419 | * |
| 1420 | * \param[in] partition_id The ID of the partition which handles this IRQ |
| 1421 | * \param[in] signal The signal associated with this IRQ |
| 1422 | * \param[in] irq_line The number of the IRQ line |
| 1423 | * |
| 1424 | * \retval void Success. |
| 1425 | * \retval "Does not return" Partition ID is invalid |
| 1426 | */ |
| 1427 | void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal, |
TTornblom | faf74f5 | 2020-03-04 17:56:27 +0100 | [diff] [blame] | 1428 | IRQn_Type irq_line) |
Mingyang Sun | d44522a | 2020-01-16 16:48:37 +0800 | [diff] [blame] | 1429 | { |
| 1430 | tfm_spm_hal_disable_irq(irq_line); |
| 1431 | notify_with_signal(partition_id, signal); |
| 1432 | } |
| 1433 | |
| 1434 | void tfm_spm_psa_clear(void) |
| 1435 | { |
| 1436 | struct spm_partition_desc_t *partition = NULL; |
| 1437 | |
| 1438 | partition = tfm_spm_get_running_partition(); |
| 1439 | if (!partition) { |
| 1440 | tfm_core_panic(); |
| 1441 | } |
| 1442 | |
| 1443 | /* |
| 1444 | * It is a fatal error if the Secure Partition's doorbell signal is not |
| 1445 | * currently asserted. |
| 1446 | */ |
| 1447 | if ((partition->runtime_data.signals & PSA_DOORBELL) == 0) { |
| 1448 | tfm_core_panic(); |
| 1449 | } |
| 1450 | partition->runtime_data.signals &= ~PSA_DOORBELL; |
| 1451 | } |
| 1452 | |
| 1453 | void tfm_spm_psa_panic(void) |
| 1454 | { |
| 1455 | /* |
| 1456 | * PSA FF recommends that the SPM cause the system to restart when a Secure |
| 1457 | * Partition panics. |
| 1458 | */ |
| 1459 | tfm_spm_hal_system_reset(); |
| 1460 | } |
| 1461 | |
| 1462 | /** |
| 1463 | * \brief Return the IRQ line number associated with a signal |
| 1464 | * |
| 1465 | * \param[in] partition_id The ID of the partition in which we look for |
| 1466 | * the signal. |
| 1467 | * \param[in] signal The signal to query for. |
| 1468 | * \param[out] irq_line The IRQ line associated with the signal |
| 1469 | * |
| 1470 | * \retval IPC_SUCCESS Execution successful, irq_line contains a valid |
| 1471 | * value. |
| 1472 | * \retval IPC_ERROR_GENERIC There was an error finding the IRQ line for the |
| 1473 | * signal. irq_line is unchanged. |
| 1474 | */ |
| 1475 | static int32_t get_irq_line_for_signal(int32_t partition_id, |
| 1476 | psa_signal_t signal, |
TTornblom | faf74f5 | 2020-03-04 17:56:27 +0100 | [diff] [blame] | 1477 | IRQn_Type *irq_line) |
Mingyang Sun | d44522a | 2020-01-16 16:48:37 +0800 | [diff] [blame] | 1478 | { |
| 1479 | size_t i; |
| 1480 | |
| 1481 | for (i = 0; i < tfm_core_irq_signals_count; ++i) { |
| 1482 | if (tfm_core_irq_signals[i].partition_id == partition_id && |
| 1483 | tfm_core_irq_signals[i].signal_value == signal) { |
| 1484 | *irq_line = tfm_core_irq_signals[i].irq_line; |
| 1485 | return IPC_SUCCESS; |
| 1486 | } |
| 1487 | } |
| 1488 | return IPC_ERROR_GENERIC; |
| 1489 | } |
| 1490 | |
| 1491 | void tfm_spm_psa_eoi(uint32_t *args) |
| 1492 | { |
| 1493 | psa_signal_t irq_signal; |
TTornblom | faf74f5 | 2020-03-04 17:56:27 +0100 | [diff] [blame] | 1494 | IRQn_Type irq_line = (IRQn_Type) 0; |
Mingyang Sun | d44522a | 2020-01-16 16:48:37 +0800 | [diff] [blame] | 1495 | int32_t ret; |
| 1496 | struct spm_partition_desc_t *partition = NULL; |
| 1497 | |
| 1498 | TFM_CORE_ASSERT(args != NULL); |
| 1499 | irq_signal = (psa_signal_t)args[0]; |
| 1500 | |
| 1501 | /* It is a fatal error if the passed signal indicates more than one signal. */ |
| 1502 | if (!tfm_is_one_bit_set(irq_signal)) { |
| 1503 | tfm_core_panic(); |
| 1504 | } |
| 1505 | |
| 1506 | partition = tfm_spm_get_running_partition(); |
| 1507 | if (!partition) { |
| 1508 | tfm_core_panic(); |
| 1509 | } |
| 1510 | |
| 1511 | ret = get_irq_line_for_signal(partition->static_data->partition_id, |
| 1512 | irq_signal, &irq_line); |
| 1513 | /* It is a fatal error if the passed signal is not an interrupt signal. */ |
| 1514 | if (ret != IPC_SUCCESS) { |
| 1515 | tfm_core_panic(); |
| 1516 | } |
| 1517 | |
| 1518 | /* It is a fatal error if the passed signal is not currently asserted */ |
| 1519 | if ((partition->runtime_data.signals & irq_signal) == 0) { |
| 1520 | tfm_core_panic(); |
| 1521 | } |
| 1522 | |
| 1523 | partition->runtime_data.signals &= ~irq_signal; |
| 1524 | |
| 1525 | tfm_spm_hal_clear_pending_irq(irq_line); |
| 1526 | tfm_spm_hal_enable_irq(irq_line); |
| 1527 | } |
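/*
 * Illustrative sketch (guarded out, never compiled): the partition side of
 * the interrupt flow. tfm_irq_handler() above disables the line and asserts
 * the signal; the partition services the device and re-arms the line with
 * psa_eoi(). EXAMPLE_IRQ_SIGNAL and the device servicing are hypothetical.
 */
#if 0
#include "psa/service.h"

#define EXAMPLE_IRQ_SIGNAL (1U << 27)       /* hypothetical IRQ signal bit */

void example_service_device(void);          /* hypothetical device handling */

void example_irq_loop(void)
{
    while (1) {
        /* Wait for the SPM to assert the IRQ signal. */
        (void)psa_wait(EXAMPLE_IRQ_SIGNAL, PSA_BLOCK);

        example_service_device();

        /* Clear the signal; the SPM clears the pending IRQ and re-enables
         * the line. */
        psa_eoi(EXAMPLE_IRQ_SIGNAL);
    }
}
#endif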
| 1528 | |
| 1529 | void tfm_spm_enable_irq(uint32_t *args) |
| 1530 | { |
| 1531 | struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args; |
| 1532 | psa_signal_t irq_signal = svc_ctx->r0; |
TTornblom | faf74f5 | 2020-03-04 17:56:27 +0100 | [diff] [blame] | 1533 | IRQn_Type irq_line = (IRQn_Type) 0; |
Mingyang Sun | d44522a | 2020-01-16 16:48:37 +0800 | [diff] [blame] | 1534 | int32_t ret; |
| 1535 | struct spm_partition_desc_t *partition = NULL; |
| 1536 | |
| 1537 | /* It is a fatal error if the passed signal indicates more than one signal. */ |
| 1538 | if (!tfm_is_one_bit_set(irq_signal)) { |
| 1539 | tfm_core_panic(); |
| 1540 | } |
| 1541 | |
| 1542 | partition = tfm_spm_get_running_partition(); |
| 1543 | if (!partition) { |
| 1544 | tfm_core_panic(); |
| 1545 | } |
| 1546 | |
| 1547 | ret = get_irq_line_for_signal(partition->static_data->partition_id, |
| 1548 | irq_signal, &irq_line); |
| 1549 | /* It is a fatal error if the passed signal is not an interrupt signal. */ |
| 1550 | if (ret != IPC_SUCCESS) { |
| 1551 | tfm_core_panic(); |
| 1552 | } |
| 1553 | |
| 1554 | tfm_spm_hal_enable_irq(irq_line); |
| 1555 | } |
| 1556 | |
| 1557 | void tfm_spm_disable_irq(uint32_t *args) |
| 1558 | { |
| 1559 | struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args; |
| 1560 | psa_signal_t irq_signal = svc_ctx->r0; |
TTornblom | faf74f5 | 2020-03-04 17:56:27 +0100 | [diff] [blame] | 1561 | IRQn_Type irq_line = (IRQn_Type) 0; |
Mingyang Sun | d44522a | 2020-01-16 16:48:37 +0800 | [diff] [blame] | 1562 | int32_t ret; |
| 1563 | struct spm_partition_desc_t *partition = NULL; |
| 1564 | |
| 1565 | /* It is a fatal error if the passed signal indicates more than one signal. */ |
| 1566 | if (!tfm_is_one_bit_set(irq_signal)) { |
| 1567 | tfm_core_panic(); |
| 1568 | } |
| 1569 | |
| 1570 | partition = tfm_spm_get_running_partition(); |
| 1571 | if (!partition) { |
| 1572 | tfm_core_panic(); |
| 1573 | } |
| 1574 | |
| 1575 | ret = get_irq_line_for_signal(partition->static_data->partition_id, |
| 1576 | irq_signal, &irq_line); |
| 1577 | /* It is a fatal error if the passed signal is not an interrupt signal. */ |
| 1578 | if (ret != IPC_SUCCESS) { |
| 1579 | tfm_core_panic(); |
| 1580 | } |
| 1581 | |
| 1582 | tfm_spm_hal_disable_irq(irq_line); |
| 1583 | } |
| 1584 | |
| 1585 | void tfm_spm_validate_caller(struct spm_partition_desc_t *p_cur_sp, |
| 1586 | uint32_t *p_ctx, uint32_t exc_return, |
| 1587 | bool ns_caller) |
| 1588 | { |
| 1589 | uintptr_t stacked_ctx_pos; |
| 1590 | |
| 1591 | if (ns_caller) { |
| 1592 | /* |
| 1593 | * A background IRQ cannot be supported: if an SP is executing, the |
| 1594 | * preempted context of the SP can be different from the one that |
| 1595 | * preempted the veneer. |
| 1596 | */ |
| 1597 | if (p_cur_sp->static_data->partition_id != TFM_SP_NON_SECURE_ID) { |
| 1598 | tfm_core_panic(); |
| 1599 | } |
| 1600 | |
| 1601 | /* |
| 1602 | * It is a non-secure caller; check whether the veneer stack contains |
| 1603 | * multiple contexts. |
| 1604 | */ |
| 1605 | stacked_ctx_pos = (uintptr_t)p_ctx + |
| 1606 | sizeof(struct tfm_state_context_t) + |
| 1607 | TFM_VENEER_STACK_GUARD_SIZE; |
| 1608 | |
| 1609 | if (is_stack_alloc_fp_space(exc_return)) { |
| 1610 | #if defined (__FPU_USED) && (__FPU_USED == 1U) |
| 1611 | if (FPU->FPCCR & FPU_FPCCR_TS_Msk) { |
| 1612 | stacked_ctx_pos += TFM_ADDTIONAL_FP_CONTEXT_WORDS * |
| 1613 | sizeof(uint32_t); |
| 1614 | } |
| 1615 | #endif |
| 1616 | stacked_ctx_pos += TFM_BASIC_FP_CONTEXT_WORDS * sizeof(uint32_t); |
| 1617 | } |
| 1618 | |
| 1619 | if (stacked_ctx_pos != p_cur_sp->runtime_data.sp_thrd.stk_top) { |
| 1620 | tfm_core_panic(); |
| 1621 | } |
| 1622 | } else if (p_cur_sp->static_data->partition_id <= 0) { |
| 1623 | tfm_core_panic(); |
| 1624 | } |
| 1625 | } |
Summer Qin | 830c554 | 2020-02-14 13:44:20 +0800 | [diff] [blame] | 1626 | |
| 1627 | void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx) |
| 1628 | { |
| 1629 | uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0; |
| 1630 | uint32_t running_partition_flags = 0; |
| 1631 | const struct spm_partition_desc_t *partition = NULL; |
| 1632 | |
| 1633 | /* Check permissions on request type basis */ |
| 1634 | |
| 1635 | switch (svc_ctx->r0) { |
| 1636 | case TFM_SPM_REQUEST_RESET_VOTE: |
| 1637 | partition = tfm_spm_get_running_partition(); |
| 1638 | if (!partition) { |
| 1639 | tfm_core_panic(); |
| 1640 | } |
| 1641 | running_partition_flags = partition->static_data->partition_flags; |
| 1642 | |
| 1643 | /* Currently only PSA Root of Trust services are allowed to make Reset |
| 1644 | * vote request |
| 1645 | */ |
| 1646 | if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) { |
| 1647 | *res_ptr = (uint32_t)TFM_ERROR_GENERIC; |
| 1648 | break; |
| 1649 | } |
| 1650 | /* FixMe: this is a placeholder for checks to be performed before |
| 1651 | * allowing execution of reset |
| 1652 | */ |
| 1653 | *res_ptr = (uint32_t)TFM_SUCCESS; |
| 1654 | |
| 1655 | break; |
| 1656 | default: |
| 1657 | *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER; |
| 1658 | } |
| 1659 | } |
Mingyang Sun | bd7ceb5 | 2020-06-11 16:53:03 +0800 | [diff] [blame] | 1660 | |
| 1661 | enum spm_err_t tfm_spm_db_init(void) |
| 1662 | { |
| 1663 | uint32_t i; |
| 1664 | |
| 1665 | /* This function initialises partition db */ |
| 1666 | |
| 1667 | for (i = 0; i < g_spm_partition_db.partition_count; i++) { |
| 1668 | g_spm_partition_db.partitions[i].static_data = &static_data_list[i]; |
| 1669 | g_spm_partition_db.partitions[i].platform_data_list = |
| 1670 | platform_data_list_list[i]; |
| 1671 | g_spm_partition_db.partitions[i].memory_data = &memory_data_list[i]; |
| 1672 | } |
| 1673 | g_spm_partition_db.is_init = 1; |
| 1674 | |
| 1675 | return SPM_ERR_OK; |
| 1676 | } |