/*
 * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <inttypes.h>
#include <stdbool.h>
#include "psa/client.h"
#include "psa/service.h"
#include "psa/lifecycle.h"
#include "tfm_thread.h"
#include "tfm_wait.h"
#include "tfm_utils.h"
#include "tfm_internal_defines.h"
#include "tfm_message_queue.h"
#include "tfm_spm_hal.h"
#include "tfm_irq_list.h"
#include "tfm_api.h"
#include "tfm_secure_api.h"
#include "tfm_memory_utils.h"
#include "tfm/spm_api.h"
#include "tfm_peripherals_def.h"
#include "tfm/spm_db.h"
#include "tfm_core_utils.h"
#include "spm_psa_client_call.h"
#include "tfm_rpc.h"
#include "tfm_internal.h"
#include "tfm_core_trustzone.h"
#include "tfm_core_mem_check.h"
#include "tfm_list.h"
#include "tfm_pools.h"
#include "region_defs.h"
#include "tfm/tfm_spm_services_api.h"

#include "secure_fw/partitions/tfm_service_list.inc"

/* Extern service variables */
extern struct tfm_spm_service_t service[];
extern const struct tfm_spm_service_db_t service_db[];

/* Extern SPM variable */
extern struct spm_partition_db_t g_spm_partition_db;

/* Pools */
TFM_POOL_DECLARE(conn_handle_pool, sizeof(struct tfm_conn_handle_t),
                 TFM_CONN_HANDLE_MAX_NUM);

void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal,
                     IRQn_Type irq_line);

#include "tfm_secure_irq_handlers_ipc.inc"

/*********************** Connection handle conversion APIs *******************/

/* Reserve a minimal value here to leave room for future expansion. */
#define CLIENT_HANDLE_VALUE_MIN        32

#define CONVERSION_FACTOR_BITOFFSET    3
#define CONVERSION_FACTOR_VALUE        (1 << CONVERSION_FACTOR_BITOFFSET)
/* Set 32 as the maximum */
#define CONVERSION_FACTOR_VALUE_MAX    0x20

#if CONVERSION_FACTOR_VALUE > CONVERSION_FACTOR_VALUE_MAX
#error "CONVERSION FACTOR OUT OF RANGE"
#endif

static uint32_t loop_index;

/*
 * A handle instance psa_handle_t allocated inside SPM is actually a memory
 * address within the handle pool. Returning this handle to the client
 * directly exposes a secure memory address. Therefore the handle is converted
 * into another value that does not represent the memory address, so that
 * secure memory is not exposed directly to clients.
 *
 * This function converts the handle instance into another value by scaling
 * the handle's offset within the pool; the converted value is referred to as
 * a user handle.
 *
 * The formula:
 *  user_handle = (handle_instance - POOL_START) * CONVERSION_FACTOR_VALUE +
 *                CLIENT_HANDLE_VALUE_MIN + loop_index
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 *
 * note:
 *  loop_index is used to ensure that the same handle instance is converted
 *  into different user handles within a short period of time.
 */
static psa_handle_t tfm_spm_to_user_handle(
                                    struct tfm_conn_handle_t *handle_instance)
{
    psa_handle_t user_handle;

    loop_index = (loop_index + 1) % CONVERSION_FACTOR_VALUE;
    user_handle = (psa_handle_t)((((uintptr_t)handle_instance -
                  (uintptr_t)conn_handle_pool) << CONVERSION_FACTOR_BITOFFSET) +
                  CLIENT_HANDLE_VALUE_MIN + loop_index);

    return user_handle;
}
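
/*
 * Worked example (illustrative only, with made-up offsets): if a handle
 * instance sits 0x40 bytes above conn_handle_pool and loop_index advances
 * to 5, the user handle becomes (0x40 << 3) + 32 + 5 = 512 + 37 = 549.
 * A second conversion of the same instance would use loop_index 6 and yield
 * 550, so consecutive conversions of one instance differ only in the low
 * CONVERSION_FACTOR_BITOFFSET bits.
 */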

/*
 * This function converts a user handle into the corresponding handle
 * instance. The converted value is validated before returning; an invalid
 * handle is returned as NULL.
 *
 * The formula:
 *  handle_instance = ((user_handle - CLIENT_HANDLE_VALUE_MIN) /
 *                    CONVERSION_FACTOR_VALUE) + POOL_START
 * where:
 *  CONVERSION_FACTOR_VALUE = 1 << CONVERSION_FACTOR_BITOFFSET, and should not
 *  exceed CONVERSION_FACTOR_VALUE_MAX.
 *
 *  handle_instance in RANGE[POOL_START, POOL_END]
 *  user_handle in RANGE[CLIENT_HANDLE_VALUE_MIN, 0x3FFFFFFF]
 *  loop_index in RANGE[0, CONVERSION_FACTOR_VALUE - 1]
 */
struct tfm_conn_handle_t *tfm_spm_to_handle_instance(psa_handle_t user_handle)
{
    struct tfm_conn_handle_t *handle_instance;

    if (user_handle == PSA_NULL_HANDLE) {
        return NULL;
    }

    handle_instance = (struct tfm_conn_handle_t *)((((uintptr_t)user_handle -
                      CLIENT_HANDLE_VALUE_MIN) >> CONVERSION_FACTOR_BITOFFSET) +
                      (uintptr_t)conn_handle_pool);

    return handle_instance;
}
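
/*
 * Note on the round trip (derived from the two formulas above): the
 * loop_index added by tfm_spm_to_user_handle() only affects the low
 * CONVERSION_FACTOR_BITOFFSET bits, so the right shift here discards it and
 * every user handle generated from the same pool entry maps back to that
 * entry. Continuing the example above, both 549 and 550 convert back to the
 * instance at byte offset 0x40:
 *   ((549 - 32) >> 3) = 64 = 0x40, ((550 - 32) >> 3) = 64 = 0x40
 */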

/* Service handle management functions */
struct tfm_conn_handle_t *tfm_spm_create_conn_handle(
                                        struct tfm_spm_service_t *service,
                                        int32_t client_id)
{
    struct tfm_conn_handle_t *p_handle;

    TFM_CORE_ASSERT(service);

    /* Get a buffer for the handle node structure from the handle pool */
    p_handle = (struct tfm_conn_handle_t *)tfm_pool_alloc(conn_handle_pool);
    if (!p_handle) {
        return NULL;
    }

    p_handle->service = service;
    p_handle->status = TFM_HANDLE_STATUS_IDLE;
    p_handle->client_id = client_id;

    /* Add the handle node to the list for subsequent PSA calls */
    tfm_list_add_tail(&service->handle_list, &p_handle->list);

    return p_handle;
}
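
/*
 * Typical lifetime of a connection handle (hedged sketch; the actual call
 * sites live in the PSA client call handling code, not in this file):
 *
 *   struct tfm_conn_handle_t *h;
 *
 *   h = tfm_spm_create_conn_handle(service, client_id);     // on psa_connect
 *   ...
 *   if (tfm_spm_validate_conn_handle(h, client_id) != IPC_SUCCESS) {
 *       // reject a request that arrives with a foreign or stale handle
 *   }
 *   ...
 *   tfm_spm_free_conn_handle(service, h);                   // on psa_close
 */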

int32_t tfm_spm_validate_conn_handle(
                                    const struct tfm_conn_handle_t *conn_handle,
                                    int32_t client_id)
{
    /* Check that the handle address is valid */
    if (is_valid_chunk_data_in_pool(conn_handle_pool,
                                    (uint8_t *)conn_handle) != true) {
        return IPC_ERROR_GENERIC;
    }

    /* Check that the handle belongs to the caller */
    if (conn_handle->client_id != client_id) {
        return IPC_ERROR_GENERIC;
    }

    return IPC_SUCCESS;
}

/**
 * \brief Free a connection handle which is not used anymore.
 *
 * \param[in] service            Target service context pointer
 * \param[in] conn_handle        Connection handle created by
 *                               tfm_spm_create_conn_handle()
 *
 * \retval IPC_SUCCESS               Success
 * \retval IPC_ERROR_BAD_PARAMETERS  Bad parameters input
 * \retval "Does not return"         Panic when the service cannot be found
 *                                   by handle
 */
static int32_t tfm_spm_free_conn_handle(struct tfm_spm_service_t *service,
                                        struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(conn_handle != NULL);

    /* Clear the magic as the handle is not used anymore */
    conn_handle->internal_msg.magic = 0;

    /* Remove node from handle list */
    tfm_list_del_node(&conn_handle->list);

    /* Return the handle buffer to the pool */
    tfm_pool_free(conn_handle);
    return IPC_SUCCESS;
}

/**
 * \brief Set the reverse handle value for a connection.
 *
 * \param[in] service            Target service context pointer
 * \param[in] conn_handle        Connection handle created by
 *                               tfm_spm_create_conn_handle()
 * \param[in] rhandle            rhandle value to save
 *
 * \retval IPC_SUCCESS               Success
 * \retval IPC_ERROR_BAD_PARAMETERS  Bad parameters input
 * \retval "Does not return"         Panic when the handle node cannot be
 *                                   found
 */
static int32_t tfm_spm_set_rhandle(struct tfm_spm_service_t *service,
                                   struct tfm_conn_handle_t *conn_handle,
                                   void *rhandle)
{
    TFM_CORE_ASSERT(service);
    /* Setting the reverse handle is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    conn_handle->rhandle = rhandle;
    return IPC_SUCCESS;
}

/**
 * \brief Get the reverse handle value from a connection handle.
 *
 * \param[in] service            Target service context pointer
 * \param[in] conn_handle        Connection handle created by
 *                               tfm_spm_create_conn_handle()
 *
 * \retval void *                    Success
 * \retval "Does not return"         Panic when:
 *                                   the service pointer is NULL,
 *                                   the handle is \ref PSA_NULL_HANDLE,
 *                                   or the handle node cannot be found
 */
static void *tfm_spm_get_rhandle(struct tfm_spm_service_t *service,
                                 struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(service);
    /* Getting the reverse handle is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != NULL);

    return conn_handle->rhandle;
}
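
/*
 * Usage sketch (service side, illustrative only): a partition typically
 * attaches per-connection state through psa_set_rhandle() when it accepts a
 * connection; SPM stores it via tfm_spm_set_rhandle() so that every later
 * message carries it back in msg.rhandle. SOME_SERVICE_SIGNAL and
 * session_alloc()/session_free() below are hypothetical names, not part of
 * this file:
 *
 *   psa_msg_t msg;
 *
 *   psa_get(SOME_SERVICE_SIGNAL, &msg);
 *   switch (msg.type) {
 *   case PSA_IPC_CONNECT:
 *       psa_set_rhandle(msg.handle, session_alloc());
 *       psa_reply(msg.handle, PSA_SUCCESS);
 *       break;
 *   case PSA_IPC_DISCONNECT:
 *       session_free(msg.rhandle);
 *       psa_reply(msg.handle, PSA_SUCCESS);
 *       break;
 *   }
 */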

/* Partition management functions */

/**
 * \brief                   Get the service context by signal.
 *
 * \param[in] partition     Partition context pointer
 *                          \ref spm_partition_desc_t structures
 * \param[in] signal        Signal associated with inputs to the Secure
 *                          Partition, \ref psa_signal_t
 *
 * \retval NULL             Failed
 * \retval "Not NULL"       Target service context pointer,
 *                          \ref tfm_spm_service_t structures
 */
static struct tfm_spm_service_t *
    tfm_spm_get_service_by_signal(struct spm_partition_desc_t *partition,
                                  psa_signal_t signal)
{
    struct tfm_list_node_t *node, *head;
    struct tfm_spm_service_t *service;

    TFM_CORE_ASSERT(partition);

    if (tfm_list_is_empty(&partition->runtime_data.service_list)) {
        tfm_core_panic();
    }

    head = &partition->runtime_data.service_list;
    TFM_LIST_FOR_EACH(node, head) {
        service = TFM_GET_CONTAINER_PTR(node, struct tfm_spm_service_t, list);
        if (service->service_db->signal == signal) {
            return service;
        }
    }
    return NULL;
}

struct tfm_spm_service_t *tfm_spm_get_service_by_sid(uint32_t sid)
{
    uint32_t i, num;

    num = sizeof(service) / sizeof(struct tfm_spm_service_t);
    for (i = 0; i < num; i++) {
        if (service[i].service_db->sid == sid) {
            return &service[i];
        }
    }

    return NULL;
}

/**
 * \brief                   Get the partition context by partition ID.
 *
 * \param[in] partition_id  Partition identity
 *
 * \retval NULL             Failed
 * \retval "Not NULL"       Target partition context pointer,
 *                          \ref spm_partition_desc_t structures
 */
static struct spm_partition_desc_t *
    tfm_spm_get_partition_by_id(int32_t partition_id)
{
    uint32_t idx = get_partition_idx(partition_id);

    if (idx != SPM_INVALID_PARTITION_IDX) {
        return &(g_spm_partition_db.partitions[idx]);
    }
    return NULL;
}

struct spm_partition_desc_t *tfm_spm_get_running_partition(void)
{
    uint32_t spid;

    spid = tfm_spm_partition_get_running_partition_id();

    return tfm_spm_get_partition_by_id(spid);
}

int32_t tfm_spm_check_client_version(struct tfm_spm_service_t *service,
                                     uint32_t version)
{
    TFM_CORE_ASSERT(service);

    switch (service->service_db->version_policy) {
    case TFM_VERSION_POLICY_RELAXED:
        if (version > service->service_db->version) {
            return IPC_ERROR_VERSION;
        }
        break;
    case TFM_VERSION_POLICY_STRICT:
        if (version != service->service_db->version) {
            return IPC_ERROR_VERSION;
        }
        break;
    default:
        return IPC_ERROR_VERSION;
    }
    return IPC_SUCCESS;
}
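
/*
 * Example of the two policies above (numbers are illustrative): for a service
 * declared with version 2, a client requesting version 1 or 2 is accepted
 * under TFM_VERSION_POLICY_RELAXED (requested version <= declared version),
 * while only a request for exactly version 2 is accepted under
 * TFM_VERSION_POLICY_STRICT.
 */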

int32_t tfm_spm_check_authorization(uint32_t sid,
                                    struct tfm_spm_service_t *service,
                                    bool ns_caller)
{
    struct spm_partition_desc_t *partition = NULL;
    int32_t i;

    TFM_CORE_ASSERT(service);

    if (ns_caller) {
        if (!service->service_db->non_secure_client) {
            return IPC_ERROR_GENERIC;
        }
    } else {
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }

        for (i = 0; i < partition->static_data->dependencies_num; i++) {
            if (partition->static_data->p_dependencies[i] == sid) {
                break;
            }
        }

        if (i == partition->static_data->dependencies_num) {
            return IPC_ERROR_GENERIC;
        }
    }
    return IPC_SUCCESS;
}
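
/*
 * Summary of the policy above: a non-secure caller is authorized only when
 * the target service allows non-secure clients, and a secure partition is
 * authorized only when the requested SID is listed among that partition's
 * dependencies (both pieces of information come from the manifests via the
 * generated service and partition databases). Illustrative check at a call
 * site, with a hypothetical SID name:
 *
 *   if (tfm_spm_check_authorization(EXAMPLE_SERVICE_SID, service,
 *                                   ns_caller) != IPC_SUCCESS) {
 *       // refuse the connection or call
 *   }
 */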

/* Message functions */

/**
 * \brief                   Get message context by message handle.
 *
 * \param[in] msg_handle    Message handle which is a reference generated
 *                          by the SPM to a specific message.
 *
 * \return                  The message body context pointer
 *                          \ref tfm_msg_body_t structures
 */
static struct tfm_msg_body_t *
    tfm_spm_get_msg_from_handle(psa_handle_t msg_handle)
{
    /*
     * The message handle passed by the caller is considered invalid in the
     * following cases:
     *   1. Not a valid message handle. (The address of the message is not
     *      the address of a possible handle from the pool.)
     *   2. The handle does not belong to the caller partition. (The handle
     *      is either unused, or owned by another partition.)
     * Check the conditions above.
     */
    struct tfm_conn_handle_t *connection_handle_address;
    struct tfm_msg_body_t *msg;
    uint32_t partition_id;

    msg = (struct tfm_msg_body_t *)msg_handle;

    connection_handle_address =
        TFM_GET_CONTAINER_PTR(msg, struct tfm_conn_handle_t, internal_msg);

    if (is_valid_chunk_data_in_pool(
        conn_handle_pool, (uint8_t *)connection_handle_address) != 1) {
        return NULL;
    }

    /*
     * Check that the magic number is correct. This proves that the message
     * structure contains an active message.
     */
    if (msg->magic != TFM_MSG_MAGIC) {
        return NULL;
    }

    /* Check that the running partition owns the message */
    partition_id = tfm_spm_partition_get_running_partition_id();
    if (partition_id != msg->service->partition->static_data->partition_id) {
        return NULL;
    }

    /*
     * FixMe: For condition 1 it should be checked whether the message belongs
     * to the service. Skipping this check isn't a security risk as even if the
     * message belongs to another service, the handle belongs to the calling
     * partition.
     */

    return msg;
}

struct tfm_msg_body_t *
tfm_spm_get_msg_buffer_from_conn_handle(struct tfm_conn_handle_t *conn_handle)
{
    TFM_CORE_ASSERT(conn_handle != NULL);

    return &(conn_handle->internal_msg);
}

void tfm_spm_fill_msg(struct tfm_msg_body_t *msg,
                      struct tfm_spm_service_t *service,
                      struct tfm_conn_handle_t *handle,
                      int32_t type, int32_t client_id,
                      psa_invec *invec, size_t in_len,
                      psa_outvec *outvec, size_t out_len,
                      psa_outvec *caller_outvec)
{
    uint32_t i;

    TFM_CORE_ASSERT(msg);
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(!(invec == NULL && in_len != 0));
    TFM_CORE_ASSERT(!(outvec == NULL && out_len != 0));
    TFM_CORE_ASSERT(in_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(out_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(in_len + out_len <= PSA_MAX_IOVEC);

    /* Clear message buffer before using it */
    tfm_core_util_memset(msg, 0, sizeof(struct tfm_msg_body_t));

    tfm_event_init(&msg->ack_evnt);
    msg->magic = TFM_MSG_MAGIC;
    msg->service = service;
    msg->handle = handle;
    msg->caller_outvec = caller_outvec;
    msg->msg.client_id = client_id;

    /* Copy contents */
    msg->msg.type = type;

    for (i = 0; i < in_len; i++) {
        msg->msg.in_size[i] = invec[i].len;
        msg->invec[i].base = invec[i].base;
    }

    for (i = 0; i < out_len; i++) {
        msg->msg.out_size[i] = outvec[i].len;
        msg->outvec[i].base = outvec[i].base;
        /* The out len records the number of bytes written, so reset it to 0
         * here
         */
        msg->outvec[i].len = 0;
    }

    /* Use message address as handle */
    msg->msg.handle = (psa_handle_t)msg;

    /* For a connected handle, set rhandle on every message */
    if (handle) {
        msg->msg.rhandle = tfm_spm_get_rhandle(service, handle);
    }

    /* Set the private data of the NSPE client caller in multi-core topology */
    if (TFM_CLIENT_ID_IS_NS(client_id)) {
        tfm_rpc_set_caller_data(msg, client_id);
    }
}

int32_t tfm_spm_send_event(struct tfm_spm_service_t *service,
                           struct tfm_msg_body_t *msg)
{
    struct spm_partition_runtime_data_t *p_runtime_data =
                                            &service->partition->runtime_data;

    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(msg);

    /* Enqueue message to service message queue */
    if (tfm_msg_enqueue(&service->msg_queue, msg) != IPC_SUCCESS) {
        return IPC_ERROR_GENERIC;
    }

    /* Message is queued. Update signals */
    p_runtime_data->signals |= service->service_db->signal;

    tfm_event_wake(&p_runtime_data->signal_evnt, (p_runtime_data->signals &
                                                  p_runtime_data->signal_mask));

    /*
     * If it is an NS request via RPC, it is unnecessary to block the current
     * thread.
     */
    if (!is_tfm_rpc_msg(msg)) {
        tfm_event_wait(&msg->ack_evnt);
    }

    return IPC_SUCCESS;
}
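
/*
 * Flow note (hedged sketch of how the pieces fit together): the caller side
 * fills a message with tfm_spm_fill_msg() and posts it with
 * tfm_spm_send_event(), which raises the service signal and, for non-RPC
 * callers, blocks on ack_evnt until the service replies. On the service side
 * the partition consumes it with the standard PSA service API;
 * SOME_SERVICE_SIGNAL stands in for a generated signal name:
 *
 *   psa_signal_t signals = psa_wait(PSA_WAIT_ANY, PSA_BLOCK);
 *   psa_msg_t msg;
 *
 *   if (signals & SOME_SERVICE_SIGNAL) {
 *       psa_get(SOME_SERVICE_SIGNAL, &msg);
 *       ...handle msg...
 *       psa_reply(msg.handle, PSA_SUCCESS);   // lets the blocked caller run
 *   }
 */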

/**
 * \brief                   Get bottom of stack region for a partition
 *
 * \param[in] partition_idx Partition index
 *
 * \return                  Stack region bottom value
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_stack_bottom(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].
            memory_data->stack_bottom;
}

/**
 * \brief                   Get top of stack region for a partition
 *
 * \param[in] partition_idx Partition index
 *
 * \return                  Stack region top value
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_stack_top(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].memory_data->stack_top;
}

uint32_t tfm_spm_partition_get_running_partition_id(void)
{
    struct tfm_core_thread_t *pth = tfm_core_thrd_get_curr_thread();
    struct spm_partition_desc_t *partition;
    struct spm_partition_runtime_data_t *r_data;

    r_data = TFM_GET_CONTAINER_PTR(pth, struct spm_partition_runtime_data_t,
                                   sp_thrd);
    partition = TFM_GET_CONTAINER_PTR(r_data, struct spm_partition_desc_t,
                                      runtime_data);
    return partition->static_data->partition_id;
}

static struct tfm_core_thread_t *
    tfm_spm_partition_get_thread_info(uint32_t partition_idx)
{
    return &g_spm_partition_db.partitions[partition_idx].runtime_data.sp_thrd;
}

static tfm_core_thrd_entry_t
    tfm_spm_partition_get_init_func(uint32_t partition_idx)
{
    return (tfm_core_thrd_entry_t)(g_spm_partition_db.partitions[partition_idx].
                                   static_data->partition_init);
}

static uint32_t tfm_spm_partition_get_priority(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
            partition_priority;
}

int32_t tfm_memory_check(const void *buffer, size_t len, bool ns_caller,
                         enum tfm_memory_access_e access,
                         uint32_t privileged)
{
    enum tfm_status_e err;

    /* If len is zero, this indicates an empty buffer and base is ignored */
    if (len == 0) {
        return IPC_SUCCESS;
    }

    if (!buffer) {
        return IPC_ERROR_BAD_PARAMETERS;
    }

    if ((uintptr_t)buffer > (UINTPTR_MAX - len)) {
        return IPC_ERROR_MEMORY_CHECK;
    }

    if (access == TFM_MEMORY_ACCESS_RW) {
        err = tfm_core_has_write_access_to_region(buffer, len, ns_caller,
                                                  privileged);
    } else {
        err = tfm_core_has_read_access_to_region(buffer, len, ns_caller,
                                                 privileged);
    }
    if (err == TFM_SUCCESS) {
        return IPC_SUCCESS;
    }

    return IPC_ERROR_MEMORY_CHECK;
}
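
/*
 * Usage note (illustrative, mirroring the checks done in the SVC handlers
 * later in this file): before dereferencing any buffer supplied by a client,
 * SPM validates it against the caller's security state and privilege level,
 * for example:
 *
 *   if (tfm_memory_check(outvec, sizeof(psa_outvec), ns_caller,
 *                        TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
 *       tfm_core_panic();
 *   }
 *
 * A zero-length buffer always passes, and a buffer whose end would wrap
 * around the address space is rejected.
 */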

uint32_t tfm_spm_init(void)
{
    uint32_t i, j, num;
    struct spm_partition_desc_t *partition;
    struct tfm_core_thread_t *pth, *p_ns_entry_thread = NULL;
    const struct tfm_spm_partition_platform_data_t **platform_data_p;

    tfm_pool_init(conn_handle_pool,
                  POOL_BUFFER_SIZE(conn_handle_pool),
                  sizeof(struct tfm_conn_handle_t),
                  TFM_CONN_HANDLE_MAX_NUM);

    /* Initialize partitions first, as they are used when initializing
     * services
     */
    for (i = 0; i < g_spm_partition_db.partition_count; i++) {
        partition = &g_spm_partition_db.partitions[i];

        /* Check if the PSA framework version matches. */
        if (partition->static_data->psa_framework_version !=
            PSA_FRAMEWORK_VERSION) {
            ERROR_MSG("Warning: PSA Framework Version does not match!");
            continue;
        }

        platform_data_p = partition->platform_data_list;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(i,
                    *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    tfm_core_panic();
                }
                ++platform_data_p;
            }
        }

        if ((tfm_spm_partition_get_flags(i) & SPM_PART_FLAG_IPC) == 0) {
            continue;
        }

        /* Add PSA_DOORBELL signal to assigned_signals */
        partition->runtime_data.assigned_signals |= PSA_DOORBELL;

        /* TODO: This can be optimized by generating the assigned signals
         * at code generation time.
         */
        for (j = 0; j < tfm_core_irq_signals_count; ++j) {
            if (tfm_core_irq_signals[j].partition_id ==
                partition->static_data->partition_id) {
                partition->runtime_data.assigned_signals |=
                                        tfm_core_irq_signals[j].signal_value;
            }
        }

        tfm_event_init(&partition->runtime_data.signal_evnt);
        tfm_list_init(&partition->runtime_data.service_list);

        pth = tfm_spm_partition_get_thread_info(i);
        if (!pth) {
            tfm_core_panic();
        }

        tfm_core_thrd_init(pth,
                           tfm_spm_partition_get_init_func(i),
                           NULL,
                           (uintptr_t)tfm_spm_partition_get_stack_top(i),
                           (uintptr_t)tfm_spm_partition_get_stack_bottom(i));

        pth->prior = tfm_spm_partition_get_priority(i);

        if (partition->static_data->partition_id == TFM_SP_NON_SECURE_ID) {
            p_ns_entry_thread = pth;
            pth->param = (void *)tfm_spm_hal_get_ns_entry_point();
        }

        /* Kick off */
        if (tfm_core_thrd_start(pth) != THRD_SUCCESS) {
            tfm_core_panic();
        }
    }

    /* Init Service */
    num = sizeof(service) / sizeof(struct tfm_spm_service_t);
    for (i = 0; i < num; i++) {
        service[i].service_db = &service_db[i];
        partition =
            tfm_spm_get_partition_by_id(service[i].service_db->partition_id);
        if (!partition) {
            tfm_core_panic();
        }
        service[i].partition = partition;
        partition->runtime_data.assigned_signals |= service[i].service_db->signal;

        tfm_list_init(&service[i].handle_list);
        tfm_list_add_tail(&partition->runtime_data.service_list,
                          &service[i].list);
    }

    /*
     * All threads are initialized; start the scheduler.
     *
     * NOTE:
     * It is worth giving a thread object to the scheduler if the background
     * context belongs to one of the threads. Here the background thread is
     * the initialization thread that calls the SPM SVC, and it re-uses the
     * non-secure entry thread's stack. After SPM initialization is done, this
     * stack is cleaned up and the background context never returns. Tell the
     * scheduler that the current thread is the non-secure entry thread.
     */
    tfm_core_thrd_start_scheduler(p_ns_entry_thread);

    return p_ns_entry_thread->arch_ctx.lr;
}

void tfm_pendsv_do_schedule(struct tfm_arch_ctx_t *p_actx)
{
#if TFM_LVL == 2
    struct spm_partition_desc_t *p_next_partition;
    struct spm_partition_runtime_data_t *r_data;
    uint32_t is_privileged;
#endif
    struct tfm_core_thread_t *pth_next = tfm_core_thrd_get_next_thread();
    struct tfm_core_thread_t *pth_curr = tfm_core_thrd_get_curr_thread();

    if (pth_next != NULL && pth_curr != pth_next) {
#if TFM_LVL == 2
        r_data = TFM_GET_CONTAINER_PTR(pth_next,
                                       struct spm_partition_runtime_data_t,
                                       sp_thrd);
        p_next_partition = TFM_GET_CONTAINER_PTR(r_data,
                                                 struct spm_partition_desc_t,
                                                 runtime_data);

        if (p_next_partition->static_data->partition_flags &
            SPM_PART_FLAG_PSA_ROT) {
            is_privileged = TFM_PARTITION_PRIVILEGED_MODE;
        } else {
            is_privileged = TFM_PARTITION_UNPRIVILEGED_MODE;
        }

        tfm_spm_partition_change_privilege(is_privileged);
#endif

        tfm_core_thrd_switch_context(p_actx, pth_curr, pth_next);
    }

    /*
     * Handle pending mailbox message from NS in multi-core topology.
     * Empty operation on single Armv8-M platform.
     */
    tfm_rpc_client_call_handler();
}

/*********************** SPM functions for PSA Client APIs *******************/

uint32_t tfm_spm_psa_framework_version(void)
{
    return tfm_spm_client_psa_framework_version();
}

uint32_t tfm_spm_psa_version(uint32_t *args, bool ns_caller)
{
    uint32_t sid;

    TFM_CORE_ASSERT(args != NULL);
    sid = (uint32_t)args[0];

    return tfm_spm_client_psa_version(sid, ns_caller);
}

psa_status_t tfm_spm_psa_connect(uint32_t *args, bool ns_caller)
{
    uint32_t sid;
    uint32_t version;

    TFM_CORE_ASSERT(args != NULL);
    sid = (uint32_t)args[0];
    version = (uint32_t)args[1];

    return tfm_spm_client_psa_connect(sid, version, ns_caller);
}

psa_status_t tfm_spm_psa_call(uint32_t *args, bool ns_caller, uint32_t lr)
{
    psa_handle_t handle;
    psa_invec *inptr;
    psa_outvec *outptr;
    size_t in_num, out_num;
    struct spm_partition_desc_t *partition = NULL;
    uint32_t privileged;
    int32_t type;
    struct tfm_control_parameter_t ctrl_param;

    TFM_CORE_ASSERT(args != NULL);
    handle = (psa_handle_t)args[0];

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
        partition->static_data->partition_flags);

    /*
     * Read parameters from the arguments. It is a fatal error if the
     * memory reference for buffer is invalid or not readable.
     */
    if (tfm_memory_check((const void *)args[1],
        sizeof(struct tfm_control_parameter_t), ns_caller,
        TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
        tfm_core_panic();
    }

    tfm_core_util_memcpy(&ctrl_param,
                         (const void *)args[1],
                         sizeof(ctrl_param));

    type = ctrl_param.type;
    in_num = ctrl_param.in_len;
    out_num = ctrl_param.out_len;
    inptr = (psa_invec *)args[2];
    outptr = (psa_outvec *)args[3];

    /* The request type must be zero or positive. */
    if (type < 0) {
        tfm_core_panic();
    }

    return tfm_spm_client_psa_call(handle, type, inptr, in_num, outptr, out_num,
                                   ns_caller, privileged);
}

void tfm_spm_psa_close(uint32_t *args, bool ns_caller)
{
    psa_handle_t handle;

    TFM_CORE_ASSERT(args != NULL);
    handle = args[0];

    tfm_spm_client_psa_close(handle, ns_caller);
}

uint32_t tfm_spm_get_lifecycle_state(void)
{
    /*
     * FixMe: return PSA_LIFECYCLE_UNKNOWN to the caller directly. It will be
     * implemented in the future.
     */
    return PSA_LIFECYCLE_UNKNOWN;
}

/********************* SPM functions for PSA Service APIs ********************/

psa_signal_t tfm_spm_psa_wait(uint32_t *args)
{
    psa_signal_t signal_mask;
    uint32_t timeout;
    struct spm_partition_desc_t *partition = NULL;

    TFM_CORE_ASSERT(args != NULL);
    signal_mask = (psa_signal_t)args[0];
    timeout = args[1];

    /*
     * Timeout[30:0] are reserved for future use.
     * SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the signal_mask does not include any
     * assigned signals.
     */
    if ((partition->runtime_data.assigned_signals & signal_mask) == 0) {
        tfm_core_panic();
    }

    /*
     * Expected signals are included in the signal wait mask; ignored signals
     * must not be set and must not affect the caller thread state. Save this
     * mask for further checking when signals are ready to be set.
     */
    partition->runtime_data.signal_mask = signal_mask;

    /*
     * tfm_event_wait() blocks the caller thread if no signals are available.
     * In this case, the return value of this function is temporarily stored
     * in the runtime context. After new signal(s) become available, the
     * return value is updated with the available signal(s) and the blocked
     * thread gets to run.
     */
    if (timeout == PSA_BLOCK &&
        (partition->runtime_data.signals & signal_mask) == 0) {
        tfm_event_wait(&partition->runtime_data.signal_evnt);
    }

    return partition->runtime_data.signals & signal_mask;
}
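
/*
 * Example (illustrative): a partition that serves one RoT service and also
 * uses its doorbell might wait like this; SOME_SERVICE_SIGNAL stands in for
 * the generated signal name of that service:
 *
 *   psa_signal_t signals = psa_wait(SOME_SERVICE_SIGNAL | PSA_DOORBELL,
 *                                   PSA_BLOCK);
 *
 * Because the mask is saved in runtime_data.signal_mask above, only signals
 * inside the mask are reported back to the caller; anything else stays
 * pending for a later psa_wait().
 */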
| 943 | |
| 944 | psa_status_t tfm_spm_psa_get(uint32_t *args) |
| 945 | { |
| 946 | psa_signal_t signal; |
| 947 | psa_msg_t *msg = NULL; |
| 948 | struct tfm_spm_service_t *service = NULL; |
| 949 | struct tfm_msg_body_t *tmp_msg = NULL; |
| 950 | struct spm_partition_desc_t *partition = NULL; |
| 951 | uint32_t privileged; |
| 952 | |
| 953 | TFM_CORE_ASSERT(args != NULL); |
| 954 | signal = (psa_signal_t)args[0]; |
| 955 | msg = (psa_msg_t *)args[1]; |
| 956 | |
| 957 | /* |
| 958 | * Only one message could be retrieved every time for psa_get(). It is a |
| 959 | * fatal error if the input signal has more than a signal bit set. |
| 960 | */ |
Ken Liu | 410ada5 | 2020-01-08 11:37:27 +0800 | [diff] [blame] | 961 | if (!tfm_is_one_bit_set(signal)) { |
Mingyang Sun | d44522a | 2020-01-16 16:48:37 +0800 | [diff] [blame] | 962 | tfm_core_panic(); |
| 963 | } |
| 964 | |
| 965 | partition = tfm_spm_get_running_partition(); |
| 966 | if (!partition) { |
| 967 | tfm_core_panic(); |
| 968 | } |
| 969 | privileged = tfm_spm_partition_get_privileged_mode( |
| 970 | partition->static_data->partition_flags); |
| 971 | |
| 972 | /* |
| 973 | * Write the message to the service buffer. It is a fatal error if the |
| 974 | * input msg pointer is not a valid memory reference or not read-write. |
| 975 | */ |
| 976 | if (tfm_memory_check(msg, sizeof(psa_msg_t), false, TFM_MEMORY_ACCESS_RW, |
| 977 | privileged) != IPC_SUCCESS) { |
| 978 | tfm_core_panic(); |
| 979 | } |
| 980 | |
| 981 | /* |
| 982 | * It is a fatal error if the caller call psa_get() when no message has |
| 983 | * been set. The caller must call this function after an RoT Service signal |
| 984 | * is returned by psa_wait(). |
| 985 | */ |
| 986 | if (partition->runtime_data.signals == 0) { |
| 987 | tfm_core_panic(); |
| 988 | } |
| 989 | |
| 990 | /* |
| 991 | * It is a fatal error if the RoT Service signal is not currently asserted. |
| 992 | */ |
| 993 | if ((partition->runtime_data.signals & signal) == 0) { |
| 994 | tfm_core_panic(); |
| 995 | } |
| 996 | |
| 997 | /* |
| 998 | * Get RoT service by signal from partition. It is a fatal error if getting |
| 999 | * failed, which means the input signal is not correspond to an RoT service. |
| 1000 | */ |
| 1001 | service = tfm_spm_get_service_by_signal(partition, signal); |
| 1002 | if (!service) { |
| 1003 | tfm_core_panic(); |
| 1004 | } |
| 1005 | |
| 1006 | tmp_msg = tfm_msg_dequeue(&service->msg_queue); |
| 1007 | if (!tmp_msg) { |
| 1008 | return PSA_ERROR_DOES_NOT_EXIST; |
| 1009 | } |
| 1010 | |
| 1011 | ((struct tfm_conn_handle_t *)(tmp_msg->handle))->status = |
| 1012 | TFM_HANDLE_STATUS_ACTIVE; |
| 1013 | |
| 1014 | tfm_core_util_memcpy(msg, &tmp_msg->msg, sizeof(psa_msg_t)); |
| 1015 | |
| 1016 | /* |
| 1017 | * There may be multiple messages for this RoT Service signal; do not clear |
| 1018 | * its signal bit until no message remains. |
| 1019 | */ |
| 1020 | if (tfm_msg_queue_is_empty(&service->msg_queue)) { |
| 1021 | partition->runtime_data.signals &= ~signal; |
| 1022 | } |
| 1023 | |
| 1024 | return PSA_SUCCESS; |
| 1025 | } |
| 1026 | |
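|  | /** |
|  |  * \brief SPM handler of psa_set_rhandle(). |
|  |  * |
|  |  * \param[in] args args[0]: the message handle; |
|  |  *                 args[1]: the rhandle value to bind to the connection. |
|  |  * |
|  |  * \retval void              Success. |
|  |  * \retval "Does not return" The message handle is invalid (fatal error). |
|  |  */ |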
| 1027 | void tfm_spm_psa_set_rhandle(uint32_t *args) |
| 1028 | { |
| 1029 | psa_handle_t msg_handle; |
| 1030 | void *rhandle = NULL; |
| 1031 | struct tfm_msg_body_t *msg = NULL; |
| 1032 | |
| 1033 | TFM_CORE_ASSERT(args != NULL); |
| 1034 | msg_handle = (psa_handle_t)args[0]; |
| 1035 | rhandle = (void *)args[1]; |
| 1036 | |
| 1037 | /* It is a fatal error if message handle is invalid */ |
| 1038 | msg = tfm_spm_get_msg_from_handle(msg_handle); |
| 1039 | if (!msg) { |
| 1040 | tfm_core_panic(); |
| 1041 | } |
| 1042 | |
| 1043 | msg->msg.rhandle = rhandle; |
| 1044 | |
| 1045 | /* Store reverse handle for following client calls. */ |
| 1046 | tfm_spm_set_rhandle(msg->service, msg->handle, rhandle); |
| 1047 | } |
| 1048 | |
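|  | /** |
|  |  * \brief SPM handler of psa_read(). |
|  |  * |
|  |  * \param[in] args args[0]: message handle; args[1]: invec index; |
|  |  *                 args[2]: destination buffer; args[3]: bytes to read. |
|  |  * |
|  |  * \return Number of bytes copied, or 0 if the input vector has no |
|  |  *         remaining data. Does not return on a fatal error. |
|  |  */ |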
| 1049 | size_t tfm_spm_psa_read(uint32_t *args) |
| 1050 | { |
| 1051 | psa_handle_t msg_handle; |
| 1052 | uint32_t invec_idx; |
| 1053 | void *buffer = NULL; |
| 1054 | size_t num_bytes; |
| 1055 | size_t bytes; |
| 1056 | struct tfm_msg_body_t *msg = NULL; |
| 1057 | uint32_t privileged; |
| 1058 | struct spm_partition_desc_t *partition = NULL; |
| 1059 | |
| 1060 | TFM_CORE_ASSERT(args != NULL); |
| 1061 | msg_handle = (psa_handle_t)args[0]; |
| 1062 | invec_idx = args[1]; |
| 1063 | buffer = (void *)args[2]; |
| 1064 | num_bytes = (size_t)args[3]; |
| 1065 | |
| 1066 | /* It is a fatal error if message handle is invalid */ |
| 1067 | msg = tfm_spm_get_msg_from_handle(msg_handle); |
| 1068 | if (!msg) { |
| 1069 | tfm_core_panic(); |
| 1070 | } |
| 1071 | |
| 1072 | partition = msg->service->partition; |
| 1073 | privileged = tfm_spm_partition_get_privileged_mode( |
| 1074 | partition->static_data->partition_flags); |
| 1075 | |
| 1076 | /* |
| 1077 | * It is a fatal error if message handle does not refer to a request |
| 1078 | * message |
| 1079 | */ |
| 1080 | if (msg->msg.type < PSA_IPC_CALL) { |
| 1081 | tfm_core_panic(); |
| 1082 | } |
| 1083 | |
| 1084 | /* |
| 1085 | * It is a fatal error if invec_idx is equal to or greater than |
| 1086 | * PSA_MAX_IOVEC |
| 1087 | */ |
| 1088 | if (invec_idx >= PSA_MAX_IOVEC) { |
| 1089 | tfm_core_panic(); |
| 1090 | } |
| 1091 | |
| 1092 | /* There is no remaining data in this input vector */ |
| 1093 | if (msg->msg.in_size[invec_idx] == 0) { |
| 1094 | return 0; |
| 1095 | } |
| 1096 | |
| 1097 | /* |
| 1098 | * Copy the client data to the service buffer. It is a fatal error |
| 1099 | * if the memory reference for buffer is invalid or not read-write. |
| 1100 | */ |
| 1101 | if (tfm_memory_check(buffer, num_bytes, false, |
| 1102 | TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) { |
| 1103 | tfm_core_panic(); |
| 1104 | } |
| 1105 | |
| 1106 | bytes = num_bytes > msg->msg.in_size[invec_idx] ? |
| 1107 | msg->msg.in_size[invec_idx] : num_bytes; |
| 1108 | |
| 1109 | tfm_core_util_memcpy(buffer, msg->invec[invec_idx].base, bytes); |
| 1110 | |
| 1111 | /* There may be some remaining data */ |
| 1112 | msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + bytes; |
| 1113 | msg->msg.in_size[invec_idx] -= bytes; |
| 1114 | |
| 1115 | return bytes; |
| 1116 | } |
| 1117 | |
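|  | /** |
|  |  * \brief SPM handler of psa_skip(). |
|  |  * |
|  |  * \param[in] args args[0]: message handle; args[1]: invec index; |
|  |  *                 args[2]: bytes to skip. |
|  |  * |
|  |  * \return Number of bytes skipped, or 0 if the input vector has no |
|  |  *         remaining data. Does not return on a fatal error. |
|  |  */ |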
| 1118 | size_t tfm_spm_psa_skip(uint32_t *args) |
| 1119 | { |
| 1120 | psa_handle_t msg_handle; |
| 1121 | uint32_t invec_idx; |
| 1122 | size_t num_bytes; |
| 1123 | struct tfm_msg_body_t *msg = NULL; |
| 1124 | |
| 1125 | TFM_CORE_ASSERT(args != NULL); |
| 1126 | msg_handle = (psa_handle_t)args[0]; |
| 1127 | invec_idx = args[1]; |
| 1128 | num_bytes = (size_t)args[2]; |
| 1129 | |
| 1130 | /* It is a fatal error if message handle is invalid */ |
| 1131 | msg = tfm_spm_get_msg_from_handle(msg_handle); |
| 1132 | if (!msg) { |
| 1133 | tfm_core_panic(); |
| 1134 | } |
| 1135 | |
| 1136 | /* |
| 1137 | * It is a fatal error if message handle does not refer to a request |
| 1138 | * message |
| 1139 | */ |
| 1140 | if (msg->msg.type < PSA_IPC_CALL) { |
| 1141 | tfm_core_panic(); |
| 1142 | } |
| 1143 | |
| 1144 | /* |
| 1145 | * It is a fatal error if invec_idx is equal to or greater than |
| 1146 | * PSA_MAX_IOVEC |
| 1147 | */ |
| 1148 | if (invec_idx >= PSA_MAX_IOVEC) { |
| 1149 | tfm_core_panic(); |
| 1150 | } |
| 1151 | |
| 1152 | /* There is no remaining data in this input vector */ |
| 1153 | if (msg->msg.in_size[invec_idx] == 0) { |
| 1154 | return 0; |
| 1155 | } |
| 1156 | |
| 1157 | /* |
| 1158 | * If num_bytes is greater than the remaining size of the input vector then |
| 1159 | * the remaining size of the input vector is used. |
| 1160 | */ |
| 1161 | if (num_bytes > msg->msg.in_size[invec_idx]) { |
| 1162 | num_bytes = msg->msg.in_size[invec_idx]; |
| 1163 | } |
| 1164 | |
| 1165 | /* There may be some remaining data */ |
| 1166 | msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + |
| 1167 | num_bytes; |
| 1168 | msg->msg.in_size[invec_idx] -= num_bytes; |
| 1169 | |
| 1170 | return num_bytes; |
| 1171 | } |
| 1172 | |
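|  | /** |
|  |  * \brief SPM handler of psa_write(). |
|  |  * |
|  |  * \param[in] args args[0]: message handle; args[1]: outvec index; |
|  |  *                 args[2]: source buffer; args[3]: bytes to write. |
|  |  * |
|  |  * \retval void              Success. |
|  |  * \retval "Does not return" The call is invalid (fatal error). |
|  |  */ |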
| 1173 | void tfm_spm_psa_write(uint32_t *args) |
| 1174 | { |
| 1175 | psa_handle_t msg_handle; |
| 1176 | uint32_t outvec_idx; |
| 1177 | void *buffer = NULL; |
| 1178 | size_t num_bytes; |
| 1179 | struct tfm_msg_body_t *msg = NULL; |
| 1180 | uint32_t privileged; |
| 1181 | struct spm_partition_desc_t *partition = NULL; |
| 1182 | |
| 1183 | TFM_CORE_ASSERT(args != NULL); |
| 1184 | msg_handle = (psa_handle_t)args[0]; |
| 1185 | outvec_idx = args[1]; |
| 1186 | buffer = (void *)args[2]; |
| 1187 | num_bytes = (size_t)args[3]; |
| 1188 | |
| 1189 | /* It is a fatal error if message handle is invalid */ |
| 1190 | msg = tfm_spm_get_msg_from_handle(msg_handle); |
| 1191 | if (!msg) { |
| 1192 | tfm_core_panic(); |
| 1193 | } |
| 1194 | |
| 1195 | partition = msg->service->partition; |
| 1196 | privileged = tfm_spm_partition_get_privileged_mode( |
| 1197 | partition->static_data->partition_flags); |
| 1198 | |
| 1199 | /* |
| 1200 | * It is a fatal error if message handle does not refer to a request |
| 1201 | * message |
| 1202 | */ |
| 1203 | if (msg->msg.type < PSA_IPC_CALL) { |
| 1204 | tfm_core_panic(); |
| 1205 | } |
| 1206 | |
| 1207 | /* |
| 1208 | * It is a fatal error if outvec_idx is equal to or greater than |
| 1209 | * PSA_MAX_IOVEC |
| 1210 | */ |
| 1211 | if (outvec_idx >= PSA_MAX_IOVEC) { |
| 1212 | tfm_core_panic(); |
| 1213 | } |
| 1214 | |
| 1215 | /* |
| 1216 | * It is a fatal error if the call attempts to write data past the end of |
| 1217 | * the client output vector |
| 1218 | */ |
| 1219 | if (num_bytes > msg->msg.out_size[outvec_idx] - |
| 1220 | msg->outvec[outvec_idx].len) { |
| 1221 | tfm_core_panic(); |
| 1222 | } |
| 1223 | |
| 1224 | /* |
| 1225 | * Copy the service buffer to client outvecs. It is a fatal error |
| 1226 | * if the memory reference for buffer is invalid or not readable. |
| 1227 | */ |
| 1228 | if (tfm_memory_check(buffer, num_bytes, false, |
| 1229 | TFM_MEMORY_ACCESS_RO, privileged) != IPC_SUCCESS) { |
| 1230 | tfm_core_panic(); |
| 1231 | } |
| 1232 | |
| 1233 | tfm_core_util_memcpy((char *)msg->outvec[outvec_idx].base + |
| 1234 | msg->outvec[outvec_idx].len, buffer, num_bytes); |
| 1235 | |
| 1236 | /* Update the number of bytes written */ |
| 1237 | msg->outvec[outvec_idx].len += num_bytes; |
| 1238 | } |
| 1239 | |
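|  | /** |
|  |  * \brief Copy the number of bytes written to each service outvec back into |
|  |  *        the caller's psa_outvec structures before the reply is delivered. |
|  |  * |
|  |  * \param[in] msg The message body being replied to. |
|  |  */ |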
| 1240 | static void update_caller_outvec_len(struct tfm_msg_body_t *msg) |
| 1241 | { |
| 1242 | uint32_t i; |
| 1243 | |
| 1244 | /* |
| 1245 | * FixMe: abstract this part into dedicated functions to avoid |
| 1246 | * accessing the thread context in the PSA layer |
| 1247 | */ |
| 1248 | /* If it is an NS request via RPC, the owner of this message is not set */ |
| 1249 | if (!is_tfm_rpc_msg(msg)) { |
| 1250 | TFM_CORE_ASSERT(msg->ack_evnt.owner->state == THRD_STATE_BLOCK); |
| 1251 | } |
| 1252 | |
| 1253 | for (i = 0; i < PSA_MAX_IOVEC; i++) { |
| 1254 | if (msg->msg.out_size[i] == 0) { |
| 1255 | continue; |
| 1256 | } |
| 1257 | |
| 1258 | TFM_CORE_ASSERT(msg->caller_outvec[i].base == msg->outvec[i].base); |
| 1259 | |
| 1260 | msg->caller_outvec[i].len = msg->outvec[i].len; |
| 1261 | } |
| 1262 | } |
| 1263 | |
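|  | /** |
|  |  * \brief SPM handler of psa_reply(). |
|  |  * |
|  |  * \param[in] args args[0]: the handle of the message being replied to; |
|  |  *                 args[1]: the status code to return to the client. |
|  |  * |
|  |  * \retval void              Success. |
|  |  * \retval "Does not return" The call is invalid (fatal error). |
|  |  */ |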
| 1264 | void tfm_spm_psa_reply(uint32_t *args) |
| 1265 | { |
| 1266 | psa_handle_t msg_handle; |
| 1267 | psa_status_t status; |
| 1268 | struct tfm_spm_service_t *service = NULL; |
| 1269 | struct tfm_msg_body_t *msg = NULL; |
| 1270 | int32_t ret = PSA_SUCCESS; |
| 1271 | |
| 1272 | TFM_CORE_ASSERT(args != NULL); |
| 1273 | msg_handle = (psa_handle_t)args[0]; |
| 1274 | status = (psa_status_t)args[1]; |
| 1275 | |
| 1276 | /* It is a fatal error if message handle is invalid */ |
| 1277 | msg = tfm_spm_get_msg_from_handle(msg_handle); |
| 1278 | if (!msg) { |
| 1279 | tfm_core_panic(); |
| 1280 | } |
| 1281 | |
| 1282 | /* |
| 1283 | * RoT Service information is needed in this function and is stored in the |
| 1284 | * message body structure. Only two parameters (handle and status) are passed |
| 1285 | * to this function, so retrieving the service from the message is simplest. |
| 1286 | */ |
| 1287 | service = msg->service; |
| 1288 | if (!service) { |
| 1289 | tfm_core_panic(); |
| 1290 | } |
| 1291 | |
| 1292 | /* |
| 1293 | * Three types of message are handled by this function: connection, request |
| 1294 | * and disconnection. Each type needs to be processed differently. |
| 1295 | */ |
| 1296 | switch (msg->msg.type) { |
| 1297 | case PSA_IPC_CONNECT: |
| 1298 | /* |
| 1299 | * Reply to a PSA_IPC_CONNECT message. The connection handle is returned if |
| 1300 | * the input status is PSA_SUCCESS. Other return values are based on the |
| 1301 | * input status. |
| 1302 | */ |
| 1303 | if (status == PSA_SUCCESS) { |
Summer Qin | 373feb1 | 2020-03-27 15:35:33 +0800 | [diff] [blame] | 1304 | ret = tfm_spm_to_user_handle(msg->handle); |
Mingyang Sun | d44522a | 2020-01-16 16:48:37 +0800 | [diff] [blame] | 1305 | } else if (status == PSA_ERROR_CONNECTION_REFUSED) { |
| 1306 | /* Refuse the client connection, indicating a permanent error. */ |
| 1307 | tfm_spm_free_conn_handle(service, msg->handle); |
| 1308 | ret = PSA_ERROR_CONNECTION_REFUSED; |
| 1309 | } else if (status == PSA_ERROR_CONNECTION_BUSY) { |
| 1310 | /* Fail the client connection, indicating a transient error. */ |
| 1311 | ret = PSA_ERROR_CONNECTION_BUSY; |
| 1312 | } else { |
| 1313 | tfm_core_panic(); |
| 1314 | } |
| 1315 | break; |
| 1316 | case PSA_IPC_DISCONNECT: |
| 1317 | /* The connection handle is no longer used */ |
| 1318 | tfm_spm_free_conn_handle(service, msg->handle); |
| 1319 | |
| 1320 | /* |
| 1321 | * If the message type is PSA_IPC_DISCONNECT, then the status code is |
| 1322 | * ignored |
| 1323 | */ |
| 1324 | break; |
| 1325 | default: |
| 1326 | if (msg->msg.type >= PSA_IPC_CALL) { |
| 1327 | /* Reply to a request message. Return values are based on status */ |
| 1328 | ret = status; |
| 1329 | /* |
| 1330 | * The total number of bytes written to a single parameter must be |
| 1331 | * reported to the client by updating the len member of the |
| 1332 | * psa_outvec structure for the parameter before returning from |
| 1333 | * psa_call(). |
| 1334 | */ |
| 1335 | update_caller_outvec_len(msg); |
| 1336 | } else { |
| 1337 | tfm_core_panic(); |
| 1338 | } |
| 1339 | } |
| 1340 | |
| 1341 | if (ret == PSA_ERROR_PROGRAMMER_ERROR) { |
| 1342 | /* |
| 1343 | * If the source of the programmer error is a Secure Partition, the SPM |
| 1344 | * must panic the Secure Partition in response to a PROGRAMMER ERROR. |
| 1345 | */ |
| 1346 | if (TFM_CLIENT_ID_IS_NS(msg->msg.client_id)) { |
| 1347 | ((struct tfm_conn_handle_t *)(msg->handle))->status = |
| 1348 | TFM_HANDLE_STATUS_CONNECT_ERROR; |
| 1349 | } else { |
| 1350 | tfm_core_panic(); |
| 1351 | } |
| 1352 | } else { |
| 1353 | ((struct tfm_conn_handle_t *)(msg->handle))->status = |
| 1354 | TFM_HANDLE_STATUS_IDLE; |
| 1355 | } |
| 1356 | |
| 1357 | if (is_tfm_rpc_msg(msg)) { |
| 1358 | tfm_rpc_client_call_reply(msg, ret); |
| 1359 | } else { |
| 1360 | tfm_event_wake(&msg->ack_evnt, ret); |
| 1361 | } |
| 1362 | } |
| 1363 | |
| 1364 | /** |
| 1365 |  * \brief Notify the partition with the given signal. |
| 1366 | * |
| 1367 | * \param[in] partition_id The ID of the partition to be notified. |
| 1368 | * \param[in] signal The signal that the partition is to be notified |
| 1369 | * with. |
| 1370 | * |
| 1371 | * \retval void Success. |
| 1372 | * \retval "Does not return" If partition_id is invalid. |
| 1373 | */ |
| 1374 | static void notify_with_signal(int32_t partition_id, psa_signal_t signal) |
| 1375 | { |
| 1376 | struct spm_partition_desc_t *partition = NULL; |
| 1377 | |
| 1378 | /* |
| 1379 | * The value of partition_id must be greater than zero as the target of |
| 1380 | * notification must be a Secure Partition; providing a Non-secure |
| 1381 | * Partition ID is a fatal error. |
| 1382 | */ |
| 1383 | if (!TFM_CLIENT_ID_IS_S(partition_id)) { |
| 1384 | tfm_core_panic(); |
| 1385 | } |
| 1386 | |
| 1387 | /* |
| 1388 | * It is a fatal error if partition_id does not correspond to a Secure |
| 1389 | * Partition. |
| 1390 | */ |
| 1391 | partition = tfm_spm_get_partition_by_id(partition_id); |
| 1392 | if (!partition) { |
| 1393 | tfm_core_panic(); |
| 1394 | } |
| 1395 | |
| 1396 | partition->runtime_data.signals |= signal; |
| 1397 | |
| 1398 | /* |
| 1399 | * The target partition may be blocked waiting for signals after calling |
| 1400 | * psa_wait(). Set the return value to the available signals before waking |
| 1401 | * it up with tfm_event_wake(). |
| 1402 | */ |
| 1403 | tfm_event_wake(&partition->runtime_data.signal_evnt, |
| 1404 | partition->runtime_data.signals & |
| 1405 | partition->runtime_data.signal_mask); |
| 1406 | } |
| 1407 | |
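|  | /** |
|  |  * \brief SPM handler of psa_notify(): assert the doorbell signal of the |
|  |  *        target partition. |
|  |  * |
|  |  * \param[in] args args[0]: the ID of the partition to be notified. |
|  |  */ |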
| 1408 | void tfm_spm_psa_notify(uint32_t *args) |
| 1409 | { |
| 1410 | int32_t partition_id; |
| 1411 | |
| 1412 | TFM_CORE_ASSERT(args != NULL); |
| 1413 | partition_id = (int32_t)args[0]; |
| 1414 | |
| 1415 | notify_with_signal(partition_id, PSA_DOORBELL); |
| 1416 | } |
| 1417 | |
| 1418 | /** |
| 1419 |  * \brief Assert the signal associated with a given IRQ line. |
| 1420 | * |
| 1421 | * \param[in] partition_id The ID of the partition which handles this IRQ |
| 1422 | * \param[in] signal The signal associated with this IRQ |
| 1423 | * \param[in] irq_line The number of the IRQ line |
| 1424 | * |
| 1425 | * \retval void Success. |
| 1426 | * \retval "Does not return" Partition ID is invalid |
| 1427 | */ |
| 1428 | void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal, |
TTornblom | faf74f5 | 2020-03-04 17:56:27 +0100 | [diff] [blame] | 1429 | IRQn_Type irq_line) |
Mingyang Sun | d44522a | 2020-01-16 16:48:37 +0800 | [diff] [blame] | 1430 | { |
| 1431 | tfm_spm_hal_disable_irq(irq_line); |
| 1432 | notify_with_signal(partition_id, signal); |
| 1433 | } |
| 1434 | |
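|  | /** |
|  |  * \brief SPM handler of psa_clear(): clear the doorbell signal of the |
|  |  *        running partition. |
|  |  * |
|  |  * \retval void              Success. |
|  |  * \retval "Does not return" The doorbell signal is not asserted (fatal |
|  |  *                           error). |
|  |  */ |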
| 1435 | void tfm_spm_psa_clear(void) |
| 1436 | { |
| 1437 | struct spm_partition_desc_t *partition = NULL; |
| 1438 | |
| 1439 | partition = tfm_spm_get_running_partition(); |
| 1440 | if (!partition) { |
| 1441 | tfm_core_panic(); |
| 1442 | } |
| 1443 | |
| 1444 | /* |
| 1445 | * It is a fatal error if the Secure Partition's doorbell signal is not |
| 1446 | * currently asserted. |
| 1447 | */ |
| 1448 | if ((partition->runtime_data.signals & PSA_DOORBELL) == 0) { |
| 1449 | tfm_core_panic(); |
| 1450 | } |
| 1451 | partition->runtime_data.signals &= ~PSA_DOORBELL; |
| 1452 | } |
| 1453 | |
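|  | /** |
|  |  * \brief SPM handler of psa_panic(): reset the system in response to a |
|  |  *        Secure Partition panic. |
|  |  */ |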
| 1454 | void tfm_spm_psa_panic(void) |
| 1455 | { |
| 1456 | /* |
| 1457 | * PSA FF recommends that the SPM restarts the system when a Secure |
| 1458 | * Partition panics. |
| 1459 | */ |
| 1460 | tfm_spm_hal_system_reset(); |
| 1461 | } |
| 1462 | |
| 1463 | /** |
| 1464 | * \brief Return the IRQ line number associated with a signal |
| 1465 | * |
| 1466 | * \param[in] partition_id The ID of the partition in which we look for |
| 1467 | * the signal. |
| 1468 | * \param[in] signal The signal we do the query for. |
| 1469 |  * \param[out] irq_line The IRQ line associated with the signal |
| 1470 | * |
| 1471 | * \retval IPC_SUCCESS Execution successful, irq_line contains a valid |
| 1472 | * value. |
| 1473 | * \retval IPC_ERROR_GENERIC There was an error finding the IRQ line for the |
| 1474 | * signal. irq_line is unchanged. |
| 1475 | */ |
| 1476 | static int32_t get_irq_line_for_signal(int32_t partition_id, |
| 1477 | psa_signal_t signal, |
TTornblom | faf74f5 | 2020-03-04 17:56:27 +0100 | [diff] [blame] | 1478 | IRQn_Type *irq_line) |
Mingyang Sun | d44522a | 2020-01-16 16:48:37 +0800 | [diff] [blame] | 1479 | { |
| 1480 | size_t i; |
| 1481 | |
| 1482 | for (i = 0; i < tfm_core_irq_signals_count; ++i) { |
| 1483 | if (tfm_core_irq_signals[i].partition_id == partition_id && |
| 1484 | tfm_core_irq_signals[i].signal_value == signal) { |
| 1485 | *irq_line = tfm_core_irq_signals[i].irq_line; |
| 1486 | return IPC_SUCCESS; |
| 1487 | } |
| 1488 | } |
| 1489 | return IPC_ERROR_GENERIC; |
| 1490 | } |
| 1491 | |
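|  | /** |
|  |  * \brief SPM handler of psa_eoi(): clear the interrupt signal and re-enable |
|  |  *        the corresponding IRQ line. |
|  |  * |
|  |  * \param[in] args args[0]: the interrupt signal that has been processed. |
|  |  */ |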
| 1492 | void tfm_spm_psa_eoi(uint32_t *args) |
| 1493 | { |
| 1494 | psa_signal_t irq_signal; |
TTornblom | faf74f5 | 2020-03-04 17:56:27 +0100 | [diff] [blame] | 1495 | IRQn_Type irq_line = (IRQn_Type) 0; |
Mingyang Sun | d44522a | 2020-01-16 16:48:37 +0800 | [diff] [blame] | 1496 | int32_t ret; |
| 1497 | struct spm_partition_desc_t *partition = NULL; |
| 1498 | |
| 1499 | TFM_CORE_ASSERT(args != NULL); |
| 1500 | irq_signal = (psa_signal_t)args[0]; |
| 1501 | |
| 1502 | /* It is a fatal error if the passed signal indicates more than one signal. */ |
| 1503 | if (!tfm_is_one_bit_set(irq_signal)) { |
| 1504 | tfm_core_panic(); |
| 1505 | } |
| 1506 | |
| 1507 | partition = tfm_spm_get_running_partition(); |
| 1508 | if (!partition) { |
| 1509 | tfm_core_panic(); |
| 1510 | } |
| 1511 | |
| 1512 | ret = get_irq_line_for_signal(partition->static_data->partition_id, |
| 1513 | irq_signal, &irq_line); |
| 1514 | /* It is a fatal error if passed signal is not an interrupt signal. */ |
| 1515 | if (ret != IPC_SUCCESS) { |
| 1516 | tfm_core_panic(); |
| 1517 | } |
| 1518 | |
| 1519 | /* It is a fatal error if passed signal is not currently asserted */ |
| 1520 | if ((partition->runtime_data.signals & irq_signal) == 0) { |
| 1521 | tfm_core_panic(); |
| 1522 | } |
| 1523 | |
| 1524 | partition->runtime_data.signals &= ~irq_signal; |
| 1525 | |
| 1526 | tfm_spm_hal_clear_pending_irq(irq_line); |
| 1527 | tfm_spm_hal_enable_irq(irq_line); |
| 1528 | } |
| 1529 | |
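|  | /** |
|  |  * \brief Enable the IRQ line bound to an interrupt signal of the running |
|  |  *        partition. |
|  |  * |
|  |  * \param[in] args Pointer to the stacked context; r0 holds the signal. |
|  |  */ |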
| 1530 | void tfm_spm_enable_irq(uint32_t *args) |
| 1531 | { |
| 1532 | struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args; |
| 1533 | psa_signal_t irq_signal = svc_ctx->r0; |
TTornblom | faf74f5 | 2020-03-04 17:56:27 +0100 | [diff] [blame] | 1534 | IRQn_Type irq_line = (IRQn_Type) 0; |
Mingyang Sun | d44522a | 2020-01-16 16:48:37 +0800 | [diff] [blame] | 1535 | int32_t ret; |
| 1536 | struct spm_partition_desc_t *partition = NULL; |
| 1537 | |
| 1538 | /* It is a fatal error if the passed signal indicates more than one signal. */ |
| 1539 | if (!tfm_is_one_bit_set(irq_signal)) { |
| 1540 | tfm_core_panic(); |
| 1541 | } |
| 1542 | |
| 1543 | partition = tfm_spm_get_running_partition(); |
| 1544 | if (!partition) { |
| 1545 | tfm_core_panic(); |
| 1546 | } |
| 1547 | |
| 1548 | ret = get_irq_line_for_signal(partition->static_data->partition_id, |
| 1549 | irq_signal, &irq_line); |
| 1550 | /* It is a fatal error if passed signal is not an interrupt signal. */ |
| 1551 | if (ret != IPC_SUCCESS) { |
| 1552 | tfm_core_panic(); |
| 1553 | } |
| 1554 | |
| 1555 | tfm_spm_hal_enable_irq(irq_line); |
| 1556 | } |
| 1557 | |
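|  | /** |
|  |  * \brief Disable the IRQ line bound to an interrupt signal of the running |
|  |  *        partition. |
|  |  * |
|  |  * \param[in] args Pointer to the stacked context; r0 holds the signal. |
|  |  */ |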
| 1558 | void tfm_spm_disable_irq(uint32_t *args) |
| 1559 | { |
| 1560 | struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args; |
| 1561 | psa_signal_t irq_signal = svc_ctx->r0; |
TTornblom | faf74f5 | 2020-03-04 17:56:27 +0100 | [diff] [blame] | 1562 | IRQn_Type irq_line = (IRQn_Type) 0; |
Mingyang Sun | d44522a | 2020-01-16 16:48:37 +0800 | [diff] [blame] | 1563 | int32_t ret; |
| 1564 | struct spm_partition_desc_t *partition = NULL; |
| 1565 | |
| 1566 | /* It is a fatal error if the passed signal indicates more than one signal. */ |
| 1567 | if (!tfm_is_one_bit_set(irq_signal)) { |
| 1568 | tfm_core_panic(); |
| 1569 | } |
| 1570 | |
| 1571 | partition = tfm_spm_get_running_partition(); |
| 1572 | if (!partition) { |
| 1573 | tfm_core_panic(); |
| 1574 | } |
| 1575 | |
| 1576 | ret = get_irq_line_for_signal(partition->static_data->partition_id, |
| 1577 | irq_signal, &irq_line); |
| 1578 | /* It is a fatal error if passed signal is not an interrupt signal. */ |
| 1579 | if (ret != IPC_SUCCESS) { |
| 1580 | tfm_core_panic(); |
| 1581 | } |
| 1582 | |
| 1583 | tfm_spm_hal_disable_irq(irq_line); |
| 1584 | } |
| 1585 | |
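|  | /** |
|  |  * \brief Validate the caller context of a secure function request. |
|  |  * |
|  |  * \param[in] p_cur_sp   Descriptor of the currently running partition. |
|  |  * \param[in] p_ctx      Pointer to the stacked caller context. |
|  |  * \param[in] exc_return EXC_RETURN value of the current exception. |
|  |  * \param[in] ns_caller  True if the caller is non-secure. |
|  |  * |
|  |  * \retval void              Success. |
|  |  * \retval "Does not return" The caller context is invalid (fatal error). |
|  |  */ |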
| 1586 | void tfm_spm_validate_caller(struct spm_partition_desc_t *p_cur_sp, |
| 1587 | uint32_t *p_ctx, uint32_t exc_return, |
| 1588 | bool ns_caller) |
| 1589 | { |
| 1590 | uintptr_t stacked_ctx_pos; |
| 1591 | |
| 1592 | if (ns_caller) { |
| 1593 | /* |
| 1594 | * Background IRQs cannot be supported, since if an SP is executing, |
| 1595 | * the preempted context of the SP can differ from the one that |
| 1596 | * preempted the veneer. |
| 1597 | */ |
| 1598 | if (p_cur_sp->static_data->partition_id != TFM_SP_NON_SECURE_ID) { |
| 1599 | tfm_core_panic(); |
| 1600 | } |
| 1601 | |
| 1602 | /* |
| 1603 | * This is a non-secure caller; check whether the veneer stack contains |
| 1604 | * multiple contexts. |
| 1605 | */ |
| 1606 | stacked_ctx_pos = (uintptr_t)p_ctx + |
| 1607 | sizeof(struct tfm_state_context_t) + |
| 1608 | TFM_VENEER_STACK_GUARD_SIZE; |
| 1609 | |
| 1610 | if (is_stack_alloc_fp_space(exc_return)) { |
| 1611 | #if defined (__FPU_USED) && (__FPU_USED == 1U) |
| 1612 | if (FPU->FPCCR & FPU_FPCCR_TS_Msk) { |
| 1613 | stacked_ctx_pos += TFM_ADDTIONAL_FP_CONTEXT_WORDS * |
| 1614 | sizeof(uint32_t); |
| 1615 | } |
| 1616 | #endif |
| 1617 | stacked_ctx_pos += TFM_BASIC_FP_CONTEXT_WORDS * sizeof(uint32_t); |
| 1618 | } |
| 1619 | |
| 1620 | if (stacked_ctx_pos != p_cur_sp->runtime_data.sp_thrd.stk_top) { |
| 1621 | tfm_core_panic(); |
| 1622 | } |
| 1623 | } else if (p_cur_sp->static_data->partition_id <= 0) { |
| 1624 | tfm_core_panic(); |
| 1625 | } |
| 1626 | } |
Summer Qin | 830c554 | 2020-02-14 13:44:20 +0800 | [diff] [blame] | 1627 | |
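|  | /** |
|  |  * \brief Handle an SPM request from a Secure Partition; currently only the |
|  |  *        reset vote request is supported. |
|  |  * |
|  |  * \param[in] svc_ctx Pointer to the stacked context; r0 holds the request |
|  |  *                    type and receives the result code. |
|  |  */ |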
| 1628 | void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx) |
| 1629 | { |
| 1630 | uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0; |
| 1631 | uint32_t running_partition_flags = 0; |
| 1632 | const struct spm_partition_desc_t *partition = NULL; |
| 1633 | |
| 1634 | /* Check permissions on request type basis */ |
| 1635 | |
| 1636 | switch (svc_ctx->r0) { |
| 1637 | case TFM_SPM_REQUEST_RESET_VOTE: |
| 1638 | partition = tfm_spm_get_running_partition(); |
| 1639 | if (!partition) { |
| 1640 | tfm_core_panic(); |
| 1641 | } |
| 1642 | running_partition_flags = partition->static_data->partition_flags; |
| 1643 | |
| 1644 | /* Currently only PSA Root of Trust services are allowed to make a reset |
| 1645 | * vote request. |
| 1646 | */ |
| 1647 | if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) { |
| 1648 | *res_ptr = (uint32_t)TFM_ERROR_GENERIC; |
|  | break; |
| 1649 | } |
| 1650 | |
| 1651 | /* FixMe: this is a placeholder for checks to be performed before |
| 1652 | * allowing execution of reset |
| 1653 | */ |
| 1654 | *res_ptr = (uint32_t)TFM_SUCCESS; |
| 1655 | |
| 1656 | break; |
| 1657 | default: |
| 1658 | *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER; |
| 1659 | } |
| 1660 | } |