/*
 * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <inttypes.h>
#include <stdbool.h>
#include "psa/client.h"
#include "psa/service.h"
#include "psa/lifecycle.h"
#include "tfm_thread.h"
#include "tfm_wait.h"
#include "tfm_utils.h"
#include "tfm_internal_defines.h"
#include "tfm_message_queue.h"
#include "tfm_spm_hal.h"
#include "tfm_irq_list.h"
#include "tfm_api.h"
#include "tfm_secure_api.h"
#include "tfm_memory_utils.h"
#include "spm_api.h"
#include "tfm_peripherals_def.h"
#include "spm_db.h"
#include "tfm_core_utils.h"
#include "spm_psa_client_call.h"
#include "tfm_rpc.h"
#include "tfm_internal.h"
#include "tfm_core_trustzone.h"
#include "tfm_core_mem_check.h"
#include "tfm_list.h"
#include "tfm_pools.h"
#include "region_defs.h"

#include "secure_fw/services/tfm_service_list.inc"

/* Extern service variable */
extern struct tfm_spm_service_t service[];
extern const struct tfm_spm_service_db_t service_db[];

/* Extern SPM variable */
extern struct spm_partition_db_t g_spm_partition_db;

/* Pools */
TFM_POOL_DECLARE(conn_handle_pool, sizeof(struct tfm_conn_handle_t),
                 TFM_CONN_HANDLE_MAX_NUM);
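
/*
 * conn_handle_pool provides the backing storage for every
 * struct tfm_conn_handle_t: tfm_spm_create_conn_handle() allocates entries
 * from it and tfm_spm_free_conn_handle() returns them to it.
 */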

void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal,
                     int32_t irq_line);

#include "tfm_secure_irq_handlers_ipc.inc"

/* Service handle management functions */
psa_handle_t tfm_spm_create_conn_handle(struct tfm_spm_service_t *service,
                                        int32_t client_id)
{
    struct tfm_conn_handle_t *p_handle;

    TFM_CORE_ASSERT(service);

    /* Get buffer for handle list structure from handle pool */
    p_handle = (struct tfm_conn_handle_t *)tfm_pool_alloc(conn_handle_pool);
    if (!p_handle) {
        return PSA_NULL_HANDLE;
    }

    p_handle->service = service;
    p_handle->status = TFM_HANDLE_STATUS_IDLE;
    p_handle->client_id = client_id;

    /* Add the handle node to the service handle list for later PSA calls */
    tfm_list_add_tail(&service->handle_list, &p_handle->list);

    return (psa_handle_t)p_handle;
}

int32_t tfm_spm_validate_conn_handle(psa_handle_t conn_handle,
                                     int32_t client_id)
{
    /* Check that the handle address is a valid entry in the handle pool */
    if (is_valid_chunk_data_in_pool(conn_handle_pool,
                                    (uint8_t *)conn_handle) != true) {
        return IPC_ERROR_GENERIC;
    }

    /* Check that the handle belongs to the calling client */
    if (((struct tfm_conn_handle_t *)conn_handle)->client_id != client_id) {
        return IPC_ERROR_GENERIC;
    }

    return IPC_SUCCESS;
}
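
/*
 * Illustrative sketch (comment only, not compiled): how the two helpers above
 * are typically combined when a connection request is served. "service" and
 * "client_id" stand for values the caller has already resolved.
 *
 *     psa_handle_t handle = tfm_spm_create_conn_handle(service, client_id);
 *     if (handle == PSA_NULL_HANDLE) {
 *         // conn_handle_pool is exhausted
 *     }
 *     if (tfm_spm_validate_conn_handle(handle, client_id) != IPC_SUCCESS) {
 *         // the handle does not belong to this client
 *     }
 */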

static struct tfm_conn_handle_t *
tfm_spm_find_conn_handle_node(struct tfm_spm_service_t *service,
                              psa_handle_t conn_handle)
{
    TFM_CORE_ASSERT(service);

    return (struct tfm_conn_handle_t *)conn_handle;
}

/**
 * \brief Free a connection handle which is no longer used.
 *
 * \param[in] service            Target service context pointer
 * \param[in] conn_handle        Connection handle created by
 *                               tfm_spm_create_conn_handle(), \ref psa_handle_t
 *
 * \retval IPC_SUCCESS           Success
 * \retval IPC_ERROR_BAD_PARAMETERS Bad parameters input
 * \retval "Does not return"     Panic if the handle node cannot be found
 */
static int32_t tfm_spm_free_conn_handle(struct tfm_spm_service_t *service,
                                        psa_handle_t conn_handle)
{
    struct tfm_conn_handle_t *p_handle;

    TFM_CORE_ASSERT(service);

    /* There are many handles for each RoT Service */
    p_handle = tfm_spm_find_conn_handle_node(service, conn_handle);
    if (!p_handle) {
        tfm_core_panic();
    }

    /* Clear magic as the handle is not used anymore */
    p_handle->internal_msg.magic = 0;

    /* Remove node from handle list */
    tfm_list_del_node(&p_handle->list);

    /* Return the handle buffer to the pool */
    tfm_pool_free(p_handle);
    return IPC_SUCCESS;
}

/**
 * \brief Set the reverse handle value for a connection.
 *
 * \param[in] service            Target service context pointer
 * \param[in] conn_handle        Connection handle created by
 *                               tfm_spm_create_conn_handle(), \ref psa_handle_t
 * \param[in] rhandle            Reverse handle value to be saved
 *
 * \retval IPC_SUCCESS           Success
 * \retval IPC_ERROR_BAD_PARAMETERS Bad parameters input
 * \retval "Does not return"     Panic if the handle node cannot be found
 */
static int32_t tfm_spm_set_rhandle(struct tfm_spm_service_t *service,
                                   psa_handle_t conn_handle,
                                   void *rhandle)
{
    struct tfm_conn_handle_t *p_handle;

    TFM_CORE_ASSERT(service);
    /* Setting the rhandle is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != PSA_NULL_HANDLE);

    /* There are many handles for each RoT Service */
    p_handle = tfm_spm_find_conn_handle_node(service, conn_handle);
    if (!p_handle) {
        tfm_core_panic();
    }

    p_handle->rhandle = rhandle;
    return IPC_SUCCESS;
}

/**
 * \brief Get the reverse handle value from a connection handle.
 *
 * \param[in] service            Target service context pointer
 * \param[in] conn_handle        Connection handle created by
 *                               tfm_spm_create_conn_handle(), \ref psa_handle_t
 *
 * \retval void *                Success
 * \retval "Does not return"     Panic if:
 *                               the service pointer is NULL,
 *                               the handle is \ref PSA_NULL_HANDLE,
 *                               the handle node cannot be found
 */
static void *tfm_spm_get_rhandle(struct tfm_spm_service_t *service,
                                 psa_handle_t conn_handle)
{
    struct tfm_conn_handle_t *p_handle;

    TFM_CORE_ASSERT(service);
    /* Getting the rhandle is only allowed for a connected handle */
    TFM_CORE_ASSERT(conn_handle != PSA_NULL_HANDLE);

    /* There are many handles for each RoT Service */
    p_handle = tfm_spm_find_conn_handle_node(service, conn_handle);
    if (!p_handle) {
        tfm_core_panic();
    }

    return p_handle->rhandle;
}
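
/*
 * Illustrative sketch (comment only): the reverse handle stored with
 * tfm_spm_set_rhandle() is the value returned by tfm_spm_get_rhandle() for
 * later messages on the same connection. "struct conn_ctx" and "alloc_ctx()"
 * are hypothetical names used for illustration.
 *
 *     struct conn_ctx *ctx = alloc_ctx();
 *     (void)tfm_spm_set_rhandle(service, conn_handle, ctx);
 *     ...
 *     struct conn_ctx *same =
 *         (struct conn_ctx *)tfm_spm_get_rhandle(service, conn_handle);
 */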

/* Partition management functions */

/**
 * \brief Get the service context by signal.
 *
 * \param[in] partition          Partition context pointer
 *                               \ref spm_partition_desc_t structures
 * \param[in] signal             Signal associated with inputs to the Secure
 *                               Partition, \ref psa_signal_t
 *
 * \retval NULL                  Failed
 * \retval "Not NULL"            Target service context pointer,
 *                               \ref tfm_spm_service_t structures
 */
static struct tfm_spm_service_t *
tfm_spm_get_service_by_signal(struct spm_partition_desc_t *partition,
                              psa_signal_t signal)
{
    struct tfm_list_node_t *node, *head;
    struct tfm_spm_service_t *service;

    TFM_CORE_ASSERT(partition);

    if (tfm_list_is_empty(&partition->runtime_data.service_list)) {
        tfm_core_panic();
    }

    head = &partition->runtime_data.service_list;
    TFM_LIST_FOR_EACH(node, head) {
        service = TFM_GET_CONTAINER_PTR(node, struct tfm_spm_service_t, list);
        if (service->service_db->signal == signal) {
            return service;
        }
    }
    return NULL;
}

struct tfm_spm_service_t *tfm_spm_get_service_by_sid(uint32_t sid)
{
    uint32_t i;
    struct tfm_list_node_t *node, *head;
    struct tfm_spm_service_t *service;
    struct spm_partition_desc_t *partition;

    for (i = 0; i < g_spm_partition_db.partition_count; i++) {
        partition = &g_spm_partition_db.partitions[i];
        /* Skip partition without IPC flag */
        if ((tfm_spm_partition_get_flags(i) & SPM_PART_FLAG_IPC) == 0) {
            continue;
        }

        if (tfm_list_is_empty(&partition->runtime_data.service_list)) {
            continue;
        }

        head = &partition->runtime_data.service_list;
        TFM_LIST_FOR_EACH(node, head) {
            service = TFM_GET_CONTAINER_PTR(node, struct tfm_spm_service_t,
                                            list);
            if (service->service_db->sid == sid) {
                return service;
            }
        }
    }
    return NULL;
}

struct tfm_spm_service_t *
tfm_spm_get_service_by_handle(psa_handle_t conn_handle)
{
    return ((struct tfm_conn_handle_t *)conn_handle)->service;
}

/**
 * \brief Get the partition context by partition ID.
 *
 * \param[in] partition_id       Partition identity
 *
 * \retval NULL                  Failed
 * \retval "Not NULL"            Target partition context pointer,
 *                               \ref spm_partition_desc_t structures
 */
static struct spm_partition_desc_t *
tfm_spm_get_partition_by_id(int32_t partition_id)
{
    uint32_t idx = get_partition_idx(partition_id);

    if (idx != SPM_INVALID_PARTITION_IDX) {
        return &(g_spm_partition_db.partitions[idx]);
    }
    return NULL;
}

struct spm_partition_desc_t *tfm_spm_get_running_partition(void)
{
    uint32_t spid;

    spid = tfm_spm_partition_get_running_partition_id();

    return tfm_spm_get_partition_by_id(spid);
}

int32_t tfm_spm_check_client_version(struct tfm_spm_service_t *service,
                                     uint32_t version)
{
    TFM_CORE_ASSERT(service);

    switch (service->service_db->version_policy) {
    case TFM_VERSION_POLICY_RELAXED:
        if (version > service->service_db->version) {
            return IPC_ERROR_VERSION;
        }
        break;
    case TFM_VERSION_POLICY_STRICT:
        if (version != service->service_db->version) {
            return IPC_ERROR_VERSION;
        }
        break;
    default:
        return IPC_ERROR_VERSION;
    }
    return IPC_SUCCESS;
}
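
/*
 * Illustrative behaviour of the version check above, assuming a service with
 * version 2 (values chosen for illustration only):
 *   - TFM_VERSION_POLICY_RELAXED: client versions 1 and 2 are accepted,
 *     version 3 returns IPC_ERROR_VERSION.
 *   - TFM_VERSION_POLICY_STRICT: only version 2 is accepted.
 */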

int32_t tfm_spm_check_authorization(uint32_t sid,
                                    struct tfm_spm_service_t *service,
                                    bool ns_caller)
{
    struct spm_partition_desc_t *partition = NULL;
    int32_t i;

    TFM_CORE_ASSERT(service);

    if (ns_caller) {
        if (!service->service_db->non_secure_client) {
            return IPC_ERROR_GENERIC;
        }
    } else {
        partition = tfm_spm_get_running_partition();
        if (!partition) {
            tfm_core_panic();
        }

        for (i = 0; i < partition->static_data->dependencies_num; i++) {
            if (partition->static_data->p_dependencies[i] == sid) {
                break;
            }
        }

        if (i == partition->static_data->dependencies_num) {
            return IPC_ERROR_GENERIC;
        }
    }
    return IPC_SUCCESS;
}
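
/*
 * Note: for a secure caller the loop above implements the dependency check:
 * the requested SID must appear in the calling partition's p_dependencies
 * list, otherwise IPC_ERROR_GENERIC is returned.
 */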

/* Message functions */

/**
 * \brief Get the message context by message handle.
 *
 * \param[in] msg_handle         Message handle which is a reference generated
 *                               by the SPM to a specific message.
 *
 * \return The message body context pointer
 *         \ref tfm_msg_body_t structures
 */
static struct tfm_msg_body_t *
tfm_spm_get_msg_from_handle(psa_handle_t msg_handle)
{
    /*
     * The message handle passed by the caller is considered invalid in the
     * following cases:
     *   1. Not a valid message handle. (The address of a message is not the
     *      address of a possible handle from the pool.)
     *   2. The handle does not belong to the caller partition. (The handle is
     *      either unused, or owned by another partition.)
     * Check the conditions above.
     */
    struct tfm_conn_handle_t *connection_handle_address;
    struct tfm_msg_body_t *msg;
    uint32_t partition_id;

    msg = (struct tfm_msg_body_t *)msg_handle;

    connection_handle_address =
        TFM_GET_CONTAINER_PTR(msg, struct tfm_conn_handle_t, internal_msg);

    if (is_valid_chunk_data_in_pool(
        conn_handle_pool, (uint8_t *)connection_handle_address) != 1) {
        return NULL;
    }

    /*
     * Check that the magic number is correct. This proves that the message
     * structure contains an active message.
     */
    if (msg->magic != TFM_MSG_MAGIC) {
        return NULL;
    }

    /* Check that the running partition owns the message */
    partition_id = tfm_spm_partition_get_running_partition_id();
    if (partition_id != msg->service->partition->static_data->partition_id) {
        return NULL;
    }

    /*
     * FixMe: For condition 1 it should be checked whether the message belongs
     * to the service. Skipping this check isn't a security risk as even if the
     * message belongs to another service, the handle belongs to the calling
     * partition.
     */

    return msg;
}

struct tfm_msg_body_t *
tfm_spm_get_msg_buffer_from_conn_handle(psa_handle_t conn_handle)
{
    TFM_CORE_ASSERT(conn_handle != PSA_NULL_HANDLE);

    return &(((struct tfm_conn_handle_t *)conn_handle)->internal_msg);
}

void tfm_spm_fill_msg(struct tfm_msg_body_t *msg,
                      struct tfm_spm_service_t *service,
                      psa_handle_t handle,
                      int32_t type, int32_t client_id,
                      psa_invec *invec, size_t in_len,
                      psa_outvec *outvec, size_t out_len,
                      psa_outvec *caller_outvec)
{
    uint32_t i;

    TFM_CORE_ASSERT(msg);
    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(!(invec == NULL && in_len != 0));
    TFM_CORE_ASSERT(!(outvec == NULL && out_len != 0));
    TFM_CORE_ASSERT(in_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(out_len <= PSA_MAX_IOVEC);
    TFM_CORE_ASSERT(in_len + out_len <= PSA_MAX_IOVEC);

    /* Clear message buffer before using it */
    tfm_core_util_memset(msg, 0, sizeof(struct tfm_msg_body_t));

    tfm_event_init(&msg->ack_evnt);
    msg->magic = TFM_MSG_MAGIC;
    msg->service = service;
    msg->handle = handle;
    msg->caller_outvec = caller_outvec;
    msg->msg.client_id = client_id;

    /* Copy contents */
    msg->msg.type = type;

    for (i = 0; i < in_len; i++) {
        msg->msg.in_size[i] = invec[i].len;
        msg->invec[i].base = invec[i].base;
    }

    for (i = 0; i < out_len; i++) {
        msg->msg.out_size[i] = outvec[i].len;
        msg->outvec[i].base = outvec[i].base;
        /* The len records the number of bytes written, so set it to 0 here */
        msg->outvec[i].len = 0;
    }

    /* Use message address as handle */
    msg->msg.handle = (psa_handle_t)msg;

    /* For a connected handle, set rhandle on every message */
    if (handle != PSA_NULL_HANDLE) {
        msg->msg.rhandle = tfm_spm_get_rhandle(service, handle);
    }

    /* Set the private data of the NSPE client caller in multi-core topology */
    if (TFM_CLIENT_ID_IS_NS(client_id)) {
        tfm_rpc_set_caller_data(msg, client_id);
    }
}
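
/*
 * Note on the mapping above: msg->msg (the psa_msg_t later delivered to the
 * RoT Service by psa_get()) only carries the sizes of the IO vectors, while
 * the base pointers stay in msg->invec/msg->outvec, so client memory is only
 * accessed through psa_read()/psa_write() under SPM control.
 */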

int32_t tfm_spm_send_event(struct tfm_spm_service_t *service,
                           struct tfm_msg_body_t *msg)
{
    struct spm_partition_runtime_data_t *p_runtime_data =
                                    &service->partition->runtime_data;

    TFM_CORE_ASSERT(service);
    TFM_CORE_ASSERT(msg);

    /* Enqueue message to service message queue */
    if (tfm_msg_enqueue(&service->msg_queue, msg) != IPC_SUCCESS) {
        return IPC_ERROR_GENERIC;
    }

    /* Messages put. Update signals */
    p_runtime_data->signals |= service->service_db->signal;

    tfm_event_wake(&p_runtime_data->signal_evnt, (p_runtime_data->signals &
                                                  p_runtime_data->signal_mask));

    /*
     * If it is a NS request via RPC, it is unnecessary to block current
     * thread.
     */
    if (!is_tfm_rpc_msg(msg)) {
        tfm_event_wait(&msg->ack_evnt);
    }

    return IPC_SUCCESS;
}
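
/*
 * Note on the flow above: tfm_spm_send_event() enqueues the message, ORs the
 * service signal into the partition's pending signals, wakes the partition
 * thread if it is blocked in psa_wait(), and then (for non-RPC callers)
 * blocks on msg->ack_evnt until the message gets replied to.
 */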

/**
 * \brief Get bottom of stack region for a partition
 *
 * \param[in] partition_idx     Partition index
 *
 * \return Stack region bottom value
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_stack_bottom(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].
            memory_data->stack_bottom;
}

/**
 * \brief Get top of stack region for a partition
 *
 * \param[in] partition_idx     Partition index
 *
 * \return Stack region top value
 *
 * \note This function doesn't check if partition_idx is valid.
 */
static uint32_t tfm_spm_partition_get_stack_top(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].memory_data->stack_top;
}

uint32_t tfm_spm_partition_get_running_partition_id(void)
{
    struct tfm_core_thread_t *pth = tfm_core_thrd_get_curr_thread();
    struct spm_partition_desc_t *partition;
    struct spm_partition_runtime_data_t *r_data;

    r_data = TFM_GET_CONTAINER_PTR(pth, struct spm_partition_runtime_data_t,
                                   sp_thrd);
    partition = TFM_GET_CONTAINER_PTR(r_data, struct spm_partition_desc_t,
                                      runtime_data);
    return partition->static_data->partition_id;
}

static struct tfm_core_thread_t *
tfm_spm_partition_get_thread_info(uint32_t partition_idx)
{
    return &g_spm_partition_db.partitions[partition_idx].runtime_data.sp_thrd;
}

static tfm_core_thrd_entry_t
tfm_spm_partition_get_init_func(uint32_t partition_idx)
{
    return (tfm_core_thrd_entry_t)(g_spm_partition_db.partitions[partition_idx].
                                   static_data->partition_init);
}

static uint32_t tfm_spm_partition_get_priority(uint32_t partition_idx)
{
    return g_spm_partition_db.partitions[partition_idx].static_data->
            partition_priority;
}

int32_t tfm_memory_check(const void *buffer, size_t len, bool ns_caller,
                         enum tfm_memory_access_e access,
                         uint32_t privileged)
{
    enum tfm_status_e err;

    /* If len is zero, this indicates an empty buffer and base is ignored */
    if (len == 0) {
        return IPC_SUCCESS;
    }

    if (!buffer) {
        return IPC_ERROR_BAD_PARAMETERS;
    }

    if ((uintptr_t)buffer > (UINTPTR_MAX - len)) {
        return IPC_ERROR_MEMORY_CHECK;
    }

    if (access == TFM_MEMORY_ACCESS_RW) {
        err = tfm_core_has_write_access_to_region(buffer, len, ns_caller,
                                                  privileged);
    } else {
        err = tfm_core_has_read_access_to_region(buffer, len, ns_caller,
                                                 privileged);
    }
    if (err == TFM_SUCCESS) {
        return IPC_SUCCESS;
    }

    return IPC_ERROR_MEMORY_CHECK;
}
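
/*
 * Illustrative use of tfm_memory_check() (comment only), mirroring the checks
 * done by the PSA API handlers below; "args", "ns_caller" and "privileged"
 * stand for values the caller already holds:
 *
 *     if (tfm_memory_check((const void *)args[1], sizeof(psa_msg_t),
 *                          ns_caller, TFM_MEMORY_ACCESS_RW,
 *                          privileged) != IPC_SUCCESS) {
 *         tfm_core_panic();
 *     }
 */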

uint32_t tfm_spm_init(void)
{
    uint32_t i, j, num;
    struct spm_partition_desc_t *partition;
    struct tfm_core_thread_t *pth, *p_ns_entry_thread = NULL;
    const struct tfm_spm_partition_platform_data_t **platform_data_p;

    tfm_pool_init(conn_handle_pool,
                  POOL_BUFFER_SIZE(conn_handle_pool),
                  sizeof(struct tfm_conn_handle_t),
                  TFM_CONN_HANDLE_MAX_NUM);

    /* Init partitions first as they are used when initializing services */
    for (i = 0; i < g_spm_partition_db.partition_count; i++) {
        partition = &g_spm_partition_db.partitions[i];

        /* Check if the PSA framework version matches. */
        if (partition->static_data->psa_framework_version !=
            PSA_FRAMEWORK_VERSION) {
            ERROR_MSG("Warning: PSA Framework Version is not matched!");
            continue;
        }

        platform_data_p = partition->platform_data_list;
        if (platform_data_p != NULL) {
            while ((*platform_data_p) != NULL) {
                if (tfm_spm_hal_configure_default_isolation(i,
                    *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
                    tfm_core_panic();
                }
                ++platform_data_p;
            }
        }

        if ((tfm_spm_partition_get_flags(i) & SPM_PART_FLAG_IPC) == 0) {
            continue;
        }

        /* Add PSA_DOORBELL signal to assigned_signals */
        partition->runtime_data.assigned_signals |= PSA_DOORBELL;

        /* TODO: This can be optimized by generating the assigned signal
         *       at code generation time.
         */
        for (j = 0; j < tfm_core_irq_signals_count; ++j) {
            if (tfm_core_irq_signals[j].partition_id ==
                partition->static_data->partition_id) {
                partition->runtime_data.assigned_signals |=
                    tfm_core_irq_signals[j].signal_value;
            }
        }

        tfm_event_init(&partition->runtime_data.signal_evnt);
        tfm_list_init(&partition->runtime_data.service_list);

        pth = tfm_spm_partition_get_thread_info(i);
        if (!pth) {
            tfm_core_panic();
        }

        tfm_core_thrd_init(pth,
                           tfm_spm_partition_get_init_func(i),
                           NULL,
                           (uintptr_t)tfm_spm_partition_get_stack_top(i),
                           (uintptr_t)tfm_spm_partition_get_stack_bottom(i));

        pth->prior = tfm_spm_partition_get_priority(i);

        if (partition->static_data->partition_id == TFM_SP_NON_SECURE_ID) {
            p_ns_entry_thread = pth;
            pth->param = (void *)tfm_spm_hal_get_ns_entry_point();
        }

        /* Kick off */
        if (tfm_core_thrd_start(pth) != THRD_SUCCESS) {
            tfm_core_panic();
        }
    }

    /* Init services */
    num = sizeof(service) / sizeof(struct tfm_spm_service_t);
    for (i = 0; i < num; i++) {
        service[i].service_db = &service_db[i];
        partition =
            tfm_spm_get_partition_by_id(service[i].service_db->partition_id);
        if (!partition) {
            tfm_core_panic();
        }
        service[i].partition = partition;
        partition->runtime_data.assigned_signals |=
            service[i].service_db->signal;

        tfm_list_init(&service[i].handle_list);
        tfm_list_add_tail(&partition->runtime_data.service_list,
                          &service[i].list);
    }

    /*
     * All threads initialized, start the scheduler.
     *
     * NOTE:
     * It is worth giving the thread object to the scheduler if the background
     * context belongs to one of the threads. Here the background thread is the
     * initialization thread which calls the SPM SVC and re-uses the non-secure
     * entry thread's stack. After SPM initialization is done, this stack is
     * cleaned up and the background context is never going to return. Tell
     * the scheduler that the current thread is the non-secure entry thread.
     */
    tfm_core_thrd_start_scheduler(p_ns_entry_thread);

    return p_ns_entry_thread->arch_ctx.lr;
}

void tfm_pendsv_do_schedule(struct tfm_arch_ctx_t *p_actx)
{
#if TFM_LVL == 2
    struct spm_partition_desc_t *p_next_partition;
    struct spm_partition_runtime_data_t *r_data;
    uint32_t is_privileged;
#endif
    struct tfm_core_thread_t *pth_next = tfm_core_thrd_get_next_thread();
    struct tfm_core_thread_t *pth_curr = tfm_core_thrd_get_curr_thread();

    if (pth_next != NULL && pth_curr != pth_next) {
#if TFM_LVL == 2
        r_data = TFM_GET_CONTAINER_PTR(pth_next,
                                       struct spm_partition_runtime_data_t,
                                       sp_thrd);
        p_next_partition = TFM_GET_CONTAINER_PTR(r_data,
                                                 struct spm_partition_desc_t,
                                                 runtime_data);

        if (p_next_partition->static_data->partition_flags &
            SPM_PART_FLAG_PSA_ROT) {
            is_privileged = TFM_PARTITION_PRIVILEGED_MODE;
        } else {
            is_privileged = TFM_PARTITION_UNPRIVILEGED_MODE;
        }

        tfm_spm_partition_change_privilege(is_privileged);
#endif

        tfm_core_thrd_switch_context(p_actx, pth_curr, pth_next);
    }

    /*
     * Handle pending mailbox message from NS in multi-core topology.
     * Empty operation on single Armv8-M platform.
     */
    tfm_rpc_client_call_handler();
}

/*********************** SPM functions for PSA Client APIs *******************/

uint32_t tfm_spm_psa_framework_version(void)
{
    return tfm_spm_client_psa_framework_version();
}

uint32_t tfm_spm_psa_version(uint32_t *args, bool ns_caller)
{
    uint32_t sid;

    TFM_CORE_ASSERT(args != NULL);
    sid = (uint32_t)args[0];

    return tfm_spm_client_psa_version(sid, ns_caller);
}

psa_status_t tfm_spm_psa_connect(uint32_t *args, bool ns_caller)
{
    uint32_t sid;
    uint32_t version;

    TFM_CORE_ASSERT(args != NULL);
    sid = (uint32_t)args[0];
    version = (uint32_t)args[1];

    return tfm_spm_client_psa_connect(sid, version, ns_caller);
}

psa_status_t tfm_spm_psa_call(uint32_t *args, bool ns_caller, uint32_t lr)
{
    psa_handle_t handle;
    psa_invec *inptr;
    psa_outvec *outptr;
    size_t in_num, out_num;
    struct spm_partition_desc_t *partition = NULL;
    uint32_t privileged;
    int32_t type;
    struct tfm_control_parameter_t ctrl_param;

    TFM_CORE_ASSERT(args != NULL);
    handle = (psa_handle_t)args[0];

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
                     partition->static_data->partition_flags);

    /*
     * Read parameters from the arguments. It is a fatal error if the
     * memory reference for buffer is invalid or not readable.
     */
    if (tfm_memory_check((const void *)args[1],
                         sizeof(struct tfm_control_parameter_t), ns_caller,
                         TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) {
        tfm_core_panic();
    }

    tfm_core_util_memcpy(&ctrl_param,
                         (const void *)args[1],
                         sizeof(ctrl_param));

    type = ctrl_param.type;
    in_num = ctrl_param.in_len;
    out_num = ctrl_param.out_len;
    inptr = (psa_invec *)args[2];
    outptr = (psa_outvec *)args[3];

    /* The request type must be zero or positive. */
    if (type < 0) {
        tfm_core_panic();
    }

    return tfm_spm_client_psa_call(handle, type, inptr, in_num, outptr, out_num,
                                   ns_caller, privileged);
}

void tfm_spm_psa_close(uint32_t *args, bool ns_caller)
{
    psa_handle_t handle;

    TFM_CORE_ASSERT(args != NULL);
    handle = args[0];

    tfm_spm_client_psa_close(handle, ns_caller);
}

uint32_t tfm_spm_get_lifecycle_state(void)
{
    /*
     * FixMe: return PSA_LIFECYCLE_UNKNOWN to the caller directly. It will be
     * implemented in the future.
     */
    return PSA_LIFECYCLE_UNKNOWN;
}

/********************* SPM functions for PSA Service APIs ********************/

psa_signal_t tfm_spm_psa_wait(uint32_t *args)
{
    psa_signal_t signal_mask;
    uint32_t timeout;
    struct spm_partition_desc_t *partition = NULL;

    TFM_CORE_ASSERT(args != NULL);
    signal_mask = (psa_signal_t)args[0];
    timeout = args[1];

    /*
     * Timeout[30:0] are reserved for future use.
     * SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the signal_mask does not include any
     * assigned signals.
     */
    if ((partition->runtime_data.assigned_signals & signal_mask) == 0) {
        tfm_core_panic();
    }

    /*
     * Expected signals are included in the signal wait mask; ignored signals
     * should not be set and must not affect the caller thread state. Save
     * this mask for further checking when signals are ready to be set.
     */
    partition->runtime_data.signal_mask = signal_mask;

    /*
     * tfm_event_wait() blocks the caller thread if no signals are available.
     * In this case, the return value of this function is temporarily set into
     * the runtime context. After new signal(s) become available, the return
     * value is updated with the available signal(s) and the blocked thread
     * gets to run.
     */
    if (timeout == PSA_BLOCK &&
        (partition->runtime_data.signals & signal_mask) == 0) {
        tfm_event_wait(&partition->runtime_data.signal_evnt);
    }

    return partition->runtime_data.signals & signal_mask;
}

psa_status_t tfm_spm_psa_get(uint32_t *args)
{
    psa_signal_t signal;
    psa_msg_t *msg = NULL;
    struct tfm_spm_service_t *service = NULL;
    struct tfm_msg_body_t *tmp_msg = NULL;
    struct spm_partition_desc_t *partition = NULL;
    uint32_t privileged;

    TFM_CORE_ASSERT(args != NULL);
    signal = (psa_signal_t)args[0];
    msg = (psa_msg_t *)args[1];

    /*
     * Only one message can be retrieved for each psa_get() call. It is a
     * fatal error if the input signal has more than one signal bit set.
     */
    if (tfm_bitcount(signal) != 1) {
        tfm_core_panic();
    }

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
                     partition->static_data->partition_flags);

    /*
     * Write the message to the service buffer. It is a fatal error if the
     * input msg pointer is not a valid memory reference or not read-write.
     */
    if (tfm_memory_check(msg, sizeof(psa_msg_t), false, TFM_MEMORY_ACCESS_RW,
                         privileged) != IPC_SUCCESS) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the caller calls psa_get() when no message has
     * been set. The caller must call this function after an RoT Service signal
     * is returned by psa_wait().
     */
    if (partition->runtime_data.signals == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the RoT Service signal is not currently asserted.
     */
    if ((partition->runtime_data.signals & signal) == 0) {
        tfm_core_panic();
    }

    /*
     * Get the RoT Service by signal from the partition. It is a fatal error
     * if this fails, which means the input signal does not correspond to an
     * RoT Service.
     */
    service = tfm_spm_get_service_by_signal(partition, signal);
    if (!service) {
        tfm_core_panic();
    }

    tmp_msg = tfm_msg_dequeue(&service->msg_queue);
    if (!tmp_msg) {
        return PSA_ERROR_DOES_NOT_EXIST;
    }

    ((struct tfm_conn_handle_t *)(tmp_msg->handle))->status =
        TFM_HANDLE_STATUS_ACTIVE;

    tfm_core_util_memcpy(msg, &tmp_msg->msg, sizeof(psa_msg_t));

    /*
     * There may be multiple messages for this RoT Service signal, so do not
     * clear its mask until there are no remaining messages.
     */
    if (tfm_msg_queue_is_empty(&service->msg_queue)) {
        partition->runtime_data.signals &= ~signal;
    }

    return PSA_SUCCESS;
}
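
/*
 * Illustrative partition-side loop (comment only, written against the PSA
 * Secure Partition API rather than these SPM internals) showing how
 * psa_wait() and psa_get() cooperate with the signal bookkeeping above.
 * MY_SERVICE_SIGNAL is a hypothetical manifest-defined signal.
 *
 *     psa_signal_t signals = psa_wait(PSA_WAIT_ANY, PSA_BLOCK);
 *     if (signals & MY_SERVICE_SIGNAL) {
 *         psa_msg_t msg;
 *         if (psa_get(MY_SERVICE_SIGNAL, &msg) == PSA_SUCCESS) {
 *             // handle the message, then reply via psa_reply(msg.handle, r)
 *         }
 *     }
 */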
| 992 | |
| 993 | void tfm_spm_psa_set_rhandle(uint32_t *args) |
| 994 | { |
| 995 | psa_handle_t msg_handle; |
| 996 | void *rhandle = NULL; |
| 997 | struct tfm_msg_body_t *msg = NULL; |
| 998 | |
| 999 | TFM_CORE_ASSERT(args != NULL); |
| 1000 | msg_handle = (psa_handle_t)args[0]; |
| 1001 | rhandle = (void *)args[1]; |
| 1002 | |
| 1003 | /* It is a fatal error if message handle is invalid */ |
| 1004 | msg = tfm_spm_get_msg_from_handle(msg_handle); |
| 1005 | if (!msg) { |
| 1006 | tfm_core_panic(); |
| 1007 | } |
| 1008 | |
| 1009 | msg->msg.rhandle = rhandle; |
| 1010 | |
| 1011 | /* Store reverse handle for following client calls. */ |
| 1012 | tfm_spm_set_rhandle(msg->service, msg->handle, rhandle); |
| 1013 | } |
| 1014 | |
| 1015 | size_t tfm_spm_psa_read(uint32_t *args) |
| 1016 | { |
| 1017 | psa_handle_t msg_handle; |
| 1018 | uint32_t invec_idx; |
| 1019 | void *buffer = NULL; |
| 1020 | size_t num_bytes; |
| 1021 | size_t bytes; |
| 1022 | struct tfm_msg_body_t *msg = NULL; |
| 1023 | uint32_t privileged; |
| 1024 | struct spm_partition_desc_t *partition = NULL; |
| 1025 | |
| 1026 | TFM_CORE_ASSERT(args != NULL); |
| 1027 | msg_handle = (psa_handle_t)args[0]; |
| 1028 | invec_idx = args[1]; |
| 1029 | buffer = (void *)args[2]; |
| 1030 | num_bytes = (size_t)args[3]; |
| 1031 | |
| 1032 | /* It is a fatal error if message handle is invalid */ |
| 1033 | msg = tfm_spm_get_msg_from_handle(msg_handle); |
| 1034 | if (!msg) { |
| 1035 | tfm_core_panic(); |
| 1036 | } |
| 1037 | |
| 1038 | partition = msg->service->partition; |
| 1039 | privileged = tfm_spm_partition_get_privileged_mode( |
| 1040 | partition->static_data->partition_flags); |
| 1041 | |
| 1042 | /* |
| 1043 | * It is a fatal error if message handle does not refer to a request |
| 1044 | * message |
| 1045 | */ |
| 1046 | if (msg->msg.type < PSA_IPC_CALL) { |
| 1047 | tfm_core_panic(); |
| 1048 | } |
| 1049 | |
| 1050 | /* |
| 1051 | * It is a fatal error if invec_idx is equal to or greater than |
| 1052 | * PSA_MAX_IOVEC |
| 1053 | */ |
| 1054 | if (invec_idx >= PSA_MAX_IOVEC) { |
| 1055 | tfm_core_panic(); |
| 1056 | } |
| 1057 | |
| 1058 | /* There is no remaining data in this input vector */
| 1059 | if (msg->msg.in_size[invec_idx] == 0) { |
| 1060 | return 0; |
| 1061 | } |
| 1062 | |
| 1063 | /* |
| 1064 | * Copy the client data to the service buffer. It is a fatal error |
| 1065 | * if the memory reference for buffer is invalid or not read-write. |
| 1066 | */ |
| 1067 | if (tfm_memory_check(buffer, num_bytes, false, |
| 1068 | TFM_MEMORY_ACCESS_RW, privileged) != IPC_SUCCESS) { |
| 1069 | tfm_core_panic(); |
| 1070 | } |
| 1071 | |
| 1072 | bytes = num_bytes > msg->msg.in_size[invec_idx] ? |
| 1073 | msg->msg.in_size[invec_idx] : num_bytes; |
| 1074 | |
| 1075 | tfm_core_util_memcpy(buffer, msg->invec[invec_idx].base, bytes); |
| 1076 | |
| 1077 | /* There may be some remaining data */
| 1078 | msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + bytes; |
| 1079 | msg->msg.in_size[invec_idx] -= bytes; |
| 1080 | |
| 1081 | return bytes; |
| 1082 | } |
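
/*
 * Illustrative sketch (not part of the SPM build): since psa_read() advances
 * the input vector and returns the number of bytes copied, a service can
 * drain invec 0 in fixed-size chunks. process_chunk() is a hypothetical
 * helper.
 *
 *     uint8_t chunk[64];
 *     size_t n;
 *     while ((n = psa_read(msg.handle, 0, chunk, sizeof(chunk))) != 0) {
 *         process_chunk(chunk, n);
 *     }
 */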
| 1083 | |
| 1084 | size_t tfm_spm_psa_skip(uint32_t *args) |
| 1085 | { |
| 1086 | psa_handle_t msg_handle; |
| 1087 | uint32_t invec_idx; |
| 1088 | size_t num_bytes; |
| 1089 | struct tfm_msg_body_t *msg = NULL; |
| 1090 | |
| 1091 | TFM_CORE_ASSERT(args != NULL); |
| 1092 | msg_handle = (psa_handle_t)args[0]; |
| 1093 | invec_idx = args[1]; |
| 1094 | num_bytes = (size_t)args[2]; |
| 1095 | |
| 1096 | /* It is a fatal error if message handle is invalid */ |
| 1097 | msg = tfm_spm_get_msg_from_handle(msg_handle); |
| 1098 | if (!msg) { |
| 1099 | tfm_core_panic(); |
| 1100 | } |
| 1101 | |
| 1102 | /* |
| 1103 | * It is a fatal error if message handle does not refer to a request |
| 1104 | * message |
| 1105 | */ |
| 1106 | if (msg->msg.type < PSA_IPC_CALL) { |
| 1107 | tfm_core_panic(); |
| 1108 | } |
| 1109 | |
| 1110 | /* |
| 1111 | * It is a fatal error if invec_idx is equal to or greater than |
| 1112 | * PSA_MAX_IOVEC |
| 1113 | */ |
| 1114 | if (invec_idx >= PSA_MAX_IOVEC) { |
| 1115 | tfm_core_panic(); |
| 1116 | } |
| 1117 | |
| 1118 | /* There is no remaining data in this input vector */
| 1119 | if (msg->msg.in_size[invec_idx] == 0) { |
| 1120 | return 0; |
| 1121 | } |
| 1122 | |
| 1123 | /* |
| 1124 | * If num_bytes is greater than the remaining size of the input vector then |
| 1125 | * the remaining size of the input vector is used. |
| 1126 | */ |
| 1127 | if (num_bytes > msg->msg.in_size[invec_idx]) { |
| 1128 | num_bytes = msg->msg.in_size[invec_idx]; |
| 1129 | } |
| 1130 | |
| 1131 | /* There may be some remaining data */
| 1132 | msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + |
| 1133 | num_bytes; |
| 1134 | msg->msg.in_size[invec_idx] -= num_bytes; |
| 1135 | |
| 1136 | return num_bytes; |
| 1137 | } |
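
/*
 * Illustrative sketch (not part of the SPM build): psa_skip() advances the
 * same read position as psa_read(), so a service can skip over a header it
 * does not need before reading the payload. HDR_SIZE and payload are
 * hypothetical.
 *
 *     (void)psa_skip(msg.handle, 0, HDR_SIZE);
 *     n = psa_read(msg.handle, 0, payload, sizeof(payload));
 */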
| 1138 | |
| 1139 | void tfm_spm_psa_write(uint32_t *args) |
| 1140 | { |
| 1141 | psa_handle_t msg_handle; |
| 1142 | uint32_t outvec_idx; |
| 1143 | void *buffer = NULL; |
| 1144 | size_t num_bytes; |
| 1145 | struct tfm_msg_body_t *msg = NULL; |
| 1146 | uint32_t privileged; |
| 1147 | struct spm_partition_desc_t *partition = NULL; |
| 1148 | |
| 1149 | TFM_CORE_ASSERT(args != NULL); |
| 1150 | msg_handle = (psa_handle_t)args[0]; |
| 1151 | outvec_idx = args[1]; |
| 1152 | buffer = (void *)args[2]; |
| 1153 | num_bytes = (size_t)args[3]; |
| 1154 | |
| 1155 | /* It is a fatal error if message handle is invalid */ |
| 1156 | msg = tfm_spm_get_msg_from_handle(msg_handle); |
| 1157 | if (!msg) { |
| 1158 | tfm_core_panic(); |
| 1159 | } |
| 1160 | |
| 1161 | partition = msg->service->partition; |
| 1162 | privileged = tfm_spm_partition_get_privileged_mode( |
| 1163 | partition->static_data->partition_flags); |
| 1164 | |
| 1165 | /* |
| 1166 | * It is a fatal error if message handle does not refer to a request |
| 1167 | * message |
| 1168 | */ |
| 1169 | if (msg->msg.type < PSA_IPC_CALL) { |
| 1170 | tfm_core_panic(); |
| 1171 | } |
| 1172 | |
| 1173 | /* |
| 1174 | * It is a fatal error if outvec_idx is equal to or greater than |
| 1175 | * PSA_MAX_IOVEC |
| 1176 | */ |
| 1177 | if (outvec_idx >= PSA_MAX_IOVEC) { |
| 1178 | tfm_core_panic(); |
| 1179 | } |
| 1180 | |
| 1181 | /* |
| 1182 | * It is a fatal error if the call attempts to write data past the end of |
| 1183 | * the client output vector |
| 1184 | */ |
| 1185 | if (num_bytes > msg->msg.out_size[outvec_idx] - |
| 1186 | msg->outvec[outvec_idx].len) { |
| 1187 | tfm_core_panic(); |
| 1188 | } |
| 1189 | |
| 1190 | /* |
| 1191 | * Copy the service buffer to client outvecs. It is a fatal error |
| 1192 | * if the memory reference for buffer is invalid or not readable. |
| 1193 | */ |
| 1194 | if (tfm_memory_check(buffer, num_bytes, false, |
| 1195 | TFM_MEMORY_ACCESS_RO, privileged) != IPC_SUCCESS) { |
| 1196 | tfm_core_panic(); |
| 1197 | } |
| 1198 | |
| 1199 | tfm_core_util_memcpy((char *)msg->outvec[outvec_idx].base + |
| 1200 | msg->outvec[outvec_idx].len, buffer, num_bytes); |
| 1201 | |
| 1202 | /* Update the number of bytes written */
| 1203 | msg->outvec[outvec_idx].len += num_bytes; |
| 1204 | } |
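
/*
 * Illustrative sketch (not part of the SPM build): successive psa_write()
 * calls append to the same client output vector; the accumulated length is
 * reported back to the client when the service replies (see
 * update_caller_outvec_len() below). reply_hdr and reply_body are
 * hypothetical service buffers.
 *
 *     psa_write(msg.handle, 0, &reply_hdr, sizeof(reply_hdr));
 *     psa_write(msg.handle, 0, reply_body, body_len);
 */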
| 1205 | |
| 1206 | static void update_caller_outvec_len(struct tfm_msg_body_t *msg) |
| 1207 | { |
| 1208 | uint32_t i; |
| 1209 | |
| 1210 | /*
| 1211 | * FIXME: abstract this part into dedicated functions to avoid
| 1212 | * accessing the thread context in the PSA layer.
| 1213 | */
| 1214 | /* If it is an NS request via RPC, the owner of this message is not set */
| 1215 | if (!is_tfm_rpc_msg(msg)) { |
| 1216 | TFM_CORE_ASSERT(msg->ack_evnt.owner->state == THRD_STATE_BLOCK); |
| 1217 | } |
| 1218 | |
| 1219 | for (i = 0; i < PSA_MAX_IOVEC; i++) { |
| 1220 | if (msg->msg.out_size[i] == 0) { |
| 1221 | continue; |
| 1222 | } |
| 1223 | |
| 1224 | TFM_CORE_ASSERT(msg->caller_outvec[i].base == msg->outvec[i].base); |
| 1225 | |
| 1226 | msg->caller_outvec[i].len = msg->outvec[i].len; |
| 1227 | } |
| 1228 | } |
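
/*
 * Client-side view (descriptive note): the caller_outvec updated above is the
 * out_vec array the client passed to psa_call(), so after psa_call() returns
 * each psa_outvec.len holds the number of bytes the service actually wrote
 * with psa_write().
 */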
| 1229 | |
| 1230 | void tfm_spm_psa_reply(uint32_t *args) |
| 1231 | { |
| 1232 | psa_handle_t msg_handle; |
| 1233 | psa_status_t status; |
| 1234 | struct tfm_spm_service_t *service = NULL; |
| 1235 | struct tfm_msg_body_t *msg = NULL; |
| 1236 | int32_t ret = PSA_SUCCESS; |
| 1237 | |
| 1238 | TFM_CORE_ASSERT(args != NULL); |
| 1239 | msg_handle = (psa_handle_t)args[0]; |
| 1240 | status = (psa_status_t)args[1]; |
| 1241 | |
| 1242 | /* It is a fatal error if message handle is invalid */ |
| 1243 | msg = tfm_spm_get_msg_from_handle(msg_handle); |
| 1244 | if (!msg) { |
| 1245 | tfm_core_panic(); |
| 1246 | } |
| 1247 | |
| 1248 | /* |
| 1249 | * The RoT Service information is needed in this function and is stored
| 1250 | * in the message body structure. Only two parameters (handle and status)
| 1251 | * are passed to this function, so retrieving it from the message is simplest.
| 1252 | */ |
| 1253 | service = msg->service; |
| 1254 | if (!service) { |
| 1255 | tfm_core_panic(); |
| 1256 | } |
| 1257 | |
| 1258 | /* |
| 1259 | * Three types of message are handled in this function: CONNECT, REQUEST
| 1260 | * and DISCONNECT. Each type needs to be processed differently.
| 1261 | */ |
| 1262 | switch (msg->msg.type) { |
| 1263 | case PSA_IPC_CONNECT: |
| 1264 | /* |
| 1265 | * Reply to a PSA_IPC_CONNECT message. The connection handle is returned
| 1266 | * if the input status is PSA_SUCCESS. Other return values are based on
| 1267 | * the input status.
| 1268 | */ |
| 1269 | if (status == PSA_SUCCESS) { |
| 1270 | ret = msg->handle; |
| 1271 | } else if (status == PSA_ERROR_CONNECTION_REFUSED) { |
| 1272 | /* Refuse the client connection, indicating a permanent error. */ |
| 1273 | tfm_spm_free_conn_handle(service, msg->handle); |
| 1274 | ret = PSA_ERROR_CONNECTION_REFUSED; |
| 1275 | } else if (status == PSA_ERROR_CONNECTION_BUSY) { |
| 1276 | /* Fail the client connection, indicating a transient error. */ |
| 1277 | ret = PSA_ERROR_CONNECTION_BUSY; |
| 1278 | } else { |
| 1279 | tfm_core_panic(); |
| 1280 | } |
| 1281 | break; |
| 1282 | case PSA_IPC_DISCONNECT: |
| 1283 | /* The connection handle is no longer used */
| 1284 | tfm_spm_free_conn_handle(service, msg->handle); |
| 1285 | |
| 1286 | /* |
| 1287 | * If the message type is PSA_IPC_DISCONNECT, then the status code is |
| 1288 | * ignored |
| 1289 | */ |
| 1290 | break; |
| 1291 | default: |
| 1292 | if (msg->msg.type >= PSA_IPC_CALL) { |
| 1293 | /* Reply to a request message. Return values are based on status */ |
| 1294 | ret = status; |
| 1295 | /* |
| 1296 | * The total number of bytes written to a single parameter must be |
| 1297 | * reported to the client by updating the len member of the |
| 1298 | * psa_outvec structure for the parameter before returning from |
| 1299 | * psa_call(). |
| 1300 | */ |
| 1301 | update_caller_outvec_len(msg); |
| 1302 | } else { |
| 1303 | tfm_core_panic(); |
| 1304 | } |
| 1305 | } |
| 1306 | |
| 1307 | if (ret == PSA_ERROR_PROGRAMMER_ERROR) { |
| 1308 | /* |
| 1309 | * If the source of the PROGRAMMER ERROR is a Secure Partition, the SPM must
| 1310 | * panic it; for a non-secure client the connection is marked in error instead.
| 1311 | */ |
| 1312 | if (TFM_CLIENT_ID_IS_NS(msg->msg.client_id)) { |
| 1313 | ((struct tfm_conn_handle_t *)(msg->handle))->status = |
| 1314 | TFM_HANDLE_STATUS_CONNECT_ERROR; |
| 1315 | } else { |
| 1316 | tfm_core_panic(); |
| 1317 | } |
| 1318 | } else { |
| 1319 | ((struct tfm_conn_handle_t *)(msg->handle))->status = |
| 1320 | TFM_HANDLE_STATUS_IDLE; |
| 1321 | } |
| 1322 | |
| 1323 | if (is_tfm_rpc_msg(msg)) { |
| 1324 | tfm_rpc_client_call_reply(msg, ret); |
| 1325 | } else { |
| 1326 | tfm_event_wake(&msg->ack_evnt, ret); |
| 1327 | } |
| 1328 | } |
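
/*
 * Illustrative sketch (not part of the SPM build): the service side of the
 * reply for each message type. do_request() is a hypothetical helper.
 *
 *     switch (msg.type) {
 *     case PSA_IPC_CONNECT:
 *     case PSA_IPC_DISCONNECT:
 *         psa_reply(msg.handle, PSA_SUCCESS);
 *         break;
 *     default:
 *         psa_reply(msg.handle, do_request(&msg));
 *         break;
 *     }
 */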
| 1329 | |
| 1330 | /** |
| 1331 | * \brief Notify the partition with the signal.
| 1332 | * |
| 1333 | * \param[in] partition_id The ID of the partition to be notified. |
| 1334 | * \param[in] signal The signal that the partition is to be notified |
| 1335 | * with. |
| 1336 | * |
| 1337 | * \retval void Success. |
| 1338 | * \retval "Does not return" If partition_id is invalid. |
| 1339 | */ |
| 1340 | static void notify_with_signal(int32_t partition_id, psa_signal_t signal) |
| 1341 | { |
| 1342 | struct spm_partition_desc_t *partition = NULL; |
| 1343 | |
| 1344 | /* |
| 1345 | * The value of partition_id must be greater than zero as the target of a
| 1346 | * notification must be a Secure Partition; providing a Non-secure
| 1347 | * Partition ID is a fatal error.
| 1348 | */ |
| 1349 | if (!TFM_CLIENT_ID_IS_S(partition_id)) { |
| 1350 | tfm_core_panic(); |
| 1351 | } |
| 1352 | |
| 1353 | /* |
| 1354 | * It is a fatal error if partition_id does not correspond to a Secure |
| 1355 | * Partition. |
| 1356 | */ |
| 1357 | partition = tfm_spm_get_partition_by_id(partition_id); |
| 1358 | if (!partition) { |
| 1359 | tfm_core_panic(); |
| 1360 | } |
| 1361 | |
| 1362 | partition->runtime_data.signals |= signal; |
| 1363 | |
| 1364 | /* |
| 1365 | * The target partition may be blocked waiting for signals after calling
| 1366 | * psa_wait(). Set the return value to the asserted signals that match the
| 1367 | * signal mask before waking it up with tfm_event_wake().
| 1368 | */ |
| 1369 | tfm_event_wake(&partition->runtime_data.signal_evnt, |
| 1370 | partition->runtime_data.signals & |
| 1371 | partition->runtime_data.signal_mask); |
| 1372 | } |
| 1373 | |
| 1374 | void tfm_spm_psa_notify(uint32_t *args) |
| 1375 | { |
| 1376 | int32_t partition_id; |
| 1377 | |
| 1378 | TFM_CORE_ASSERT(args != NULL); |
| 1379 | partition_id = (int32_t)args[0]; |
| 1380 | |
| 1381 | notify_with_signal(partition_id, PSA_DOORBELL); |
| 1382 | } |
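
/*
 * Illustrative sketch (not part of the SPM build): a doorbell round trip.
 * PARTITION_B_ID is a hypothetical Secure Partition ID.
 *
 *     Sender partition:
 *         psa_notify(PARTITION_B_ID);
 *
 *     Target partition:
 *         (void)psa_wait(PSA_DOORBELL, PSA_BLOCK);
 *         (handle the doorbell)
 *         psa_clear();
 */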
| 1383 | |
| 1384 | /** |
| 1385 | * \brief Assert the signal for a given IRQ line.
| 1386 | * |
| 1387 | * \param[in] partition_id The ID of the partition which handles this IRQ |
| 1388 | * \param[in] signal The signal associated with this IRQ |
| 1389 | * \param[in] irq_line The number of the IRQ line |
| 1390 | * |
| 1391 | * \retval void Success. |
| 1392 | * \retval "Does not return" Partition ID is invalid |
| 1393 | */ |
| 1394 | void tfm_irq_handler(uint32_t partition_id, psa_signal_t signal, |
| 1395 | int32_t irq_line) |
| 1396 | { |
| 1397 | tfm_spm_hal_disable_irq(irq_line); |
| 1398 | notify_with_signal(partition_id, signal); |
| 1399 | } |
| 1400 | |
| 1401 | void tfm_spm_psa_clear(void) |
| 1402 | { |
| 1403 | struct spm_partition_desc_t *partition = NULL; |
| 1404 | |
| 1405 | partition = tfm_spm_get_running_partition(); |
| 1406 | if (!partition) { |
| 1407 | tfm_core_panic(); |
| 1408 | } |
| 1409 | |
| 1410 | /* |
| 1411 | * It is a fatal error if the Secure Partition's doorbell signal is not |
| 1412 | * currently asserted. |
| 1413 | */ |
| 1414 | if ((partition->runtime_data.signals & PSA_DOORBELL) == 0) { |
| 1415 | tfm_core_panic(); |
| 1416 | } |
| 1417 | partition->runtime_data.signals &= ~PSA_DOORBELL; |
| 1418 | } |
| 1419 | |
| 1420 | void tfm_spm_psa_panic(void) |
| 1421 | { |
| 1422 | /* |
| 1423 | * PSA FF recommends that the SPM restarts the system when a Secure
| 1424 | * Partition panics.
| 1425 | */ |
| 1426 | tfm_spm_hal_system_reset(); |
| 1427 | } |
| 1428 | |
| 1429 | /** |
| 1430 | * \brief Return the IRQ line number associated with a signal |
| 1431 | * |
| 1432 | * \param[in] partition_id The ID of the partition in which we look for |
| 1433 | * the signal. |
| 1434 | * \param[in] signal The signal we do the query for. |
| 1435 | * \param[out] irq_line The IRQ line associated with the signal
| 1436 | * |
| 1437 | * \retval IPC_SUCCESS Execution successful, irq_line contains a valid |
| 1438 | * value. |
| 1439 | * \retval IPC_ERROR_GENERIC There was an error finding the IRQ line for the |
| 1440 | * signal. irq_line is unchanged. |
| 1441 | */ |
| 1442 | static int32_t get_irq_line_for_signal(int32_t partition_id, |
| 1443 | psa_signal_t signal, |
| 1444 | int32_t *irq_line) |
| 1445 | { |
| 1446 | size_t i; |
| 1447 | |
| 1448 | for (i = 0; i < tfm_core_irq_signals_count; ++i) { |
| 1449 | if (tfm_core_irq_signals[i].partition_id == partition_id && |
| 1450 | tfm_core_irq_signals[i].signal_value == signal) { |
| 1451 | *irq_line = tfm_core_irq_signals[i].irq_line; |
| 1452 | return IPC_SUCCESS; |
| 1453 | } |
| 1454 | } |
| 1455 | return IPC_ERROR_GENERIC; |
| 1456 | } |
| 1457 | |
| 1458 | void tfm_spm_psa_eoi(uint32_t *args) |
| 1459 | { |
| 1460 | psa_signal_t irq_signal; |
| 1461 | int32_t irq_line = 0; |
| 1462 | int32_t ret; |
| 1463 | struct spm_partition_desc_t *partition = NULL; |
| 1464 | |
| 1465 | TFM_CORE_ASSERT(args != NULL); |
| 1466 | irq_signal = (psa_signal_t)args[0]; |
| 1467 | |
| 1468 | /* It is a fatal error if the passed signal indicates more than one signal. */
| 1469 | if (!tfm_is_one_bit_set(irq_signal)) { |
| 1470 | tfm_core_panic(); |
| 1471 | } |
| 1472 | |
| 1473 | partition = tfm_spm_get_running_partition(); |
| 1474 | if (!partition) { |
| 1475 | tfm_core_panic(); |
| 1476 | } |
| 1477 | |
| 1478 | ret = get_irq_line_for_signal(partition->static_data->partition_id, |
| 1479 | irq_signal, &irq_line); |
| 1480 | /* It is a fatal error if the passed signal is not an interrupt signal. */
| 1481 | if (ret != IPC_SUCCESS) { |
| 1482 | tfm_core_panic(); |
| 1483 | } |
| 1484 | |
| 1485 | /* It is a fatal error if the passed signal is not currently asserted */
| 1486 | if ((partition->runtime_data.signals & irq_signal) == 0) { |
| 1487 | tfm_core_panic(); |
| 1488 | } |
| 1489 | |
| 1490 | partition->runtime_data.signals &= ~irq_signal; |
| 1491 | |
| 1492 | tfm_spm_hal_clear_pending_irq(irq_line); |
| 1493 | tfm_spm_hal_enable_irq(irq_line); |
| 1494 | } |
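
/*
 * Illustrative sketch (not part of the SPM build): the expected interrupt
 * flow for a partition-owned IRQ. The SPM disables the line and asserts the
 * signal in tfm_irq_handler() above; the partition services the device and
 * re-arms the line with psa_eoi(). MY_IRQ_SIGNAL is a hypothetical
 * manifest-generated signal.
 *
 *     signals = psa_wait(MY_IRQ_SIGNAL, PSA_BLOCK);
 *     if (signals & MY_IRQ_SIGNAL) {
 *         (service the device)
 *         psa_eoi(MY_IRQ_SIGNAL);
 *     }
 */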
| 1495 | |
| 1496 | void tfm_spm_enable_irq(uint32_t *args) |
| 1497 | { |
| 1498 | struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args; |
| 1499 | psa_signal_t irq_signal = svc_ctx->r0; |
| 1500 | int32_t irq_line = 0; |
| 1501 | int32_t ret; |
| 1502 | struct spm_partition_desc_t *partition = NULL; |
| 1503 | |
| 1504 | /* It is a fatal error if the passed signal indicates more than one signal. */
| 1505 | if (!tfm_is_one_bit_set(irq_signal)) { |
| 1506 | tfm_core_panic(); |
| 1507 | } |
| 1508 | |
| 1509 | partition = tfm_spm_get_running_partition(); |
| 1510 | if (!partition) { |
| 1511 | tfm_core_panic(); |
| 1512 | } |
| 1513 | |
| 1514 | ret = get_irq_line_for_signal(partition->static_data->partition_id, |
| 1515 | irq_signal, &irq_line); |
| 1516 | /* It is a fatal error if the passed signal is not an interrupt signal. */
| 1517 | if (ret != IPC_SUCCESS) { |
| 1518 | tfm_core_panic(); |
| 1519 | } |
| 1520 | |
| 1521 | tfm_spm_hal_enable_irq(irq_line); |
| 1522 | } |
| 1523 | |
| 1524 | void tfm_spm_disable_irq(uint32_t *args) |
| 1525 | { |
| 1526 | struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)args; |
| 1527 | psa_signal_t irq_signal = svc_ctx->r0; |
| 1528 | int32_t irq_line = 0; |
| 1529 | int32_t ret; |
| 1530 | struct spm_partition_desc_t *partition = NULL; |
| 1531 | |
| 1532 | /* It is a fatal error if the passed signal indicates more than one signal. */
| 1533 | if (!tfm_is_one_bit_set(irq_signal)) { |
| 1534 | tfm_core_panic(); |
| 1535 | } |
| 1536 | |
| 1537 | partition = tfm_spm_get_running_partition(); |
| 1538 | if (!partition) { |
| 1539 | tfm_core_panic(); |
| 1540 | } |
| 1541 | |
| 1542 | ret = get_irq_line_for_signal(partition->static_data->partition_id, |
| 1543 | irq_signal, &irq_line); |
| 1544 | /* It is a fatal error if the passed signal is not an interrupt signal. */
| 1545 | if (ret != IPC_SUCCESS) { |
| 1546 | tfm_core_panic(); |
| 1547 | } |
| 1548 | |
| 1549 | tfm_spm_hal_disable_irq(irq_line); |
| 1550 | } |
| 1551 | |
| 1552 | void tfm_spm_validate_caller(struct spm_partition_desc_t *p_cur_sp, |
| 1553 | uint32_t *p_ctx, uint32_t exc_return, |
| 1554 | bool ns_caller) |
| 1555 | { |
| 1556 | uintptr_t stacked_ctx_pos; |
| 1557 | |
| 1558 | if (ns_caller) { |
| 1559 | /* |
| 1560 | * A background IRQ can't be supported, since if an SP is executing,
| 1561 | * the preempted context of the SP can be different from the one that
| 1562 | * preempted the veneer.
| 1563 | */ |
| 1564 | if (p_cur_sp->static_data->partition_id != TFM_SP_NON_SECURE_ID) { |
| 1565 | tfm_core_panic(); |
| 1566 | } |
| 1567 | |
| 1568 | /* |
| 1569 | * The caller is non-secure; check whether the veneer stack contains
| 1570 | * multiple contexts.
| 1571 | */ |
| 1572 | stacked_ctx_pos = (uintptr_t)p_ctx + |
| 1573 | sizeof(struct tfm_state_context_t) + |
| 1574 | TFM_VENEER_STACK_GUARD_SIZE; |
| 1575 | |
| 1576 | if (is_stack_alloc_fp_space(exc_return)) { |
| 1577 | #if defined (__FPU_USED) && (__FPU_USED == 1U) |
| 1578 | if (FPU->FPCCR & FPU_FPCCR_TS_Msk) { |
| 1579 | stacked_ctx_pos += TFM_ADDTIONAL_FP_CONTEXT_WORDS * |
| 1580 | sizeof(uint32_t); |
| 1581 | } |
| 1582 | #endif |
| 1583 | stacked_ctx_pos += TFM_BASIC_FP_CONTEXT_WORDS * sizeof(uint32_t); |
| 1584 | } |
| 1585 | |
| 1586 | if (stacked_ctx_pos != p_cur_sp->runtime_data.sp_thrd.stk_top) { |
| 1587 | tfm_core_panic(); |
| 1588 | } |
| 1589 | } else if (p_cur_sp->static_data->partition_id <= 0) { |
| 1590 | tfm_core_panic(); |
| 1591 | } |
| 1592 | } |
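
/*
 * Note on the non-secure caller check above (descriptive only): the veneer
 * stack is expected to hold exactly one stacked context, i.e.
 *
 *     stk_top == p_ctx + sizeof(struct tfm_state_context_t)
 *                      + TFM_VENEER_STACK_GUARD_SIZE
 *                      (+ FP context words when the exception frame
 *                         allocated FP space)
 *
 * Any other value indicates a re-entered veneer and is treated as fatal.
 */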