/*
 * Copyright 2021 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include "hf/ffa.h"
#include "hf/manifest.h"
#include "hf/vcpu.h"
#include "hf/vm.h"

/**
 * The following enum relates to a state machine that guides the handling of
 * the Schedule Receiver Interrupt (SRI).
 * The SRI is used to signal the receiver scheduler that there are pending
 * notifications for the receiver, and it is sent when there is a valid call
 * to FFA_NOTIFICATION_SET.
 * The FFA_NOTIFICATION_INFO_GET interface must be called in the SRI handler,
 * after which the FF-A driver should process the returned list and request
 * the receiver scheduler to give the receiver CPU cycles to process the
 * notification.
 * The use of the following state machine allows for synchronized sending
 * and handling of the SRI, and avoids spurious SRIs. A spurious SRI is one
 * for which the call to FFA_NOTIFICATION_INFO_GET made while handling it
 * returns the error FFA_NO_DATA, which is plausible in an MP system.
 * The state machine also handles delaying the SRI when flag
 * FFA_NOTIFICATIONS_FLAG_DELAY_SRI is set in the arguments of the set call.
 * When delayed, the SRI is sent on the context switch to the primary
 * endpoint.
 * The SPMC is implemented under the assumption that the receiver scheduler
 * is an NWd endpoint, hence the SRI is triggered at the world switch.
 * If another notification requiring immediate action is set concurrently,
 * the SRI is triggered immediately within that same execution context.
 *
 * HANDLED is the initial state, and means a new SRI can be sent. The
 * following state transitions are possible (see the illustrative sketch
 * after the enum):
 * * HANDLED => DELAYED: Setting notification, and requesting SRI delay.
 * * HANDLED => TRIGGERED: Setting notification, and not requesting SRI
 *   delay.
 * * DELAYED => TRIGGERED: SRI was delayed, and the context switch to the
 *   receiver scheduler is being done.
 * * DELAYED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
 * * TRIGGERED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
 */
enum plat_ffa_sri_state {
	HANDLED = 0,
	DELAYED,
	TRIGGERED,
};
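
/*
 * Illustrative sketch of the transitions above; not part of the API. The
 * lock, the state variable, and sri_send() are hypothetical names used only
 * in this example, under the assumption that state updates are serialized
 * with a spinlock.
 */
#if 0
static struct spinlock sri_state_lock = SPINLOCK_INIT;
static enum plat_ffa_sri_state sri_state = HANDLED;

/* Called while handling a valid FFA_NOTIFICATION_SET. */
static void sri_on_notification_set(struct cpu *cpu, bool delay)
{
	sl_lock(&sri_state_lock);
	if (sri_state == HANDLED) {
		if (delay) {
			/* HANDLED => DELAYED: defer to the world switch. */
			sri_state = DELAYED;
		} else {
			/* HANDLED => TRIGGERED: signal the scheduler now. */
			sri_state = TRIGGERED;
			sri_send(cpu);
		}
	}
	sl_unlock(&sri_state_lock);
}

/* Called on the context switch to the receiver scheduler in the NWd. */
static void sri_on_world_switch(struct cpu *cpu)
{
	sl_lock(&sri_state_lock);
	if (sri_state == DELAYED) {
		/* DELAYED => TRIGGERED: the delayed SRI is sent now. */
		sri_state = TRIGGERED;
		sri_send(cpu);
	}
	sl_unlock(&sri_state_lock);
}

/* Called when the receiver scheduler issues FFA_NOTIFICATION_INFO_GET. */
static void sri_on_info_get(void)
{
	sl_lock(&sri_state_lock);
	/* DELAYED/TRIGGERED => HANDLED: a new SRI may be sent again. */
	sri_state = HANDLED;
	sl_unlock(&sri_state_lock);
}
#endif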

/** Returns information on features that are specific to the platform. */
struct ffa_value plat_ffa_features(uint32_t function_feature_id);
/** Returns the SPMC ID. */
struct ffa_value plat_ffa_spmc_id_get(void);

void plat_ffa_log_init(void);
void plat_ffa_set_tee_enabled(bool tee_enabled);
void plat_ffa_init(void);
bool plat_ffa_is_memory_send_valid(ffa_vm_id_t receiver_vm_id,
				   uint32_t share_func);

bool plat_ffa_is_direct_request_valid(struct vcpu *current,
				      ffa_vm_id_t sender_vm_id,
				      ffa_vm_id_t receiver_vm_id);
bool plat_ffa_is_direct_response_valid(struct vcpu *current,
				       ffa_vm_id_t sender_vm_id,
				       ffa_vm_id_t receiver_vm_id);
bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
					  struct vm *receiver_vm);
bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
				     struct ffa_value args,
				     struct ffa_value *ret);

bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
				 struct ffa_value *ret);

bool plat_ffa_rx_release_forwarded(struct vm_locked vm_locked);

bool plat_ffa_acquire_receiver_rx(struct vm_locked locked,
				  struct ffa_value *ret);

bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
					struct vm_locked receiver_locked);

bool plat_ffa_msg_send2_forward(ffa_vm_id_t receiver_vm_id,
				ffa_vm_id_t sender_vm_id,
				struct ffa_value *ret);

bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
					    ffa_vm_id_t vm_id);

bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
					  ffa_vm_id_t sender_id,
					  ffa_vm_id_t receiver_id);
bool plat_ffa_notifications_update_bindings_forward(
	ffa_vm_id_t receiver_id, ffa_vm_id_t sender_id, uint32_t flags,
	ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret);

bool plat_ffa_is_notification_set_valid(struct vcpu *current,
					ffa_vm_id_t sender_id,
					ffa_vm_id_t receiver_id);

bool plat_ffa_notification_set_forward(ffa_vm_id_t sender_vm_id,
				       ffa_vm_id_t receiver_vm_id,
				       uint32_t flags,
				       ffa_notifications_bitmap_t bitmap,
				       struct ffa_value *ret);

bool plat_ffa_is_notification_get_valid(struct vcpu *current,
					ffa_vm_id_t receiver_id,
					uint32_t flags);

bool plat_ffa_notifications_get_from_sp(struct vm_locked receiver_locked,
					ffa_vcpu_index_t vcpu_id,
					ffa_notifications_bitmap_t *from_sp,
					struct ffa_value *ret);

bool plat_ffa_notifications_get_framework_notifications(
	struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
	uint32_t flags, ffa_vcpu_index_t vcpu_id, struct ffa_value *ret);

void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked);

void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked);

void plat_ffa_rxtx_unmap_forward(ffa_vm_id_t id);
/**
 * Checks whether managed exit is supported by the given SP.
 */
bool plat_ffa_vm_managed_exit_supported(struct vm *vm);

/**
 * Encodes a memory handle according to section 5.10.2 of the FF-A v1.0 spec.
 */
ffa_memory_handle_t plat_ffa_memory_handle_make(uint64_t index);

/**
 * Checks whether the given handle was allocated by the current world,
 * according to the handle encoding rules.
 */
bool plat_ffa_memory_handle_allocated_by_current_world(
	ffa_memory_handle_t handle);
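
/*
 * Illustrative sketch of one possible encoding, with the allocating world
 * recorded in the handle's most significant bit (set for the hypervisor,
 * clear for the SPMC). The mask name and the bit choice are assumptions
 * made for this example only.
 */
#if 0
#define HANDLE_ALLOCATOR_HYPERVISOR (UINT64_C(1) << 63)

/* Hypervisor build: tag the handle as allocated in the NWd. */
ffa_memory_handle_t plat_ffa_memory_handle_make(uint64_t index)
{
	return index | HANDLE_ALLOCATOR_HYPERVISOR;
}

bool plat_ffa_memory_handle_allocated_by_current_world(
	ffa_memory_handle_t handle)
{
	return (handle & HANDLE_ALLOCATOR_HYPERVISOR) != 0;
}
#endif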

/**
 * For non-secure memory, retrieves the NS mode if the partition manager
 * supports it. The SPMC returns MM_MODE_NS, and the hypervisor returns 0,
 * as the hypervisor only deals with NS accesses by default.
 */
uint32_t plat_ffa_other_world_mode(void);

/**
 * For memory management operations between the SWd and the NWd, the SPMC
 * might need to operate on NS memory. The function below returns the mode
 * to use in the mm.h library, depending on the memory owner's ID.
 */
uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id);
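
/*
 * Illustrative usage sketch: when mapping memory on behalf of an owner, the
 * extra mode bit is OR'ed into the usual mm.h modes. In the hypervisor
 * build both helpers contribute 0. The variables are hypothetical.
 */
#if 0
uint32_t mode = MM_MODE_R | MM_MODE_W | plat_ffa_owner_world_mode(owner_id);
#endif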

/**
 * Returns the FF-A partition info VM/SP properties given the VM ID.
 */
ffa_partition_properties_t plat_ffa_partition_properties(
	ffa_vm_id_t vm_id, const struct vm *target);

/**
 * Gets the NWd VM's structure.
 */
struct vm_locked plat_ffa_vm_find_locked(ffa_vm_id_t vm_id);

struct vm_locked plat_ffa_vm_find_locked_create(ffa_vm_id_t vm_id);

/**
 * Creates a notifications bitmap for the VM of the given ID.
 */
struct ffa_value plat_ffa_notifications_bitmap_create(
	ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count);

/**
 * Issues an FFA_NOTIFICATION_BITMAP_CREATE call.
 * Returns true if the call succeeds, and false if it returns FFA_ERROR_32.
 */
bool plat_ffa_notifications_bitmap_create_call(ffa_vm_id_t vm_id,
					       ffa_vcpu_count_t vcpu_count);

/**
 * Destroys the notifications bitmap for the given VM ID.
 */
struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id);

/**
 * Helper to get the struct notifications, depending on the sender's ID.
 */
struct notifications *plat_ffa_vm_get_notifications_senders_world(
	struct vm_locked vm_locked, ffa_vm_id_t sender_id);

/**
 * Helper to check whether an FF-A ID is a VM ID.
 */
bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id);
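
/*
 * Illustrative sketch, assuming the FF-A convention that endpoint IDs with
 * the most significant bit set denote SPs, while VM IDs have it clear. The
 * mask name is hypothetical.
 */
#if 0
#define SWD_ID_MASK ((ffa_vm_id_t)(UINT16_C(1) << 15))

bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id)
{
	return (vm_id & SWD_ID_MASK) == 0;
}
#endif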

/**
 * Forwards normal world calls of the FFA_RUN ABI to the other world.
 */
bool plat_ffa_run_forward(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
			  struct ffa_value *ret);

bool plat_ffa_notification_info_get_call(struct ffa_value *ret);

bool plat_ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
					uint32_t *lists_sizes,
					uint32_t *lists_count,
					const uint32_t ids_count_max);

/** Helper to set the current SRI state. */
void plat_ffa_sri_state_set(enum plat_ffa_sri_state state);

/**
 * Helper to send the SRI and safely update `ffa_sri_state`, if there has
 * been a call to FFA_NOTIFICATION_SET and the SRI has been delayed.
 * To be called at a context switch to the NWd.
 */
void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu);

/**
 * Helper to send the SRI and safely update `ffa_sri_state`, if it hasn't
 * been delayed in the call to FFA_NOTIFICATION_SET.
 */
void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu);

/**
 * Initializes the Schedule Receiver Interrupt needed in the context of
 * notifications support.
 */
void plat_ffa_sri_init(struct cpu *cpu);
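
/*
 * Illustrative call site (hypothetical function name): on the path that
 * hands execution back to the NWd, a delayed SRI is flushed so the receiver
 * scheduler observes it right after the world switch.
 */
#if 0
void world_switch_to_nwd(struct cpu *cpu)
{
	plat_ffa_sri_trigger_if_delayed(cpu);
	/* ... complete the return to the normal world ... */
}
#endif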

void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
					    uint32_t *lists_sizes,
					    uint32_t *lists_count,
					    const uint32_t ids_count_max);

bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current);
bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current);

bool plat_ffa_msg_wait_prepare(struct vcpu *current, struct vcpu **next,
			       struct ffa_value *ret_args);

/**
 * Checks if the current SP can resume the target VM/SP using the FFA_RUN
 * ABI.
 */
bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
			 ffa_vcpu_index_t vcpu_idx, struct ffa_value *run_ret,
			 struct vcpu **next);

/**
 * Deactivates an interrupt.
 */
int64_t plat_ffa_interrupt_deactivate(uint32_t pint_id, uint32_t vint_id,
				      struct vcpu *current);

struct ffa_value plat_ffa_handle_secure_interrupt(struct vcpu *current,
						  struct vcpu **next,
						  bool from_normal_world);
struct ffa_value plat_ffa_normal_world_resume(struct vcpu *current,
					      struct vcpu **next);
struct ffa_value plat_ffa_preempted_vcpu_resume(struct vcpu *current,
						struct vcpu **next);

bool plat_ffa_inject_notification_pending_interrupt(
	struct vcpu_locked next_locked, struct vcpu *current,
	struct vm_locked receiver_locked);

void plat_ffa_partition_info_get_forward(const struct ffa_uuid *uuid,
					 const uint32_t flags,
					 struct ffa_partition_info *partitions,
					 ffa_vm_count_t *ret_count);

void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
				       paddr_t fdt_addr,
				       size_t fdt_allocated_size,
				       const struct manifest_vm *manifest_vm,
				       struct mpool *ppool);

/**
 * Returns true if the FFA_SECONDARY_EP_REGISTER interface is supported at
 * the virtual FF-A instance.
 */
bool plat_ffa_is_secondary_ep_register_supported(void);

/**
 * Performs checks for the state transition being requested by the partition,
 * based on its runtime model, and returns false if an illegal transition is
 * being attempted.
 */
bool plat_ffa_check_runtime_state_transition(struct vcpu *current,
					     ffa_vm_id_t vm_id,
					     ffa_vm_id_t receiver_vm_id,
					     struct vcpu *vcpu, uint32_t func,
					     enum vcpu_state *next_state);

void plat_ffa_wind_call_chain_ffa_direct_req(
	struct vcpu_locked current_locked,
	struct vcpu_locked receiver_vcpu_locked);

void plat_ffa_unwind_call_chain_ffa_direct_resp(struct vcpu *current,
						struct vcpu *next);