/*
 * Copyright 2021 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */
| 8 | |
| 9 | #pragma once |
| 10 | |
| 11 | #include "hf/ffa.h" |
Raghu Krishnamurthy | b49549e | 2021-07-02 08:27:38 -0700 | [diff] [blame] | 12 | #include "hf/manifest.h" |
Olivier Deprez | 55a189e | 2021-06-09 15:45:27 +0200 | [diff] [blame] | 13 | #include "hf/vcpu.h" |
J-Alves | c003a7a | 2021-03-18 13:06:53 +0000 | [diff] [blame] | 14 | #include "hf/vm.h" |
Daniel Boulby | b2fb80e | 2021-02-03 15:09:23 +0000 | [diff] [blame] | 15 | |
J-Alves | 1339402 | 2021-06-30 13:48:49 +0100 | [diff] [blame] | 16 | /** |
| 17 | * The following enum relates to a state machine to guide the handling of the |
| 18 | * Scheduler Receiver Interrupt. |
| 19 | * The SRI is used to signal the receiver scheduler that there are pending |
| 20 | * notifications for the receiver, and it is sent when there is a valid call to |
| 21 | * FFA_NOTIFICATION_SET. |
| 22 | * The FFA_NOTIFICATION_INFO_GET interface must be called in the SRI handler, |
| 23 | * after which the FF-A driver should process the returned list, and request |
| 24 | * the receiver scheduler to give the receiver CPU cycles to process the |
| 25 | * notification. |
| 26 | * The use of the following state machine allows for synchronized sending |
| 27 | * and handling of the SRI, as well as avoiding the occurrence of spurious |
| 28 | * SRI. A spurious SRI would be one such that upon handling a call to |
| 29 | * FFA_NOTIFICATION_INFO_GET would return error FFA_NO_DATA, which is plausible |
| 30 | * in an MP system. |
| 31 | * The state machine also aims at resolving the delay of the SRI by setting |
| 32 | * flag FFA_NOTIFICATIONS_FLAG_DELAY_SRI in the arguments of the set call. By |
| 33 | * delaying, the SRI is sent in context switching to the primary endpoint. |
| 34 | * The SPMC is implemented under the assumption the receiver scheduler is a |
| 35 | * NWd endpoint, hence the SRI is triggered at the world switch. |
| 36 | * If concurrently another notification is set that requires immediate action, |
| 37 | * the SRI is triggered immediately within that same execution context. |
| 38 | * |
| 39 | * HANDLED is the initial state, and means a new SRI can be sent. The following |
| 40 | * state transitions are possible: |
| 41 | * * HANDLED => DELAYED: Setting notification, and requesting SRI delay. |
| 42 | * * HANDLED => TRIGGERED: Setting notification, and not requesting SRI delay. |
| 43 | * * DELAYED => TRIGGERED: SRI was delayed, and the context switch to the |
| 44 | * receiver scheduler is being done. |
| 45 | * * DELAYED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET. |
| 46 | * * TRIGGERED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET. |
| 47 | */ |
| 48 | enum plat_ffa_sri_state { |
| 49 | HANDLED = 0, |
| 50 | DELAYED, |
| 51 | TRIGGERED, |
| 52 | }; |
| 53 | |
Daniel Boulby | 87b2dc8 | 2021-08-04 14:07:43 +0100 | [diff] [blame] | 54 | /** Returns information on features that are specific to the platform. */ |
J-Alves | 6f72ca8 | 2021-11-01 12:34:58 +0000 | [diff] [blame] | 55 | struct ffa_value plat_ffa_features(uint32_t function_feature_id); |
Daniel Boulby | b2fb80e | 2021-02-03 15:09:23 +0000 | [diff] [blame] | 56 | /** Returns the SPMC ID. */ |
| 57 | struct ffa_value plat_ffa_spmc_id_get(void); |
Olivier Deprez | 55a189e | 2021-06-09 15:45:27 +0200 | [diff] [blame] | 58 | |
| 59 | void plat_ffa_log_init(void); |
J-Alves | a09ac2d | 2022-06-07 13:46:59 +0100 | [diff] [blame] | 60 | void plat_ffa_set_tee_enabled(bool tee_enabled); |
Raghu Krishnamurthy | f5fec20 | 2022-09-30 07:25:10 -0700 | [diff] [blame] | 61 | void plat_ffa_init(struct mpool *ppool); |
Maksims Svecovs | a3d570c | 2021-12-08 11:16:32 +0000 | [diff] [blame] | 62 | bool plat_ffa_is_memory_send_valid(ffa_vm_id_t receiver_vm_id, |
| 63 | uint32_t share_func); |
| 64 | |
Olivier Deprez | 55a189e | 2021-06-09 15:45:27 +0200 | [diff] [blame] | 65 | bool plat_ffa_is_direct_request_valid(struct vcpu *current, |
| 66 | ffa_vm_id_t sender_vm_id, |
| 67 | ffa_vm_id_t receiver_vm_id); |
| 68 | bool plat_ffa_is_direct_response_valid(struct vcpu *current, |
| 69 | ffa_vm_id_t sender_vm_id, |
| 70 | ffa_vm_id_t receiver_vm_id); |
J-Alves | 439ac97 | 2021-11-18 17:32:03 +0000 | [diff] [blame] | 71 | bool plat_ffa_is_direct_request_supported(struct vm *sender_vm, |
| 72 | struct vm *receiver_vm); |
Olivier Deprez | 55a189e | 2021-06-09 15:45:27 +0200 | [diff] [blame] | 73 | bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id, |
| 74 | struct ffa_value args, |
| 75 | struct ffa_value *ret); |
Federico Recanati | 25053ee | 2022-03-14 15:01:53 +0100 | [diff] [blame] | 76 | |
Federico Recanati | 7bef0b9 | 2022-03-17 14:56:22 +0100 | [diff] [blame] | 77 | bool plat_ffa_rx_release_forward(struct vm_locked vm_locked, |
| 78 | struct ffa_value *ret); |
| 79 | |
Federico Recanati | 7bef0b9 | 2022-03-17 14:56:22 +0100 | [diff] [blame] | 80 | bool plat_ffa_acquire_receiver_rx(struct vm_locked locked, |
Federico Recanati | 644f046 | 2022-03-17 12:04:00 +0100 | [diff] [blame] | 81 | struct ffa_value *ret); |
| 82 | |
Federico Recanati | 25053ee | 2022-03-14 15:01:53 +0100 | [diff] [blame] | 83 | bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked, |
| 84 | struct vm_locked receiver_locked); |
| 85 | |
| 86 | bool plat_ffa_msg_send2_forward(ffa_vm_id_t receiver_vm_id, |
| 87 | ffa_vm_id_t sender_vm_id, |
| 88 | struct ffa_value *ret); |
| 89 | |
J-Alves | a0f317d | 2021-06-09 13:31:59 +0100 | [diff] [blame] | 90 | bool plat_ffa_is_notifications_create_valid(struct vcpu *current, |
| 91 | ffa_vm_id_t vm_id); |
J-Alves | c003a7a | 2021-03-18 13:06:53 +0000 | [diff] [blame] | 92 | |
| 93 | bool plat_ffa_is_notifications_bind_valid(struct vcpu *current, |
| 94 | ffa_vm_id_t sender_id, |
| 95 | ffa_vm_id_t receiver_id); |
J-Alves | b15e940 | 2021-09-08 11:44:42 +0100 | [diff] [blame] | 96 | bool plat_ffa_notifications_update_bindings_forward( |
| 97 | ffa_vm_id_t receiver_id, ffa_vm_id_t sender_id, uint32_t flags, |
| 98 | ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret); |
J-Alves | c003a7a | 2021-03-18 13:06:53 +0000 | [diff] [blame] | 99 | |
J-Alves | aa79c01 | 2021-07-09 14:29:45 +0100 | [diff] [blame] | 100 | bool plat_ffa_is_notification_set_valid(struct vcpu *current, |
| 101 | ffa_vm_id_t sender_id, |
| 102 | ffa_vm_id_t receiver_id); |
| 103 | |
J-Alves | de7bd2f | 2021-09-09 19:54:35 +0100 | [diff] [blame] | 104 | bool plat_ffa_notification_set_forward(ffa_vm_id_t sender_vm_id, |
| 105 | ffa_vm_id_t receiver_vm_id, |
| 106 | uint32_t flags, |
| 107 | ffa_notifications_bitmap_t bitmap, |
| 108 | struct ffa_value *ret); |
| 109 | |
J-Alves | aa79c01 | 2021-07-09 14:29:45 +0100 | [diff] [blame] | 110 | bool plat_ffa_is_notification_get_valid(struct vcpu *current, |
J-Alves | fc95a30 | 2022-04-22 14:18:23 +0100 | [diff] [blame] | 111 | ffa_vm_id_t receiver_id, |
| 112 | uint32_t flags); |
J-Alves | aa79c01 | 2021-07-09 14:29:45 +0100 | [diff] [blame] | 113 | |
J-Alves | 98ff956 | 2021-09-09 14:39:41 +0100 | [diff] [blame] | 114 | bool plat_ffa_notifications_get_from_sp(struct vm_locked receiver_locked, |
| 115 | ffa_vcpu_index_t vcpu_id, |
| 116 | ffa_notifications_bitmap_t *from_sp, |
| 117 | struct ffa_value *ret); |
| 118 | |
J-Alves | d605a09 | 2022-03-28 14:20:48 +0100 | [diff] [blame] | 119 | bool plat_ffa_notifications_get_framework_notifications( |
| 120 | struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk, |
| 121 | uint32_t flags, ffa_vcpu_index_t vcpu_id, struct ffa_value *ret); |
| 122 | |
Federico Recanati | 8d8b1cf | 2022-04-14 13:16:00 +0200 | [diff] [blame] | 123 | void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked); |
| 124 | |
Federico Recanati | 10bd06c | 2022-02-23 17:32:59 +0100 | [diff] [blame] | 125 | void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked); |
| 126 | |
J-Alves | 7007993 | 2022-12-07 17:32:20 +0000 | [diff] [blame] | 127 | void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked); |
Federico Recanati | 8da9e33 | 2022-02-10 11:00:17 +0100 | [diff] [blame] | 128 | |
Olivier Deprez | 55a189e | 2021-06-09 15:45:27 +0200 | [diff] [blame] | 129 | /** |
Maksims Svecovs | 9ddf86a | 2021-05-06 17:17:21 +0100 | [diff] [blame] | 130 | * Checks whether managed exit is supported by given SP. |
| 131 | */ |
| 132 | bool plat_ffa_vm_managed_exit_supported(struct vm *vm); |
| 133 | |
| 134 | /** |
Olivier Deprez | 55a189e | 2021-06-09 15:45:27 +0200 | [diff] [blame] | 135 | * Encodes memory handle according to section 5.10.2 of the FF-A v1.0 spec. |
| 136 | */ |
| 137 | ffa_memory_handle_t plat_ffa_memory_handle_make(uint64_t index); |
| 138 | |
| 139 | /** |
| 140 | * Checks whether given handle was allocated by current world, according to |
| 141 | * handle encoding rules. |
| 142 | */ |
| 143 | bool plat_ffa_memory_handle_allocated_by_current_world( |
| 144 | ffa_memory_handle_t handle); |
Maksims Svecovs | b596eab | 2021-04-27 00:52:27 +0100 | [diff] [blame] | 145 | |
| 146 | /** |
J-Alves | 7db3200 | 2021-12-14 14:44:50 +0000 | [diff] [blame] | 147 | * For non-secure memory, retrieve the NS mode if the partition manager supports |
| 148 | * it. The SPMC will return MM_MODE_NS, and the hypervisor 0 as it only deals |
| 149 | * with NS accesses by default. |
| 150 | */ |
| 151 | uint32_t plat_ffa_other_world_mode(void); |
| 152 | |
| 153 | /** |
| 154 | * For memory management operations between SWd and NWd, the SPMC might need |
| 155 | * to operate NS-memory. The function below returns the mode to use in the mm.h |
| 156 | * library, depending on the memory ownder's id. |
| 157 | */ |
| 158 | uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id); |
| 159 | |
| 160 | /** |
Maksims Svecovs | b596eab | 2021-04-27 00:52:27 +0100 | [diff] [blame] | 161 | * Return the FF-A partition info VM/SP properties given the VM id. |
| 162 | */ |
| 163 | ffa_partition_properties_t plat_ffa_partition_properties( |
J-Alves | fa78209 | 2021-10-13 16:02:16 +0100 | [diff] [blame] | 164 | ffa_vm_id_t vm_id, const struct vm *target); |
J-Alves | a0f317d | 2021-06-09 13:31:59 +0100 | [diff] [blame] | 165 | |
| 166 | /** |
| 167 | * Get NWd VM's structure. |
| 168 | */ |
| 169 | struct vm_locked plat_ffa_vm_find_locked(ffa_vm_id_t vm_id); |
| 170 | |
Federico Recanati | 8d8b1cf | 2022-04-14 13:16:00 +0200 | [diff] [blame] | 171 | struct vm_locked plat_ffa_vm_find_locked_create(ffa_vm_id_t vm_id); |
| 172 | |
J-Alves | a0f317d | 2021-06-09 13:31:59 +0100 | [diff] [blame] | 173 | /** |
| 174 | * Creates a bitmap for the VM of the given ID. |
| 175 | */ |
| 176 | struct ffa_value plat_ffa_notifications_bitmap_create( |
| 177 | ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count); |
| 178 | |
| 179 | /** |
J-Alves | a9c7cba | 2021-08-25 16:26:11 +0100 | [diff] [blame] | 180 | * Issues a FFA_NOTIFICATION_BITMAP_CREATE. |
J-Alves | a4730db | 2021-11-02 10:31:01 +0000 | [diff] [blame] | 181 | * Returns true if the call goes well, and false if call returns with |
| 182 | * FFA_ERROR_32. |
J-Alves | a9c7cba | 2021-08-25 16:26:11 +0100 | [diff] [blame] | 183 | */ |
| 184 | bool plat_ffa_notifications_bitmap_create_call(ffa_vm_id_t vm_id, |
J-Alves | a4730db | 2021-11-02 10:31:01 +0000 | [diff] [blame] | 185 | ffa_vcpu_count_t vcpu_count); |
J-Alves | a9c7cba | 2021-08-25 16:26:11 +0100 | [diff] [blame] | 186 | |
| 187 | /** |
J-Alves | a0f317d | 2021-06-09 13:31:59 +0100 | [diff] [blame] | 188 | * Destroys the notifications bitmap for the given VM ID. |
| 189 | */ |
| 190 | struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id); |
J-Alves | c003a7a | 2021-03-18 13:06:53 +0000 | [diff] [blame] | 191 | |
| 192 | /** |
| 193 | * Helper to get the struct notifications, depending on the sender's id. |
| 194 | */ |
| 195 | struct notifications *plat_ffa_vm_get_notifications_senders_world( |
| 196 | struct vm_locked vm_locked, ffa_vm_id_t sender_id); |
| 197 | |
| 198 | /** |
| 199 | * Helper to check if FF-A ID is a VM ID. |
| 200 | */ |
| 201 | bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id); |
Raghu Krishnamurthy | 62f97a7 | 2021-07-27 02:14:59 -0700 | [diff] [blame] | 202 | |
| 203 | /** |
| 204 | * Forward normal world calls of FFA_RUN ABI to other world. |
| 205 | */ |
| 206 | bool plat_ffa_run_forward(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx, |
| 207 | struct ffa_value *ret); |
J-Alves | c8e8a22 | 2021-06-08 17:33:52 +0100 | [diff] [blame] | 208 | |
| 209 | bool plat_ffa_notification_info_get_call(struct ffa_value *ret); |
| 210 | |
| 211 | bool plat_ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count, |
| 212 | uint32_t *lists_sizes, |
| 213 | uint32_t *lists_count, |
| 214 | const uint32_t ids_count_max); |
Raghu Krishnamurthy | ea6d25f | 2021-09-14 15:27:06 -0700 | [diff] [blame] | 215 | |
J-Alves | 1339402 | 2021-06-30 13:48:49 +0100 | [diff] [blame] | 216 | /** Helper to set SRI current state. */ |
| 217 | void plat_ffa_sri_state_set(enum plat_ffa_sri_state state); |
| 218 | |
| 219 | /** |
| 220 | * Helper to send SRI and safely update `ffa_sri_state`, if there has been |
| 221 | * a call to FFA_NOTIFICATION_SET, and the SRI has been delayed. |
| 222 | * To be called at a context switch to the NWd. |
| 223 | */ |
| 224 | void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu); |
| 225 | |
| 226 | /** |
| 227 | * Helper to send SRI and safely update `ffa_sri_state`, if it hasn't been |
| 228 | * delayed in call to FFA_NOTIFICATION_SET. |
| 229 | */ |
| 230 | void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu); |
| 231 | |
| 232 | /** |
| 233 | * Initialize Schedule Receiver Interrupts needed in the context of |
| 234 | * notifications support. |
| 235 | */ |
| 236 | void plat_ffa_sri_init(struct cpu *cpu); |
| 237 | |
J-Alves | ca058c2 | 2021-09-10 14:02:07 +0100 | [diff] [blame] | 238 | void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count, |
| 239 | uint32_t *lists_sizes, |
| 240 | uint32_t *lists_count, |
| 241 | const uint32_t ids_count_max); |
| 242 | |
Raghu Krishnamurthy | ea6d25f | 2021-09-14 15:27:06 -0700 | [diff] [blame] | 243 | bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current); |
| 244 | bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current); |
Madhukar Pappireddy | b11e0d1 | 2021-08-02 19:44:35 -0500 | [diff] [blame] | 245 | |
Madhukar Pappireddy | bd10e57 | 2023-03-06 16:39:49 -0600 | [diff] [blame^] | 246 | struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked, |
Madhukar Pappireddy | dd88320 | 2022-10-24 16:49:28 -0500 | [diff] [blame] | 247 | struct vcpu **next); |
Madhukar Pappireddy | 5522c67 | 2021-12-17 16:35:51 -0600 | [diff] [blame] | 248 | |
Madhukar Pappireddy | b11e0d1 | 2021-08-02 19:44:35 -0500 | [diff] [blame] | 249 | /** |
| 250 | * Check if current SP can resume target VM/SP using FFA_RUN ABI. |
| 251 | */ |
Madhukar Pappireddy | bd10e57 | 2023-03-06 16:39:49 -0600 | [diff] [blame^] | 252 | bool plat_ffa_run_checks(struct vcpu_locked current_locked, |
| 253 | ffa_vm_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx, |
| 254 | struct ffa_value *run_ret, struct vcpu **next); |
Madhukar Pappireddy | f675bb6 | 2021-08-03 12:57:10 -0500 | [diff] [blame] | 255 | |
| 256 | /** |
| 257 | * Deactivate interrupt. |
| 258 | */ |
| 259 | int64_t plat_ffa_interrupt_deactivate(uint32_t pint_id, uint32_t vint_id, |
| 260 | struct vcpu *current); |
Madhukar Pappireddy | cbecc96 | 2021-08-03 13:11:57 -0500 | [diff] [blame] | 261 | |
Madhukar Pappireddy | dc0c801 | 2022-06-21 15:23:14 -0500 | [diff] [blame] | 262 | struct ffa_value plat_ffa_handle_secure_interrupt(struct vcpu *current, |
| 263 | struct vcpu **next, |
| 264 | bool from_normal_world); |
J-Alves | 6e2abc6 | 2021-12-02 14:58:56 +0000 | [diff] [blame] | 265 | bool plat_ffa_inject_notification_pending_interrupt( |
Madhukar Pappireddy | bd10e57 | 2023-03-06 16:39:49 -0600 | [diff] [blame^] | 266 | struct vcpu_locked next_locked, struct vcpu_locked current_locked, |
J-Alves | 6e2abc6 | 2021-12-02 14:58:56 +0000 | [diff] [blame] | 267 | struct vm_locked receiver_locked); |
Olivier Deprez | e562e54 | 2020-06-11 17:31:54 +0200 | [diff] [blame] | 268 | |
Raghu Krishnamurthy | ef432cb | 2022-12-29 06:56:32 -0800 | [diff] [blame] | 269 | bool plat_ffa_partition_info_get_regs_forward( |
| 270 | const struct ffa_uuid *uuid, const uint16_t start_index, |
| 271 | const uint16_t tag, struct ffa_partition_info *partitions, |
| 272 | uint16_t partitions_len, ffa_vm_count_t *ret_count); |
| 273 | |
Olivier Deprez | e562e54 | 2020-06-11 17:31:54 +0200 | [diff] [blame] | 274 | void plat_ffa_partition_info_get_forward(const struct ffa_uuid *uuid, |
Daniel Boulby | b46cad1 | 2021-12-13 17:47:21 +0000 | [diff] [blame] | 275 | const uint32_t flags, |
Olivier Deprez | e562e54 | 2020-06-11 17:31:54 +0200 | [diff] [blame] | 276 | struct ffa_partition_info *partitions, |
| 277 | ffa_vm_count_t *ret_count); |
Raghu Krishnamurthy | b49549e | 2021-07-02 08:27:38 -0700 | [diff] [blame] | 278 | |
| 279 | void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked, |
| 280 | paddr_t fdt_addr, |
| 281 | size_t fdt_allocated_size, |
| 282 | const struct manifest_vm *manifest_vm, |
J-Alves | 77b6f4f | 2023-03-15 11:34:49 +0000 | [diff] [blame] | 283 | const struct boot_params *boot_params, |
Raghu Krishnamurthy | b49549e | 2021-07-02 08:27:38 -0700 | [diff] [blame] | 284 | struct mpool *ppool); |
Olivier Deprez | d614d32 | 2021-06-18 15:21:00 +0200 | [diff] [blame] | 285 | |
| 286 | /** |
| 287 | * Returns true if the FFA_SECONDARY_EP_REGISTER interface is supported at |
| 288 | * the virtual FF-A instance. |
| 289 | */ |
| 290 | bool plat_ffa_is_secondary_ep_register_supported(void); |
Madhukar Pappireddy | 0ea239a | 2022-06-21 17:26:57 -0500 | [diff] [blame] | 291 | |
| 292 | /** |
| 293 | * Perform checks for the state transition being requested by the Partition |
| 294 | * based on it's runtime model and return false if an illegal transition is |
| 295 | * being performed. |
| 296 | */ |
Madhukar Pappireddy | bd10e57 | 2023-03-06 16:39:49 -0600 | [diff] [blame^] | 297 | bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked, |
Madhukar Pappireddy | 0ea239a | 2022-06-21 17:26:57 -0500 | [diff] [blame] | 298 | ffa_vm_id_t vm_id, |
| 299 | ffa_vm_id_t receiver_vm_id, |
Madhukar Pappireddy | bd10e57 | 2023-03-06 16:39:49 -0600 | [diff] [blame^] | 300 | struct vcpu_locked locked_vcpu, |
| 301 | uint32_t func, |
Madhukar Pappireddy | 0ea239a | 2022-06-21 17:26:57 -0500 | [diff] [blame] | 302 | enum vcpu_state *next_state); |
Madhukar Pappireddy | 49fe670 | 2022-06-21 17:52:23 -0500 | [diff] [blame] | 303 | |
Madhukar Pappireddy | d46c06e | 2022-06-21 18:14:52 -0500 | [diff] [blame] | 304 | struct vcpu *plat_ffa_unwind_nwd_call_chain_interrupt(struct vcpu *current); |
Madhukar Pappireddy | bd10e57 | 2023-03-06 16:39:49 -0600 | [diff] [blame^] | 305 | void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked, |
Madhukar Pappireddy | 1480fce | 2022-06-21 18:09:25 -0500 | [diff] [blame] | 306 | struct vcpu_locked target_locked); |
| 307 | |
Madhukar Pappireddy | 49fe670 | 2022-06-21 17:52:23 -0500 | [diff] [blame] | 308 | void plat_ffa_wind_call_chain_ffa_direct_req( |
| 309 | struct vcpu_locked current_locked, |
| 310 | struct vcpu_locked receiver_vcpu_locked); |
Madhukar Pappireddy | c0fb87e | 2022-06-21 17:59:15 -0500 | [diff] [blame] | 311 | |
Madhukar Pappireddy | bd10e57 | 2023-03-06 16:39:49 -0600 | [diff] [blame^] | 312 | void plat_ffa_unwind_call_chain_ffa_direct_resp( |
| 313 | struct vcpu_locked current_locked, struct vcpu_locked next_locked); |
Madhukar Pappireddy | 046dad0 | 2022-06-21 18:43:33 -0500 | [diff] [blame] | 314 | |
Madhukar Pappireddy | 486360d | 2022-09-06 15:32:24 -0500 | [diff] [blame] | 315 | void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked, |
| 316 | struct vm_locked vm_locked); |
J-Alves | 6665225 | 2022-07-06 09:49:51 +0100 | [diff] [blame] | 317 | |
J-Alves | b5084cf | 2022-07-06 14:20:12 +0100 | [diff] [blame] | 318 | bool plat_ffa_intercept_direct_response(struct vcpu_locked current_locked, |
| 319 | struct vcpu **next, |
| 320 | struct ffa_value to_ret, |
| 321 | struct ffa_value *signal_interrupt); |
J-Alves | 6665225 | 2022-07-06 09:49:51 +0100 | [diff] [blame] | 322 | /* |
| 323 | * Handles FF-A memory share calls with recipients from the other world. |
| 324 | */ |
| 325 | struct ffa_value plat_ffa_other_world_mem_send( |
| 326 | struct vm *from, uint32_t share_func, |
| 327 | struct ffa_memory_region **memory_region, uint32_t length, |
| 328 | uint32_t fragment_length, struct mpool *page_pool); |
J-Alves | fc19b37 | 2022-07-06 12:17:35 +0100 | [diff] [blame] | 329 | |
| 330 | /** |
| 331 | * Handles the memory reclaim if a memory handle from the other world is |
| 332 | * provided. |
| 333 | */ |
| 334 | struct ffa_value plat_ffa_other_world_mem_reclaim( |
| 335 | struct vm *to, ffa_memory_handle_t handle, |
| 336 | ffa_memory_region_flags_t flags, struct mpool *page_pool); |
Madhukar Pappireddy | 2f76e49 | 2022-09-06 15:21:59 -0500 | [diff] [blame] | 337 | |
J-Alves | b5084cf | 2022-07-06 14:20:12 +0100 | [diff] [blame] | 338 | /** |
| 339 | * Handles the memory retrieve request if the specified memory handle belongs |
| 340 | * to the other world. |
| 341 | */ |
| 342 | struct ffa_value plat_ffa_other_world_mem_retrieve( |
| 343 | struct vm_locked to_locked, struct ffa_memory_region *retrieve_request, |
| 344 | uint32_t length, struct mpool *page_pool); |
J-Alves | fdd2927 | 2022-07-19 13:16:31 +0100 | [diff] [blame] | 345 | |
| 346 | /** |
| 347 | * Handles the continuation of the memory send operation in case the memory |
| 348 | * region descriptor contains multiple segments. |
| 349 | */ |
| 350 | struct ffa_value plat_ffa_other_world_mem_send_continue( |
| 351 | struct vm *from, void *fragment, uint32_t fragment_length, |
| 352 | ffa_memory_handle_t handle, struct mpool *page_pool); |
Madhukar Pappireddy | 5fd3248 | 2022-01-07 14:53:26 -0600 | [diff] [blame] | 353 | |
Madhukar Pappireddy | bd10e57 | 2023-03-06 16:39:49 -0600 | [diff] [blame^] | 354 | bool plat_ffa_is_direct_response_interrupted(struct vcpu_locked current_locked); |
J-Alves | 27b7196 | 2022-12-12 15:29:58 +0000 | [diff] [blame] | 355 | |
| 356 | /** |
| 357 | * This FF-A v1.0 FFA_MSG_SEND interface. |
| 358 | * Implemented for the Hypervisor, but not in the SPMC. |
| 359 | */ |
| 360 | struct ffa_value plat_ffa_msg_send(ffa_vm_id_t sender_vm_id, |
| 361 | ffa_vm_id_t receiver_vm_id, uint32_t size, |
| 362 | struct vcpu *current, struct vcpu **next); |
Madhukar Pappireddy | 1f2f213 | 2023-02-14 17:48:44 -0600 | [diff] [blame] | 363 | |
Madhukar Pappireddy | bd10e57 | 2023-03-06 16:39:49 -0600 | [diff] [blame^] | 364 | struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked, |
Madhukar Pappireddy | 184501c | 2023-05-23 17:24:06 -0500 | [diff] [blame] | 365 | struct vcpu **next, |
| 366 | uint32_t timeout_low, |
| 367 | uint32_t timeout_high); |