/*
 * Copyright 2021 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */
| 8 | |
| 9 | #pragma once |
| 10 | |
| 11 | #include "hf/ffa.h" |
Raghu Krishnamurthy | b49549e | 2021-07-02 08:27:38 -0700 | [diff] [blame] | 12 | #include "hf/manifest.h" |
Olivier Deprez | 55a189e | 2021-06-09 15:45:27 +0200 | [diff] [blame] | 13 | #include "hf/vcpu.h" |
J-Alves | c003a7a | 2021-03-18 13:06:53 +0000 | [diff] [blame] | 14 | #include "hf/vm.h" |
Daniel Boulby | b2fb80e | 2021-02-03 15:09:23 +0000 | [diff] [blame] | 15 | |
/**
 * The following enum relates to a state machine to guide the handling of the
 * Schedule Receiver Interrupt (SRI).
 * The SRI is used to signal the receiver scheduler that there are pending
 * notifications for the receiver, and it is sent when there is a valid call to
 * FFA_NOTIFICATION_SET.
 * The FFA_NOTIFICATION_INFO_GET interface must be called in the SRI handler,
 * after which the FF-A driver should process the returned list, and request
 * the receiver scheduler to give the receiver CPU cycles to process the
 * notification.
 * The use of the following state machine allows for synchronized sending
 * and handling of the SRI, as well as avoiding the occurrence of spurious
 * SRI. A spurious SRI would be one such that upon handling a call to
 * FFA_NOTIFICATION_INFO_GET would return error FFA_NO_DATA, which is plausible
 * in an MP system.
 * The state machine also aims at resolving the delay of the SRI by setting
 * flag FFA_NOTIFICATIONS_FLAG_DELAY_SRI in the arguments of the set call. By
 * delaying, the SRI is sent in context switching to the primary endpoint.
 * The SPMC is implemented under the assumption the receiver scheduler is a
 * NWd endpoint, hence the SRI is triggered at the world switch.
 * If concurrently another notification is set that requires immediate action,
 * the SRI is triggered immediately within that same execution context.
 *
 * HANDLED is the initial state, and means a new SRI can be sent. The following
 * state transitions are possible:
 * * HANDLED => DELAYED: Setting notification, and requesting SRI delay.
 * * HANDLED => TRIGGERED: Setting notification, and not requesting SRI delay.
 * * DELAYED => TRIGGERED: SRI was delayed, and the context switch to the
 *   receiver scheduler is being done.
 * * DELAYED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
 * * TRIGGERED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
 */
enum plat_ffa_sri_state {
	HANDLED = 0, /* Initial state: the previous SRI has been consumed. */
	DELAYED,     /* SRI requested with FFA_NOTIFICATIONS_FLAG_DELAY_SRI. */
	TRIGGERED,   /* SRI has been sent and awaits INFO_GET from scheduler. */
};
| 53 | |
/** Returns information on features that are specific to the platform. */
struct ffa_value plat_ffa_features(uint32_t function_feature_id);

/** Returns the SPMC ID. */
struct ffa_value plat_ffa_spmc_id_get(void);

/* Platform-specific one-time setup hooks for the FF-A layer. */
void plat_ffa_log_init(void);
void plat_ffa_init(bool tee_enabled);

/**
 * Checks whether a direct message request from `sender_vm_id` to
 * `receiver_vm_id`, issued in the context of `current`, is permitted.
 * NOTE(review): the validity rules are per-world/per-platform — see the
 * plat_ffa backends for the actual policy.
 */
bool plat_ffa_is_direct_request_valid(struct vcpu *current,
				      ffa_vm_id_t sender_vm_id,
				      ffa_vm_id_t receiver_vm_id);

/**
 * Checks whether a direct message response from `sender_vm_id` to
 * `receiver_vm_id`, issued in the context of `current`, is permitted.
 */
bool plat_ffa_is_direct_response_valid(struct vcpu *current,
				       ffa_vm_id_t sender_vm_id,
				       ffa_vm_id_t receiver_vm_id);

/**
 * Forwards a direct message request to `receiver_vm_id` (presumably to the
 * other world — confirm against the backend implementations). Returns true if
 * the call was forwarded, with the forwarded call's result stored in `*ret`.
 */
bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
				     struct ffa_value args,
				     struct ffa_value *ret);
/**
 * Checks whether `current` may request creation of a notifications bitmap
 * for the VM of the given ID.
 */
bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
					    ffa_vm_id_t vm_id);

/**
 * Checks whether a notifications bind/unbind between `sender_id` and
 * `receiver_id` is permitted in the context of `current`.
 */
bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
					  ffa_vm_id_t sender_id,
					  ffa_vm_id_t receiver_id);

/**
 * Forwards a notifications bind (`is_bind` true) or unbind (`is_bind` false)
 * call. Returns true if the call was forwarded; `*ret` then holds the
 * forwarded call's result.
 */
bool plat_ffa_notifications_update_bindings_forward(
	ffa_vm_id_t receiver_id, ffa_vm_id_t sender_id, uint32_t flags,
	ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret);

/**
 * Checks whether an FFA_NOTIFICATION_SET from `sender_id` to `receiver_id`
 * is permitted in the context of `current`.
 */
bool plat_ffa_is_notification_set_valid(struct vcpu *current,
					ffa_vm_id_t sender_id,
					ffa_vm_id_t receiver_id);

/**
 * Forwards an FFA_NOTIFICATION_SET call. Returns true if the call was
 * forwarded; `*ret` then holds the forwarded call's result.
 */
bool plat_ffa_notification_set_forward(ffa_vm_id_t sender_vm_id,
				       ffa_vm_id_t receiver_vm_id,
				       uint32_t flags,
				       ffa_notifications_bitmap_t bitmap,
				       struct ffa_value *ret);

/**
 * Checks whether an FFA_NOTIFICATION_GET for `receiver_id` is permitted in
 * the context of `current`.
 */
bool plat_ffa_is_notification_get_valid(struct vcpu *current,
					ffa_vm_id_t receiver_id);

/**
 * Retrieves, into `*from_sp`, the notifications pending for the locked
 * receiver that were sent by SPs. Returns false on failure, with the error
 * reported through `*ret`.
 */
bool plat_ffa_notifications_get_from_sp(struct vm_locked receiver_locked,
					ffa_vcpu_index_t vcpu_id,
					ffa_notifications_bitmap_t *from_sp,
					struct ffa_value *ret);

/**
 * Issues an FFA_NOTIFICATION_GET for `receiver_id` on the given vCPU.
 * The call's result is reported through `*ret`.
 */
bool plat_ffa_notifications_get_call(ffa_vm_id_t receiver_id, uint32_t vcpu_id,
				     uint32_t flags, struct ffa_value *ret);
| 100 | |
/**
 * Checks whether managed exit is supported by the given SP.
 */
bool plat_ffa_vm_managed_exit_supported(struct vm *vm);

/**
 * Encodes a memory handle according to section 5.10.2 of the FF-A v1.0 spec.
 */
ffa_memory_handle_t plat_ffa_memory_handle_make(uint64_t index);

/**
 * Checks whether the given handle was allocated by the current world,
 * according to the handle encoding rules.
 */
bool plat_ffa_memory_handle_allocated_by_current_world(
	ffa_memory_handle_t handle);
Maksims Svecovs | b596eab | 2021-04-27 00:52:27 +0100 | [diff] [blame] | 117 | |
/**
 * Returns the FF-A partition info VM/SP properties given the VM ID.
 */
ffa_partition_properties_t plat_ffa_partition_properties(
	ffa_vm_id_t vm_id, const struct vm *target);

/**
 * Gets an NWd VM's structure, locked.
 */
struct vm_locked plat_ffa_vm_find_locked(ffa_vm_id_t vm_id);

/**
 * Creates a notifications bitmap for the VM of the given ID, with one entry
 * per vCPU as implied by `vcpu_count`.
 */
struct ffa_value plat_ffa_notifications_bitmap_create(
	ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count);
| 134 | |
/**
 * Issues an FFA_NOTIFICATION_BITMAP_CREATE.
 * Returns true if the call goes well, and false if the call returns with
 * FFA_ERROR_32.
 */
bool plat_ffa_notifications_bitmap_create_call(ffa_vm_id_t vm_id,
					       ffa_vcpu_count_t vcpu_count);

/**
 * Destroys the notifications bitmap for the given VM ID.
 */
struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id);

/**
 * Helper to get the struct notifications, depending on the sender's ID
 * (i.e. on which world the sender belongs to).
 */
struct notifications *plat_ffa_vm_get_notifications_senders_world(
	struct vm_locked vm_locked, ffa_vm_id_t sender_id);

/**
 * Helper to check if an FF-A ID is a VM ID (as opposed to an SP ID).
 */
bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id);
Raghu Krishnamurthy | 62f97a7 | 2021-07-27 02:14:59 -0700 | [diff] [blame] | 158 | |
/**
 * Forwards normal world calls of the FFA_RUN ABI to the other world.
 * Returns true if the call was forwarded; `*ret` then holds its result.
 */
bool plat_ffa_run_forward(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
			  struct ffa_value *ret);

/**
 * Issues an FFA_NOTIFICATION_INFO_GET call, reporting its result through
 * `*ret`.
 */
bool plat_ffa_notification_info_get_call(struct ffa_value *ret);

/**
 * Fills in the list of (VM, vCPU) IDs with pending notifications, in the
 * format expected by FFA_NOTIFICATION_INFO_GET. `ids_count_max` bounds the
 * number of entries written to `ids`; the counts/sizes are reported through
 * the remaining out-parameters.
 */
bool plat_ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
					uint32_t *lists_sizes,
					uint32_t *lists_count,
					const uint32_t ids_count_max);
Raghu Krishnamurthy | ea6d25f | 2021-09-14 15:27:06 -0700 | [diff] [blame] | 171 | |
/** Helper to set the SRI current state. */
void plat_ffa_sri_state_set(enum plat_ffa_sri_state state);

/**
 * Helper to send the SRI and safely update `ffa_sri_state`, if there has been
 * a call to FFA_NOTIFICATION_SET, and the SRI has been delayed.
 * To be called at a context switch to the NWd.
 */
void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu);

/**
 * Helper to send the SRI and safely update `ffa_sri_state`, if it hasn't been
 * delayed in the call to FFA_NOTIFICATION_SET.
 */
void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu);

/**
 * Initializes the Schedule Receiver Interrupt, needed in the context of
 * notifications support.
 */
void plat_ffa_sri_init(struct cpu *cpu);

/**
 * Forwards an FFA_NOTIFICATION_INFO_GET call; out-parameters mirror
 * plat_ffa_vm_notifications_info_get().
 */
void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
					    uint32_t *lists_sizes,
					    uint32_t *lists_count,
					    const uint32_t ids_count_max);
| 198 | |
/** Checks whether `current` may issue FFA_MEM_PERM_GET / FFA_MEM_PERM_SET. */
bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current);
bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current);

/**
 * Checks if the current SP can resume the target VM/SP using the FFA_RUN ABI.
 * On failure the error is reported through `*run_ret`; `*next` may be updated
 * with the vCPU to run instead.
 */
bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
			 ffa_vcpu_index_t vcpu_idx, struct ffa_value *run_ret,
			 struct vcpu **next);

/**
 * Deactivates an interrupt, given its physical (`pint_id`) and virtual
 * (`vint_id`) interrupt IDs. Returns 0 on success, negative on error
 * (convention inferred from the int64_t return — confirm in the backends).
 */
int64_t plat_ffa_interrupt_deactivate(uint32_t pint_id, uint32_t vint_id,
				      struct vcpu *current);

/*
 * Secure-interrupt handling and vCPU resumption hooks. Each may redirect
 * execution by writing the vCPU to run next into `*next`.
 */
void plat_ffa_secure_interrupt(struct vcpu *current, struct vcpu **next);
struct ffa_value plat_ffa_delegate_ffa_interrupt(struct vcpu *current,
						 struct vcpu **next);
struct ffa_value plat_ffa_normal_world_resume(struct vcpu *current,
					      struct vcpu **next);
struct ffa_value plat_ffa_preempted_vcpu_resume(struct vcpu *current,
						struct vcpu **next);

/**
 * Injects the notification-pending interrupt as part of a context switch
 * from `current` to `next`.
 */
void plat_ffa_inject_notification_pending_interrupt_context_switch(
	struct vcpu *next, struct vcpu *current);
Olivier Deprez | e562e54 | 2020-06-11 17:31:54 +0200 | [diff] [blame] | 225 | |
/**
 * Forwards an FFA_PARTITION_INFO_GET call for the given UUID; the discovered
 * partitions are appended to `partitions` and counted in `*ret_count`.
 */
void plat_ffa_partition_info_get_forward(const struct ffa_uuid *uuid,
					 const uint32_t flags,
					 struct ffa_partition_info *partitions,
					 ffa_vm_count_t *ret_count);

/**
 * Parses the FF-A partition manifest found in the FDT at `fdt_addr`
 * (of `fdt_allocated_size` bytes), mapped via `stage1_locked`, filling in
 * the given manifest VM. `ppool` provides memory for page-table operations.
 */
void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
				       paddr_t fdt_addr,
				       size_t fdt_allocated_size,
				       const struct manifest_vm *manifest_vm,
				       struct mpool *ppool);