/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa/setup_and_discovery.h"

#include "hf/arch/other_world.h"

#include "hf/check.h"
#include "hf/ffa/vm.h"
#include "hf/manifest.h"
#include "hf/vm.h"

#include "hypervisor.h"
#include "smc.h"

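/**
 * Fetches the SPMC ID from the SPMD via FFA_SPM_ID_GET when a TEE/SPMC is
 * present; otherwise returns FFA_ERROR with FFA_NOT_SUPPORTED.
 */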
struct ffa_value ffa_setup_spmc_id_get(void)
{
	if (plat_ffa_is_tee_enabled()) {
		/*
		 * Fetch the SPMC ID from the SPMD using FFA_SPM_ID_GET.
		 * DEN0077A FF-A v1.1 Beta0 section 13.9.2
		 * "FFA_SPM_ID_GET invocation at a non-secure physical FF-A
		 * instance returns the ID of the SPMC."
		 */
		return smc_ffa_call(
			(struct ffa_value){.func = FFA_SPM_ID_GET_32});
	}

	return (struct ffa_value){.func = FFA_ERROR_32,
				  .arg2 = FFA_NOT_SUPPORTED};
}

/**
 * Returns false, as FFA_SECONDARY_EP_REGISTER is not supported at the
 * non-secure FF-A instance.
 */
bool ffa_setup_is_secondary_ep_register_supported(void)
{
	return false;
}

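/**
 * Maps the given RX/TX buffers in the SPMC by issuing FFA_RXTX_MAP at the
 * secure physical FF-A instance, and checks that the SPMC reports success.
 */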
void ffa_setup_rxtx_map_spmc(paddr_t recv, paddr_t send, uint64_t page_count)
{
	struct ffa_value ret;

	ret = arch_other_world_call((struct ffa_value){.func = FFA_RXTX_MAP_64,
						       .arg1 = pa_addr(recv),
						       .arg2 = pa_addr(send),
						       .arg3 = page_count});
	CHECK(ret.func == FFA_SUCCESS_32);
}

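/**
 * Forwards a VM's RXTX_MAP to the SPMC: the VM's buffer addresses are written
 * into an endpoint RX/TX descriptor in the SPMC's RX buffer and FFA_RXTX_MAP
 * is invoked, after which the SPMC owns the VM's mailbox. Only done for VMs
 * that support indirect messaging.
 */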
void ffa_setup_rxtx_map_forward(struct vm_locked vm_locked)
{
	struct vm *vm = vm_locked.vm;
	struct vm *other_world;

	if (!plat_ffa_is_tee_enabled()) {
		vm_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
		return;
	}

	if (!ffa_vm_supports_indirect_messages(vm)) {
		return;
	}

	/* The hypervisor always forwards the call to the SPMC. */

	other_world = vm_find(HF_OTHER_WORLD_ID);

	/* Fill the buffers descriptor in the SPMC's RX buffer. */
	ffa_endpoint_rx_tx_descriptor_init(
		(struct ffa_endpoint_rx_tx_descriptor *)
			other_world->mailbox.recv,
		vm->id, (uintptr_t)vm->mailbox.recv,
		(uintptr_t)vm->mailbox.send);

	ffa_setup_rxtx_map_spmc(pa_init(0), pa_init(0), 0);

	vm_locked.vm->mailbox.state = MAILBOX_STATE_OTHER_WORLD_OWNED;

	dlog_verbose("Mailbox of %x owned by SPMC.\n", vm_locked.vm->id);
}

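/**
 * Forwards a VM's FFA_RXTX_UNMAP to the SPMC so that the buffers previously
 * mapped on the VM's behalf are unmapped there as well. Panics if the SPMC
 * returns an error.
 */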
void ffa_setup_rxtx_unmap_forward(struct vm_locked vm_locked)
{
	struct ffa_value ret;
	uint64_t func;
	ffa_id_t id;

	assert(vm_locked.vm != NULL);

	id = vm_locked.vm->id;

	if (!plat_ffa_is_tee_enabled()) {
		return;
	}

	if (!ffa_vm_supports_indirect_messages(vm_locked.vm)) {
		return;
	}

	/* The hypervisor always forwards RXTX_UNMAP to the SPMC. */
	ret = arch_other_world_call(
		(struct ffa_value){.func = FFA_RXTX_UNMAP_32,
				   .arg1 = id << FFA_RXTX_ALLOCATOR_SHIFT});
	func = ret.func & ~SMCCC_CONVENTION_MASK;
	if (ret.func == (uint64_t)SMCCC_ERROR_UNKNOWN) {
		panic("Unknown error forwarding RXTX_UNMAP.\n");
	} else if (func == FFA_ERROR_32) {
		panic("Error %d forwarding RX/TX buffers.\n", ret.arg2);
	} else if (func != FFA_SUCCESS_32) {
		panic("Unexpected function %#x returned forwarding RX/TX "
		      "buffers.",
		      ret.func);
	}
}

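/**
 * Returns true if FFA_PARTITION_INFO_GET_REGS may be forwarded to the SPMC.
 */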
bool ffa_setup_partition_info_get_regs_forward_allowed(void)
{
	/*
	 * Allow forwarding from the Hypervisor if a TEE or SPMC exists and is
	 * declared as such in the Hypervisor manifest.
	 */
	return plat_ffa_is_tee_enabled();
}

/**
 * Forward helper for FFA_PARTITION_INFO_GET.
 * Forwards FFA_PARTITION_INFO_GET from the Hypervisor to the SPMC if allowed
 * and returns the updated partition count.
 */
ffa_vm_count_t ffa_setup_partition_info_get_forward(
	const struct ffa_uuid *uuid, uint32_t flags,
	struct ffa_partition_info *partitions, ffa_vm_count_t vm_count)
{
	const struct vm *tee = vm_find(HF_TEE_VM_ID);
	struct ffa_partition_info *tee_partitions;
	ffa_vm_count_t tee_partitions_count;
	struct ffa_value ret;

	CHECK(tee != NULL);
	CHECK(vm_count < MAX_VMS);

	/*
	 * Allow forwarding from the Hypervisor if a TEE or SPMC exists and is
	 * declared as such in the Hypervisor manifest.
	 */
	if (!plat_ffa_is_tee_enabled()) {
		return vm_count;
	}

	ret = arch_other_world_call(
		(struct ffa_value){.func = FFA_PARTITION_INFO_GET_32,
				   .arg1 = uuid->uuid[0],
				   .arg2 = uuid->uuid[1],
				   .arg3 = uuid->uuid[2],
				   .arg4 = uuid->uuid[3],
				   .arg5 = flags});
	if (ffa_func_id(ret) != FFA_SUCCESS_32) {
		dlog_verbose(
			"Failed forwarding FFA_PARTITION_INFO_GET to "
			"the SPMC.\n");
		return vm_count;
	}

	tee_partitions_count = ffa_partition_info_get_count(ret);
	if (tee_partitions_count == 0 || tee_partitions_count > MAX_VMS) {
		dlog_verbose("Invalid number of SPs returned by the SPMC.\n");
		return vm_count;
	}

	if ((flags & FFA_PARTITION_COUNT_FLAG_MASK) ==
	    FFA_PARTITION_COUNT_FLAG) {
		vm_count += tee_partitions_count;
	} else {
		tee_partitions = (struct ffa_partition_info *)tee->mailbox.send;
		for (ffa_vm_count_t index = 0; index < tee_partitions_count;
		     index++) {
			partitions[vm_count] = tee_partitions[index];
			++vm_count;
		}

		/* Release the RX buffer. */
		ret = arch_other_world_call(
			(struct ffa_value){.func = FFA_RX_RELEASE_32});
		CHECK(ret.func == FFA_SUCCESS_32);
	}

	return vm_count;
}

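/**
 * Maps the FDT of a hypervisor-loaded partition, parses its FF-A manifest
 * into `manifest_vm`, and unmaps the FDT again.
 */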
void ffa_setup_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
					paddr_t fdt_addr,
					size_t fdt_allocated_size,
					const struct manifest_vm *manifest_vm,
					const struct boot_params *boot_params,
					struct mpool *ppool)
{
	struct fdt partition_fdt;

	/*
	 * If the partition is an FF-A partition and is not hypervisor
	 * loaded, the manifest is passed in the partition package, is
	 * parsed during manifest_init() and the secondary FDT should be
	 * empty.
	 */
	CHECK(manifest_vm->is_hyp_loaded);
	CHECK(mm_identity_map(stage1_locked, fdt_addr,
			      pa_add(fdt_addr, fdt_allocated_size), MM_MODE_R,
			      ppool) != NULL);
	// NOLINTNEXTLINE(performance-no-int-to-ptr)
	CHECK(fdt_init_from_ptr(&partition_fdt, (void *)pa_addr(fdt_addr),
				fdt_allocated_size) == true);
	CHECK(parse_ffa_manifest(&partition_fdt,
				 (struct manifest_vm *)manifest_vm, NULL,
				 boot_params) == MANIFEST_SUCCESS);
	CHECK(mm_unmap(stage1_locked, fdt_addr,
		       pa_add(fdt_addr, fdt_allocated_size), ppool) == true);
}

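/**
 * Returns the partition properties of `target` as observed by `caller_id`,
 * restricted to the messaging methods permitted between the two endpoints.
 */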
ffa_partition_properties_t ffa_setup_partition_properties(
	ffa_id_t caller_id, const struct vm *target)
{
	ffa_partition_properties_t result = target->messaging_method;
	/*
	 * VMs support indirect messaging only in the Normal World.
	 * Primary VM cannot receive direct requests.
	 * Secondary VMs cannot send direct requests.
	 */
	if (!vm_id_is_current_world(caller_id)) {
		result &= ~FFA_PARTITION_INDIRECT_MSG;
	}
	if (vm_is_primary(target)) {
		result &= ~FFA_PARTITION_DIRECT_REQ_RECV;
	} else {
		result &= ~FFA_PARTITION_DIRECT_REQ_SEND;
	}
	return result;
}

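/**
 * Forwards a VM's FFA_RX_RELEASE to the SPMC when the SPMC tracks the VM's
 * mailbox. Returns true if the call was forwarded, in which case `ret` holds
 * the SPMC's response.
 */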
bool ffa_setup_rx_release_forward(struct vm_locked vm_locked,
				  struct ffa_value *ret)
{
	struct vm *vm = vm_locked.vm;
	ffa_id_t vm_id = vm->id;

	if (!plat_ffa_is_tee_enabled() ||
	    !ffa_vm_supports_indirect_messages(vm)) {
		return false;
	}

	CHECK(vm_id_is_current_world(vm_id));

	/* The hypervisor always forwards the VM's RX_RELEASE to the SPMC. */
	*ret = arch_other_world_call(
		(struct ffa_value){.func = FFA_RX_RELEASE_32, .arg1 = vm_id});

	if (ret->func == FFA_SUCCESS_32) {
		/*
		 * The SPMC owns the VM's RX buffer after a successful
		 * FFA_RX_RELEASE call.
		 */
		vm->mailbox.state = MAILBOX_STATE_OTHER_WORLD_OWNED;
	} else {
		dlog_verbose(
			"Forwarding FFA_RX_RELEASE failed for VM ID %#x.\n",
			vm_locked.vm->id);
	}

	return true;
}

/**
 * Acquire the RX buffer of a VM from the SPM.
 *
 * The VM's RX/TX buffers must have been previously mapped in the SPM, either
 * by forwarding the VM's RX_TX_MAP call or by another means if the buffers
 * were declared in the manifest.
 *
 * Returns true if the ownership belongs to the hypervisor.
 */
bool ffa_setup_acquire_receiver_rx(struct vm_locked to_locked,
				   struct ffa_value *ret)
{
	struct ffa_value other_world_ret;

	/*
	 * Do not forward the call if either:
	 * - The TEE is not present.
	 * - The VM does not support indirect messaging.
	 * - The mailbox ownership hasn't been transferred to the SPMC.
	 */
	if (!plat_ffa_is_tee_enabled() ||
	    !ffa_vm_supports_indirect_messages(to_locked.vm) ||
	    to_locked.vm->mailbox.state != MAILBOX_STATE_OTHER_WORLD_OWNED) {
		return true;
	}

	other_world_ret = arch_other_world_call((struct ffa_value){
		.func = FFA_RX_ACQUIRE_32, .arg1 = to_locked.vm->id});

	if (ret != NULL) {
		*ret = other_world_ret;
	}

	if (other_world_ret.func == FFA_SUCCESS_32) {
		to_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
	}

	return other_world_ret.func == FFA_SUCCESS_32;
}