/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa/ffa_memory.h"

#include "hf/arch/other_world.h"

#include "hf/ffa/init.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory_internal.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "sysregs.h"

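/**
 * In the hypervisor build, memory handles are allocated by the hypervisor
 * itself rather than by the SPMC.
 */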
enum ffa_memory_handle_allocator ffa_memory_get_handle_allocator(void)
{
	return FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
}

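/**
 * Issues FFA_MEM_RECLAIM to the other world. Per the FF-A ABI, the 64-bit
 * memory handle is split across arg1 (low half) and arg2 (high half), so
 * the receiver reassembles it as:
 *
 *   handle = (uint64_t)args.arg1 | ((uint64_t)args.arg2 << 32);
 */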
static struct ffa_value ffa_other_world_mem_reclaim(
	ffa_memory_handle_t handle, ffa_memory_region_flags_t flags)
{
	return arch_other_world_call((struct ffa_value){
		.func = FFA_MEM_RECLAIM_32,
		.arg1 = (uint32_t)handle,
		.arg2 = (uint32_t)(handle >> 32),
		.arg3 = flags,
	});
}

/**
 * Checks the validity of an FF-A memory send attempt.
 */
bool ffa_memory_is_send_valid(ffa_id_t receiver, ffa_id_t sender,
			      uint32_t share_func, bool multiple_borrower)
{
	/*
	 * Currently memory interfaces are not forwarded from the hypervisor
	 * to the SPMC. However, in the absence of an SPMC this function
	 * should allow memory sends from an NS endpoint to an SP, in order
	 * for the Trusty tests to work.
	 */

	(void)share_func;
	(void)receiver;
	(void)sender;
	(void)multiple_borrower;

	return true;
}

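/**
 * The hypervisor applies no additional memory mode bits to memory
 * belonging to the other world.
 */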
uint32_t ffa_memory_get_other_world_mode(void)
{
	return 0U;
}

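/**
 * FFA_MEM_PERM_GET is only supported when the CPU has VHE support, which
 * Hafnium requires in order to run EL0 partitions.
 */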
bool ffa_memory_is_mem_perm_get_valid(const struct vcpu *current)
{
	(void)current;
	return has_vhe_support();
}

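/** Likewise, FFA_MEM_PERM_SET is only supported with VHE support. */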
bool ffa_memory_is_mem_perm_set_valid(const struct vcpu *current)
{
	(void)current;
	return has_vhe_support();
}

/** Forwards a memory send message on to the other world. */
static struct ffa_value memory_send_other_world_forward(
	struct vm_locked other_world_locked, uint32_t share_func,
	struct ffa_memory_region *memory_region, uint32_t memory_share_length,
	uint32_t fragment_length)
{
	struct ffa_value ret;

	/* Copy the descriptor into the other world's RX buffer. */
	memcpy_s(other_world_locked.vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX,
		 memory_region, fragment_length);

	other_world_locked.vm->mailbox.recv_func = share_func;
	other_world_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
	ret = arch_other_world_call((struct ffa_value){
		.func = share_func,
		.arg1 = memory_share_length,
		.arg2 = fragment_length,
	});
	/*
	 * After the call to the other world completes it must have finished
	 * reading its RX buffer, so it is ready for another message.
	 */
	other_world_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;

	return ret;
}

/**
 * Validates a call to donate, lend or share memory to the other world and
 * then updates the stage-2 page tables. Specifically, it checks that the
 * message length and number of memory region constituents match, and that
 * the transition is valid for the type of memory sending operation.
 *
 * Assumes that the caller has already found and locked the sender VM and the
 * other world VM, and copied the memory region descriptor from the sender's
 * TX buffer to a freshly allocated page from Hafnium's internal pool. The
 * caller must have also validated that the receiver VM ID is valid.
 *
 * This function takes ownership of the `memory_region` passed in and will
 * free it when necessary; it must not be freed by the caller.
 */
static struct ffa_value ffa_memory_other_world_send(
	struct vm_locked from_locked, struct vm_locked to_locked,
	struct ffa_memory_region *memory_region, uint32_t memory_share_length,
	uint32_t fragment_length, uint32_t share_func, struct mpool *page_pool)
{
	ffa_memory_handle_t handle;
	struct share_states_locked share_states;
	struct ffa_memory_share_state *share_state;
	struct ffa_value ret;
	struct ffa_value reclaim_ret;
	(void)reclaim_ret;

	/*
	 * If there is an error validating the `memory_region` then we need to
	 * free it because we own it but we won't be storing it in a share
	 * state after all.
	 */
	ret = ffa_memory_send_validate(from_locked, memory_region,
				       memory_share_length, fragment_length,
				       share_func);
	if (ret.func != FFA_SUCCESS_32) {
		goto out_err;
	}

	share_states = share_states_lock();

	if (fragment_length == memory_share_length) {
		/* No more fragments to come, everything fits in one message. */

		/* Forward memory send message on to other world. */
		ret = memory_send_other_world_forward(
			to_locked, share_func, memory_region,
			memory_share_length, fragment_length);
		if (ret.func != FFA_SUCCESS_32) {
			dlog_verbose(
				"%s: failed to forward memory send message to "
				"other world: %s(%s).\n",
				__func__, ffa_func_name(ret.func),
				ffa_error_name(ffa_error_code(ret)));
			goto out;
		}

		handle = ffa_mem_success_handle(ret);
		share_state = allocate_share_state(share_states, share_func,
						   memory_region,
						   fragment_length, handle);
		if (share_state == NULL) {
			dlog_verbose("%s: failed to allocate share state.\n",
				     __func__);
			ret = ffa_error(FFA_NO_MEMORY);

			reclaim_ret = ffa_other_world_mem_reclaim(handle, 0);
			assert(reclaim_ret.func == FFA_SUCCESS_32);
			goto out;
		}

		ret = ffa_memory_send_complete(from_locked, share_states,
					       share_state, page_pool,
					       &share_state->sender_orig_mode);
		if (ret.func != FFA_SUCCESS_32) {
			dlog_verbose(
				"%s: failed to complete memory send: %s(%s).\n",
				__func__, ffa_func_name(ret.func),
				ffa_error_name(ffa_error_code(ret)));

			reclaim_ret = ffa_other_world_mem_reclaim(handle, 0);
			assert(reclaim_ret.func == FFA_SUCCESS_32);
			goto out;
		}
		/*
		 * Don't free the memory region fragment, as it has been stored
		 * in the share state.
		 */
		memory_region = NULL;
	} else {
		/* More fragments remaining, fragmented message. */
		dlog_verbose("%s: more fragments remaining: %d/%d\n", __func__,
			     fragment_length, memory_share_length);

		/*
		 * We need to wait for the rest of the fragments before we can
		 * check whether the transaction is valid and unmap the memory.
		 * Call the other world so it can do its initial validation and
		 * assign a handle, and allocate a share state to keep what we
		 * have so far.
		 */
		ret = memory_send_other_world_forward(
			to_locked, share_func, memory_region,
			memory_share_length, fragment_length);
		if (ret.func != FFA_MEM_FRAG_RX_32) {
			dlog_warning(
				"%s: failed to forward to other world: "
				"%s(%s)\n",
				__func__, ffa_func_name(ret.func),
				ffa_error_name(ffa_error_code(ret)));
			goto out;
		}
		if (ret.arg3 != fragment_length) {
			dlog_warning(
				"%s: got unexpected fragment offset for %s "
				"from other world (expected %d, got %lu)\n",
				__func__, ffa_func_name(FFA_MEM_FRAG_RX_32),
				fragment_length, ret.arg3);
			ret = ffa_error(FFA_INVALID_PARAMETERS);
			goto out;
		}
		if (ffa_frag_sender(ret) != from_locked.vm->id) {
			dlog_warning(
				"%s: got unexpected sender ID for %s from "
				"other world (expected %d, got %d)\n",
				__func__, ffa_func_name(FFA_MEM_FRAG_RX_32),
				from_locked.vm->id, ffa_frag_sender(ret));
			ret = ffa_error(FFA_INVALID_PARAMETERS);
			goto out;
		}
		handle = ffa_frag_handle(ret);
		share_state = allocate_share_state(share_states, share_func,
						   memory_region,
						   fragment_length, handle);
		if (share_state == NULL) {
			dlog_verbose("%s: failed to allocate share state.\n",
				     __func__);
			ret = ffa_error(FFA_NO_MEMORY);

			reclaim_ret = ffa_other_world_mem_reclaim(handle, 0);
			assert(reclaim_ret.func == FFA_SUCCESS_32);
			goto out;
		}
		ret = (struct ffa_value){
			.func = FFA_MEM_FRAG_RX_32,
			.arg1 = (uint32_t)handle,
			.arg2 = (uint32_t)(handle >> 32),
			.arg3 = fragment_length,
		};
		/*
		 * Don't free the memory region fragment, as it has been stored
		 * in the share state.
		 */
		memory_region = NULL;
	}

out:
	share_states_unlock(&share_states);
out_err:
	if (memory_region != NULL) {
		mpool_free(page_pool, memory_region);
	}
	return ret;
}

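/**
 * Handles a memory send call whose receiver is in the other world: locks
 * both the sender and the other world VM, checks that the other world's
 * mailbox is free, and forwards the request on.
 */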
struct ffa_value ffa_memory_other_world_mem_send(
	struct vm *from, uint32_t share_func,
	struct ffa_memory_region **memory_region, uint32_t length,
	uint32_t fragment_length, struct mpool *page_pool)
{
	struct vm *to;
	struct ffa_value ret;

	to = vm_find(HF_OTHER_WORLD_ID);

	/*
	 * The `to` VM's lock is only needed in the case that it is the
	 * TEE VM.
	 */
	struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);

	/* Check whether the `to` VM's mailbox is busy. */
	if (vm_is_mailbox_busy(vm_to_from_lock.vm1)) {
		dlog_verbose("The other world VM %#x has a pending message.\n",
			     vm_to_from_lock.vm1.vm->id);
		ret = ffa_error(FFA_BUSY);
	} else {
		ret = ffa_memory_other_world_send(
			vm_to_from_lock.vm2, vm_to_from_lock.vm1,
			*memory_region, length, fragment_length, share_func,
			page_pool);
		/*
		 * `ffa_memory_other_world_send` takes ownership of the
		 * memory_region, so make sure we don't free it.
		 */
		*memory_region = NULL;
	}

	vm_unlock(&vm_to_from_lock.vm1);
	vm_unlock(&vm_to_from_lock.vm2);

	return ret;
}

/**
 * Validates that the reclaim transition is allowed for the memory region
 * with the given handle, which was previously shared with the SPMC. Tells
 * the SPMC to mark it as reclaimed, and updates the page table of the
 * reclaiming VM.
 *
 * To do this, information about the memory region is first fetched from the
 * SPMC.
 */
static struct ffa_value ffa_memory_other_world_reclaim(
	struct vm_locked to_locked, ffa_memory_handle_t handle,
	ffa_memory_region_flags_t flags, struct mpool *page_pool)
{
	struct share_states_locked share_states;
	struct ffa_memory_share_state *share_state;
	struct ffa_memory_region *memory_region;
	struct ffa_value ret;

	dump_share_states();

	share_states = share_states_lock();

	share_state = get_share_state(share_states, handle);
	if (share_state == NULL) {
		dlog_verbose("Unable to find share state for handle %#lx.\n",
			     handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}
	memory_region = share_state->memory_region;

	CHECK(memory_region != NULL);

	if (vm_id_is_current_world(to_locked.vm->id) &&
	    to_locked.vm->id != memory_region->sender) {
		dlog_verbose(
			"VM %#x attempted to reclaim memory handle %#lx "
			"originally sent by VM %#x.\n",
			to_locked.vm->id, handle, memory_region->sender);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (!share_state->sending_complete) {
		dlog_verbose(
			"Memory with handle %#lx not fully sent, can't "
			"reclaim.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
		struct ffa_memory_access *receiver =
			ffa_memory_region_get_receiver(memory_region, i);
		struct ffa_memory_region_attributes receiver_permissions;

		CHECK(receiver != NULL);

		receiver_permissions = receiver->receiver_permissions;

		/* Skip the entries that relate to SPs. */
		if (!ffa_is_vm_id(receiver_permissions.receiver)) {
			continue;
		}

		/* Check that all VMs have relinquished. */
		if (share_state->retrieved_fragment_count[i] != 0) {
			dlog_verbose(
				"Tried to reclaim memory handle %#lx "
				"that has not been relinquished by all "
				"borrowers (%x).\n",
				handle, receiver_permissions.receiver);
			ret = ffa_error(FFA_DENIED);
			goto out;
		}
	}

	/*
	 * Call the SPMC so that it frees the memory state tracking
	 * structures. This can fail if the SPs haven't finished using the
	 * memory.
	 */
	ret = ffa_other_world_mem_reclaim(handle, flags);

	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose(
			"FFA_MEM_RECLAIM returned an error. Expected "
			"FFA_SUCCESS, got %s (%s)\n",
			ffa_func_name(ret.func),
			ffa_error_name(ffa_error_code(ret)));
		goto out;
	}

	/*
	 * Mask the CLEAR flag, as this operation is expected to have been
	 * done by the SPMC.
	 */
	flags &= ~FFA_MEMORY_REGION_FLAG_CLEAR;
	ret = ffa_retrieve_check_update(
		to_locked, share_state->fragments,
		share_state->fragment_constituent_counts,
		share_state->fragment_count, share_state->sender_orig_mode,
		FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool,
		NULL, false);

	if (ret.func == FFA_SUCCESS_32) {
		share_state_free(share_states, share_state, page_pool);
		dlog_verbose("Freed share state after successful reclaim.\n");
	}

out:
	share_states_unlock(&share_states);
	return ret;
}

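/**
 * Handles FFA_MEM_RECLAIM for a handle belonging to the other world. If no
 * TEE is enabled the handle cannot be valid, so fail early; otherwise lock
 * both VMs and delegate to `ffa_memory_other_world_reclaim`.
 */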
struct ffa_value ffa_memory_other_world_mem_reclaim(
	struct vm *to, ffa_memory_handle_t handle,
	ffa_memory_region_flags_t flags, struct mpool *page_pool)
{
	struct ffa_value ret;
	struct vm *from = vm_find(HF_TEE_VM_ID);
	struct two_vm_locked vm_to_from_lock;

	if (!plat_ffa_is_tee_enabled()) {
		dlog_verbose("Invalid handle %#lx for FFA_MEM_RECLAIM.\n",
			     handle);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	vm_to_from_lock = vm_lock_both(to, from);

	ret = ffa_memory_other_world_reclaim(vm_to_from_lock.vm1, handle,
					     flags, page_pool);

	vm_unlock(&vm_to_from_lock.vm1);
	vm_unlock(&vm_to_from_lock.vm2);

	return ret;
}

/**
 * Forwards a memory send continuation message on to the other world.
 */
static struct ffa_value memory_send_continue_other_world_forward(
	struct vm_locked other_world_locked, ffa_id_t sender_vm_id,
	void *fragment, uint32_t fragment_length, ffa_memory_handle_t handle)
{
	struct ffa_value ret;

	memcpy_s(other_world_locked.vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX,
		 fragment, fragment_length);

	other_world_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
	other_world_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
	ret = arch_other_world_call(
		(struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
				   .arg1 = (uint32_t)handle,
				   .arg2 = (uint32_t)(handle >> 32),
				   .arg3 = fragment_length,
				   .arg4 = (uint64_t)sender_vm_id << 16});

	/*
	 * After the call to the other world completes it must have finished
	 * reading its RX buffer, so it is ready for another message.
	 */
	other_world_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;

	return ret;
}

/**
 * Continues an operation to donate, lend or share memory to the other world
 * VM. If this is the last fragment then it checks that the transition is
 * valid for the type of memory sending operation and updates the stage-2
 * page tables of the sender.
 *
 * Assumes that the caller has already found and locked the sender VM and
 * copied the memory region descriptor from the sender's TX buffer to a
 * freshly allocated page from Hafnium's internal pool.
 *
 * This function takes ownership of the `fragment` passed in and will free
 * it when necessary; it must not be freed by the caller.
 */
static struct ffa_value ffa_memory_other_world_send_continue(
	struct vm_locked from_locked, struct vm_locked to_locked,
	void *fragment, uint32_t fragment_length, ffa_memory_handle_t handle,
	struct mpool *page_pool)
{
	struct share_states_locked share_states = share_states_lock();
	struct ffa_memory_share_state *share_state;
	struct ffa_value ret;
	struct ffa_memory_region *memory_region;

	ret = ffa_memory_send_continue_validate(share_states, handle,
						&share_state,
						from_locked.vm->id, page_pool);
	if (ret.func != FFA_SUCCESS_32) {
		goto out_free_fragment;
	}
	memory_region = share_state->memory_region;

	if (!memory_region_receivers_from_other_world(memory_region)) {
		dlog_error(
			"Got SPM-allocated handle for memory send to non-other "
			"world VM. This should never happen, and indicates a "
			"bug.\n");
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out_free_fragment;
	}

	if (to_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	    to_locked.vm->mailbox.recv == NULL) {
		/*
		 * If the other_world RX buffer is not available, tell the
		 * sender to retry by returning the current offset again.
		 */
		ret = (struct ffa_value){
			.func = FFA_MEM_FRAG_RX_32,
			.arg1 = (uint32_t)handle,
			.arg2 = (uint32_t)(handle >> 32),
			.arg3 = share_state_next_fragment_offset(share_states,
								 share_state),
		};
		goto out_free_fragment;
	}

	/* Add this fragment. */
	share_state->fragments[share_state->fragment_count] = fragment;
	share_state->fragment_constituent_counts[share_state->fragment_count] =
		fragment_length / sizeof(struct ffa_memory_region_constituent);
	share_state->fragment_count++;

	/* Check whether the memory send operation is now ready to complete. */
	if (share_state_sending_complete(share_states, share_state)) {
		struct mpool local_page_pool;

		/*
		 * Use a local page pool so that we can roll back if necessary.
		 */
		mpool_init_with_fallback(&local_page_pool, page_pool);

		ret = ffa_memory_send_complete(from_locked, share_states,
					       share_state, &local_page_pool,
					       &share_state->sender_orig_mode);

		if (ret.func == FFA_SUCCESS_32) {
			/*
			 * Forward final fragment on to the other_world so that
			 * it can complete the memory sending operation.
			 */
			ret = memory_send_continue_other_world_forward(
				to_locked, from_locked.vm->id, fragment,
				fragment_length, handle);

			if (ret.func != FFA_SUCCESS_32) {
				/*
				 * The error will be passed on to the caller,
				 * but log it here too.
				 */
				dlog_verbose(
					"other_world didn't successfully "
					"complete memory send operation; "
					"returned %#lx (%lu). Rolling back.\n",
					ret.func, ret.arg2);

				/*
				 * The other_world failed to complete the send
				 * operation, so roll back the page table
				 * update for the VM. This can't fail because
				 * it won't try to allocate more memory than
				 * was freed into the `local_page_pool` by
				 * `ffa_send_check_update` in the initial
				 * update.
				 */
				CHECK(ffa_region_group_identity_map(
					      from_locked,
					      share_state->fragments,
					      share_state
						      ->fragment_constituent_counts,
					      share_state->fragment_count,
					      share_state->sender_orig_mode,
					      &local_page_pool,
					      MAP_ACTION_COMMIT, NULL)
					      .func == FFA_SUCCESS_32);
			}
		} else {
			/* Abort sending to other_world. */
			struct ffa_value other_world_ret =
				ffa_other_world_mem_reclaim(handle, 0);

			if (other_world_ret.func != FFA_SUCCESS_32) {
				/*
				 * Nothing we can do if other_world doesn't
				 * abort properly, just log it.
				 */
				dlog_verbose(
					"other_world didn't successfully abort "
					"failed memory send operation; "
					"returned %#lx (%lu).\n",
					other_world_ret.func,
					other_world_ret.arg2);
			}
			/*
			 * We don't need to free the share state in this case
			 * because ffa_memory_send_complete does that already.
			 */
		}

		mpool_fini(&local_page_pool);
	} else {
		uint32_t next_fragment_offset =
			share_state_next_fragment_offset(share_states,
							 share_state);

		ret = memory_send_continue_other_world_forward(
			to_locked, from_locked.vm->id, fragment,
			fragment_length, handle);

		if (ret.func != FFA_MEM_FRAG_RX_32 ||
		    ffa_frag_handle(ret) != handle ||
		    ret.arg3 != next_fragment_offset ||
		    ffa_frag_sender(ret) != from_locked.vm->id) {
			dlog_verbose(
				"Got unexpected result from forwarding "
				"FFA_MEM_FRAG_TX to other_world: %#lx (handle "
				"%#lx, offset %lu, sender %d); expected "
				"FFA_MEM_FRAG_RX (handle %#lx, offset %d, "
				"sender %d).\n",
				ret.func, ffa_frag_handle(ret), ret.arg3,
				ffa_frag_sender(ret), handle,
				next_fragment_offset, from_locked.vm->id);
			/* Free share state. */
			share_state_free(share_states, share_state, page_pool);
			ret = ffa_error(FFA_INVALID_PARAMETERS);
			goto out;
		}

		ret = (struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
					 .arg1 = (uint32_t)handle,
					 .arg2 = (uint32_t)(handle >> 32),
					 .arg3 = next_fragment_offset};
	}
	goto out;

out_free_fragment:
	mpool_free(page_pool, fragment);

out:
	share_states_unlock(&share_states);
	return ret;
}

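/**
 * Handles a memory send continuation (FFA_MEM_FRAG_TX) destined for the
 * other world: locks both the sender and the TEE VM and delegates to
 * `ffa_memory_other_world_send_continue`.
 */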
struct ffa_value ffa_memory_other_world_mem_send_continue(
	struct vm *from, void *fragment, uint32_t fragment_length,
	ffa_memory_handle_t handle, struct mpool *page_pool)
{
	struct ffa_value ret;
	struct vm *to = vm_find(HF_TEE_VM_ID);
	struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);

	/*
	 * The TEE RX buffer state is checked in
	 * `ffa_memory_other_world_send_continue` rather than here, as we need
	 * to return `FFA_MEM_FRAG_RX` with the current offset rather than
	 * FFA_ERROR FFA_BUSY in case it is busy.
	 */

	ret = ffa_memory_other_world_send_continue(
		vm_to_from_lock.vm2, vm_to_from_lock.vm1, fragment,
		fragment_length, handle, page_pool);
	/*
	 * `ffa_memory_other_world_send_continue` takes ownership of the
	 * fragment, so we don't need to free it here.
	 */

	vm_unlock(&vm_to_from_lock.vm1);
	vm_unlock(&vm_to_from_lock.vm2);

	return ret;
}

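/**
 * The hypervisor variant does not derive a security state from the mode,
 * so the attributes are returned unchanged.
 */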
ffa_memory_attributes_t ffa_memory_add_security_bit_from_mode(
	ffa_memory_attributes_t attributes, uint32_t mode)
{
	(void)mode;

	return attributes;
}