/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa/indirect_messaging.h"

#include "hf/arch/other_world.h"

#include "hf/api.h"
#include "hf/ffa_internal.h"
#include "hf/vm.h"

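/**
 * Checks whether indirect messaging is supported between the given sender
 * and receiver.
 */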
bool ffa_indirect_msg_is_supported(struct vm_locked sender_locked,
				   struct vm_locked receiver_locked)
{
	(void)sender_locked;
	(void)receiver_locked;

	/*
	 * The hypervisor is only for testing purposes, so always allow
	 * indirect messages from a VM.
	 */
	return true;
}

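/**
 * Forwards an FFA_MSG_SEND2 invocation to the SPMC when the receiver is a
 * secure partition. Returns true if the call was forwarded (with the SPMC's
 * response written to `ret`), or false if the receiver belongs to the
 * current world and no forwarding is needed.
 */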
bool ffa_indirect_msg_send2_forward(ffa_id_t receiver_vm_id,
				    ffa_id_t sender_vm_id,
				    struct ffa_value *ret)
{
	/* FFA_MSG_SEND2 is forwarded to SPMC when the receiver is an SP. */
	if (vm_id_is_current_world(receiver_vm_id)) {
		return false;
	}

	/*
	 * Set the sender in arg1 so that the SPMC can retrieve the VM's TX
	 * buffer to copy into the SP's RX buffer.
	 */
	*ret = arch_other_world_call((struct ffa_value){
		.func = FFA_MSG_SEND2_32,
		.arg1 = sender_vm_id << 16,
	});

	if (ffa_func_id(*ret) != FFA_SUCCESS_32) {
		dlog_verbose(
			"Failed forwarding FFA_MSG_SEND2_32 to the "
			"SPMC, got error %s (%d).\n",
			ffa_error_name(ffa_error_code(*ret)),
			ffa_error_code(*ret));
	}

	return true;
}

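/*
 * A minimal sketch of the expected call pattern from an FFA_MSG_SEND2
 * handler (hypothetical caller shown for illustration only; the real
 * dispatch lives in the API layer):
 *
 *	struct ffa_value ret;
 *
 *	if (ffa_indirect_msg_send2_forward(receiver_id, sender_id, &ret)) {
 *		return ret;	// Handled by the SPMC.
 *	}
 *	// Receiver is in the current world: deliver locally.
 */
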
/**
 * Checks whether the vCPU's attempt to wait for a message has already been
 * interrupted or whether it is allowed to block.
 */
static bool ffa_indirect_msg_recv_block_interrupted(
	struct vcpu_locked current_locked)
{
	bool interrupted;

	/*
	 * Don't block if there are enabled and pending interrupts, to match
	 * the behaviour of wait_for_interrupt.
	 */
	interrupted = (vcpu_interrupt_count_get(current_locked) > 0);

	return interrupted;
}

/**
 * Returns true if a pending message was written to the return code: either
 * a v1.0 FFA_MSG_SEND or an FFA_ERROR.
 */
static bool plat_ffa_return_pending_messages(struct vm_locked vm_locked,
					     struct ffa_value *ret)
{
	/* Return pending messages without blocking. */
	if (vm_locked.vm->mailbox.state == MAILBOX_STATE_FULL) {
		*ret = ffa_msg_recv_return(vm_locked.vm);
		if (ret->func == FFA_MSG_SEND_32) {
			vm_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
		}
		return true;
	}

	return false;
}

/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 */
struct ffa_value ffa_indirect_msg_recv(bool block,
				       struct vcpu_locked current_locked,
				       struct vcpu **next)
{
	struct vm *vm = current_locked.vcpu->vm;
	struct vcpu *current = current_locked.vcpu;
	struct vm_locked vm_locked;
	struct ffa_value return_code;

	/*
	 * The primary VM will receive messages as a status code from running
	 * vCPUs and must not call this function.
	 */
	if (vm_is_primary(vm)) {
		return ffa_error(FFA_NOT_SUPPORTED);
	}

	/*
	 * Deny if the vCPU is executing in the context of an
	 * FFA_MSG_SEND_DIRECT_REQ invocation.
	 */
	if (is_ffa_direct_msg_request_ongoing(current_locked)) {
		return ffa_error(FFA_DENIED);
	}

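	/*
	 * A VM's lock must be acquired before any of its vCPUs' locks, so
	 * release the vCPU lock, take the VM lock, and then reacquire the
	 * vCPU lock.
	 */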
	vcpu_unlock(&current_locked);
	vm_locked = vm_lock(vm);
	current_locked = vcpu_lock(current);

	if (plat_ffa_return_pending_messages(vm_locked, &return_code)) {
		goto out;
	}

	/* No pending message, so fail if not allowed to block. */
	if (!block) {
		return_code = ffa_error(FFA_RETRY);
		goto out;
	}

	/*
	 * From this point onward this call can only be interrupted or a
	 * message received. If a message is received, the return value will
	 * be set at that time to FFA_SUCCESS.
	 */
	return_code = ffa_error(FFA_INTERRUPTED);
	if (ffa_indirect_msg_recv_block_interrupted(current_locked)) {
		goto out;
	}

	{
		/* Switch back to the primary VM to block. */
		struct ffa_value run_return = {
			.func = FFA_MSG_WAIT_32,
			.arg1 = ffa_vm_vcpu(vm->id,
					    vcpu_index(current_locked.vcpu)),
		};

		*next = api_switch_to_primary(current_locked, run_return,
					      VCPU_STATE_WAITING);
	}
out:
	vm_unlock(&vm_locked);

	return return_code;
}

/**
 * Notifies the `to` VM about the message currently in its mailbox, possibly
 * with the help of the primary VM.
 */
static struct ffa_value deliver_msg(struct vm_locked to, ffa_id_t from_id,
				    struct vcpu_locked current_locked,
				    struct vcpu **next)
{
	struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
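	/*
	 * arg1 encodes the sender ID in the top 16 bits and the receiver ID
	 * in the bottom 16, per the FFA_MSG_SEND register convention.
	 */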
	struct ffa_value primary_ret = {
		.func = FFA_MSG_SEND_32,
		.arg1 = ((uint32_t)from_id << 16) | to.vm->id,
	};

	/* Messages for the primary VM are delivered directly. */
	if (vm_is_primary(to.vm)) {
		/*
		 * Only tell the primary VM the size and other details if the
		 * message is for it, to avoid leaking data about messages for
		 * other VMs.
		 */
		primary_ret = ffa_msg_recv_return(to.vm);

		*next = api_switch_to_primary(current_locked, primary_ret,
					      VCPU_STATE_BLOCKED);
		return ret;
	}

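	/*
	 * Mark the mailbox full: no new messages can be delivered until the
	 * receiver has read and released it.
	 */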
	to.vm->mailbox.state = MAILBOX_STATE_FULL;

	/* Messages for the TEE are sent on via the dispatcher. */
	if (to.vm->id == HF_TEE_VM_ID) {
		struct ffa_value call = ffa_msg_recv_return(to.vm);

		ret = arch_other_world_call(call);
		/*
		 * After the call to the TEE completes it must have finished
		 * reading its RX buffer, so it is ready for another message.
		 */
		to.vm->mailbox.state = MAILBOX_STATE_EMPTY;
		/*
		 * Don't return to the primary VM in this case, as the TEE is
		 * not (yet) scheduled via FF-A.
		 */
		return ret;
	}

	/* Return to the primary VM directly or with a switch. */
	if (from_id != HF_PRIMARY_VM_ID) {
		*next = api_switch_to_primary(current_locked, primary_ret,
					      VCPU_STATE_BLOCKED);
	}

	return ret;
}

/**
 * Copies data from the sender's send buffer to the recipient's receive
 * buffer and notifies the recipient.
 *
 * If the recipient's receive buffer is busy, it can optionally register the
 * caller to be notified when the recipient's receive buffer becomes
 * available.
 */
struct ffa_value ffa_indirect_msg_send(ffa_id_t sender_vm_id,
				       ffa_id_t receiver_vm_id, uint32_t size,
				       struct vcpu *current, struct vcpu **next)
{
	struct vm *from = current->vm;
	struct vm *to;
	struct vm_locked to_locked;
	const void *from_msg;
	struct ffa_value ret;
	struct vcpu_locked current_locked;
	bool is_direct_request_ongoing;

	/* Ensure the sender VM ID corresponds to the current VM. */
	if (sender_vm_id != from->id) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (receiver_vm_id == from->id) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* Limit the size of the transfer. */
	if (size > FFA_MSG_PAYLOAD_MAX) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* Ensure the receiver VM exists. */
	to = vm_find(receiver_vm_id);
	if (to == NULL) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Deny if the vCPU is executing in the context of an
	 * FFA_MSG_SEND_DIRECT_REQ invocation.
	 */
	current_locked = vcpu_lock(current);
	is_direct_request_ongoing =
		is_ffa_direct_msg_request_ongoing(current_locked);

	if (is_direct_request_ongoing) {
		ret = ffa_error(FFA_DENIED);
		goto out_current;
	}

	/*
	 * Check that the sender has configured its send buffer. If the TX
	 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it
	 * can be safely accessed after releasing the lock, since the TX
	 * mailbox address can only be configured once.
	 * A VM's lock must be acquired before any of its vCPUs' locks. Hence,
	 * unlock the current vCPU and reacquire it immediately after its VM's
	 * lock.
	 */
	vcpu_unlock(&current_locked);
	sl_lock(&from->lock);
	current_locked = vcpu_lock(current);
	from_msg = from->mailbox.send;
	sl_unlock(&from->lock);

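	/*
	 * A NULL send buffer means the sender never mapped its TX buffer
	 * (e.g. via FFA_RXTX_MAP).
	 */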
	if (from_msg == NULL) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out_current;
	}

	to_locked = vm_lock(to);

	if (vm_is_mailbox_busy(to_locked)) {
		ret = ffa_error(FFA_BUSY);
		goto out;
	}

	/* Copy data. */
	memcpy_s(to->mailbox.recv, FFA_MSG_PAYLOAD_MAX, from_msg, size);
	to->mailbox.recv_size = size;
	to->mailbox.recv_sender = sender_vm_id;
	to->mailbox.recv_func = FFA_MSG_SEND_32;
	to->mailbox.state = MAILBOX_STATE_FULL;
	ret = deliver_msg(to_locked, sender_vm_id, current_locked, next);

out:
	vm_unlock(&to_locked);

out_current:
	vcpu_unlock(&current_locked);

	return ret;
}
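
/*
 * A minimal sketch of the VM-side flow that exercises this path (illustrative
 * only; assumes the v1.0 FFA_MSG_SEND register layout and a VM-side ffa_call
 * helper):
 *
 *	memcpy(tx_buffer, payload, size);	// TX mapped via FFA_RXTX_MAP.
 *	ffa_call((struct ffa_value){
 *		.func = FFA_MSG_SEND_32,
 *		.arg1 = ((uint32_t)sender_id << 16) | receiver_id,
 *		.arg3 = size,
 *	});
 */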