#include "hf/api.h"

#include <assert.h>

#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

static_assert(HF_RPC_REQUEST_MAX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers "
	      "so the maximum request is the size of a page.");
/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 */
static struct vcpu *api_switch_to_primary(size_t primary_retval,
					  enum vcpu_state secondary_state)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
	struct vcpu *next = &primary->vcpus[cpu_index(cpu())];

	/* Switch back to primary VM. */
	vm_set_current(primary);

	/*
	 * Set the return value the primary VM will observe for its run of
	 * this vcpu, e.g. that the vcpu blocked waiting for an interrupt.
	 */
	arch_regs_set_retval(&next->regs, primary_retval);

	/* Mark the vcpu as waiting. */
	sl_lock(&vcpu->lock);
	vcpu->state = secondary_state;
	sl_unlock(&vcpu->lock);

	return next;
}

/**
 * Returns the number of VMs configured to run.
 */
int32_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int32_t api_vcpu_get_count(uint32_t vm_id)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vcpus for scheduling. */
	if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_get(vm_id);
	if (vm == NULL) {
		return -1;
	}

	return vm->vcpu_count;
}
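
/*
 * Example (illustrative sketch, not hypervisor code): from the primary VM,
 * these two counts are typically combined to enumerate every schedulable
 * vcpu, using the hvc wrappers declared in "vmapi/hf/call.h". The wrapper
 * names and the assumption that VM ids are dense are illustrative.
 *
 *	uint32_t vm_count = hf_vm_get_count();
 *	for (uint32_t id = 0; id < vm_count; ++id) {
 *		int32_t vcpus = hf_vcpu_get_count(id);
 *		if (id == HF_PRIMARY_VM_ID || vcpus < 0) {
 *			continue;
 *		}
 *		... remember that VM `id` has `vcpus` vcpus ...
 *	}
 */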

/**
 * Runs the given vcpu of the given vm.
 */
int32_t api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx, struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	int32_t ret;

	/* Only the primary VM can switch vcpus. */
	if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
		goto fail;
	}

	/* Only secondary VM vcpus can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto fail;
	}

	/* The requested VM must exist. */
	vm = vm_get(vm_id);
	if (vm == NULL) {
		goto fail;
	}

	/* The requested vcpu must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto fail;
	}

	vcpu = &vm->vcpus[vcpu_idx];

	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0);
	} else {
		vcpu->state = vcpu_state_running;
		vm_set_current(vm);
		*next = vcpu;
		ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_YIELD, 0);
	}
	sl_unlock(&vcpu->lock);

	return ret;

fail:
	return HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0);
}
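
/*
 * Example (illustrative sketch, not hypervisor code): a minimal scheduling
 * loop in the primary VM, assuming the hf_vcpu_run() wrapper declared in
 * "vmapi/hf/call.h". The response comparisons mirror the values built with
 * HF_VCPU_RUN_RESPONSE above; a real scheduler would decode the response
 * code and apply its own policy.
 *
 *	for (;;) {
 *		int32_t resp = hf_vcpu_run(vm_id, vcpu_idx);
 *		if (resp == HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_YIELD, 0)) {
 *			... the vcpu ran and yielded; requeue it ...
 *		} else {
 *			... the vcpu is blocked; pick another one ...
 *		}
 *	}
 */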

/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int32_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
{
	struct vm *vm = cpu()->current->vm;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int32_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->rpc.send || vm->rpc.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/*
	 * Convert the intermediate physical addresses to physical addresses,
	 * provided the addresses are accessible from the VM: this ensures
	 * that the caller isn't trying to use another VM's memory.
	 */
	if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
	    !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Map the send page as read-only in the hypervisor address space. */
	vm->rpc.send = mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
	if (!vm->rpc.send) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->rpc.recv = mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
	if (!vm->rpc.recv) {
		vm->rpc.send = NULL;
		mm_unmap(pa_send_begin, pa_send_end, 0);
		ret = -1;
		goto exit;
	}

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}
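
/*
 * Example (illustrative sketch, not hypervisor code): a VM registers its
 * buffers once, early in boot, passing two page-aligned pages of its own
 * memory. This assumes the hf_vm_configure() wrapper declared in
 * "vmapi/hf/call.h"; the page variables and the cast (which assumes the
 * VM's virtual addresses map 1:1 to intermediate physical addresses) are
 * hypothetical.
 *
 *	alignas(PAGE_SIZE) static uint8_t send_page[PAGE_SIZE];
 *	alignas(PAGE_SIZE) static uint8_t recv_page[PAGE_SIZE];
 *
 *	if (hf_vm_configure((size_t)send_page, (size_t)recv_page) != 0) {
 *		... already configured, or misaligned/inaccessible pages ...
 *	}
 */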

/**
 * Sends an RPC request from the primary VM to a secondary VM. Data is copied
 * from the caller's send buffer to the destination's receive buffer.
 */
int32_t api_rpc_request(uint32_t vm_id, size_t size)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;
	int32_t ret;

	/* Basic argument validation. */
	if (size > HF_RPC_REQUEST_MAX_SIZE) {
		return -1;
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (vm_id == from->id) {
		return -1;
	}

	/* Ensure the target VM exists. */
	to = vm_get(vm_id);
	if (to == NULL) {
		return -1;
	}

	/* Only the primary VM can make calls. */
	if (from->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->rpc.send;
	sl_unlock(&from->lock);
	if (!from_buf) {
		return -1;
	}

	sl_lock(&to->lock);

	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
	} else {
		/* Copy data. */
		memcpy(to->rpc.recv, from_buf, size);
		to->rpc.recv_bytes = size;

		if (!to->rpc.recv_waiter) {
			to->rpc.state = rpc_state_pending;
			ret = to->vcpu_count;
		} else {
			struct vcpu *to_vcpu = to->rpc.recv_waiter;

			to->rpc.state = rpc_state_inflight;

			/*
			 * Take target vcpu out of waiter list and mark as
			 * ready to run again.
			 */
			sl_lock(&to_vcpu->lock);
			to->rpc.recv_waiter = to_vcpu->rpc_next;
			to_vcpu->state = vcpu_state_ready;
			arch_regs_set_retval(&to_vcpu->regs, size);
			sl_unlock(&to_vcpu->lock);

			ret = to_vcpu - to->vcpus;
		}
	}

	sl_unlock(&to->lock);

	return ret;
}
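
/*
 * Example (illustrative sketch, not hypervisor code): the primary VM copies a
 * message into its registered send page and posts it to a secondary VM via
 * the hf_rpc_request() wrapper declared in "vmapi/hf/call.h". send_page and
 * secondary_vcpus are hypothetical. As implemented above, the return value is
 * the index of the vcpu that was unblocked, or the target's vcpu count if no
 * vcpu was waiting.
 *
 *	memcpy(send_page, msg, msg_len);
 *	int32_t target = hf_rpc_request(secondary_id, msg_len);
 *	if (target < 0) {
 *		... target busy or arguments invalid; retry later ...
 *	} else if (target < secondary_vcpus) {
 *		... vcpu `target` was unblocked; run it to handle the call ...
 *	} else {
 *		... no vcpu was waiting; the request stays pending ...
 *	}
 */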

/**
 * Reads a request sent from a previous call to api_rpc_request. If one isn't
 * available, this function can optionally block the caller until one becomes
 * available.
 *
 * Once the caller has completed handling a request, it must indicate this by
 * either calling api_rpc_reply or api_rpc_ack. No new requests can be accepted
 * until the current one is acknowledged.
 */
int32_t api_rpc_read_request(bool block, struct vcpu **next)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *vm = vcpu->vm;
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
	int32_t ret;

	/* Only secondary VMs can receive calls. */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return -1;
	}

	sl_lock(&vm->lock);
	if (vm->rpc.state == rpc_state_pending) {
		ret = vm->rpc.recv_bytes;
		vm->rpc.state = rpc_state_inflight;
	} else if (!block) {
		ret = -1;
	} else {
		sl_lock(&vcpu->lock);
		vcpu->state = vcpu_state_blocked_rpc;

		/* Push vcpu into waiter list. */
		vcpu->rpc_next = vm->rpc.recv_waiter;
		vm->rpc.recv_waiter = vcpu;
		sl_unlock(&vcpu->lock);

		/* Switch back to primary VM. */
		*next = &primary->vcpus[cpu_index(cpu())];
		vm_set_current(primary);

		/*
		 * Indicate to the primary VM that this vcpu blocked waiting
		 * for an interrupt.
		 */
		arch_regs_set_retval(
			&(*next)->regs,
			HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
					     0));
		ret = 0;
	}
	sl_unlock(&vm->lock);

	return ret;
}
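
/*
 * Example (illustrative sketch, not hypervisor code): a secondary VM's
 * service loop built on the wrappers declared in "vmapi/hf/call.h"
 * (hf_rpc_read_request(), hf_rpc_reply()). recv_page/send_page are the
 * hypothetical buffers registered via hf_vm_configure(), and handle() is a
 * placeholder for the VM's own request handler.
 *
 *	for (;;) {
 *		int32_t req_size = hf_rpc_read_request(true);
 *		if (req_size < 0) {
 *			continue;
 *		}
 *		size_t reply_len = handle(recv_page, req_size, send_page);
 *		hf_rpc_reply(reply_len, true);
 *	}
 *
 * Passing ack=true to hf_rpc_reply() acknowledges the request in the same
 * call, so the loop is ready to accept a new request on the next iteration.
 */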

/**
 * Sends a reply from a secondary VM to the primary VM. Data is copied from the
 * caller's send buffer to the destination's receive buffer.
 *
 * It can optionally acknowledge the pending request.
 */
int32_t api_rpc_reply(size_t size, bool ack, struct vcpu **next)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;

	/* Basic argument validation. */
	if (size > PAGE_SIZE) {
		return -1;
	}

	/* Only secondary VMs can send responses. */
	if (from->id == HF_PRIMARY_VM_ID) {
		return -1;
	}

	/* Acknowledge the current pending request if requested. */
	if (ack) {
		api_rpc_ack();
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->rpc.send;
	sl_unlock(&from->lock);
	if (!from_buf) {
		return -1;
	}

	to = vm_get(HF_PRIMARY_VM_ID);
	sl_lock(&to->lock);

	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
		/*
		 * Fail if the target isn't currently ready to receive a
		 * response.
		 */
		sl_unlock(&to->lock);
		return -1;
	}

	/* Copy data. */
	memcpy(to->rpc.recv, from_buf, size);
	to->rpc.recv_bytes = size;
	to->rpc.state = rpc_state_inflight;
	sl_unlock(&to->lock);

	/*
	 * Switch back to the primary VM so that it is aware that a response
	 * was received, but leave the current vcpu runnable.
	 */
	*next = api_switch_to_primary(
		HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_RESPONSE_READY, size),
		vcpu_state_ready);

	return 0;
}

/**
 * Acknowledges that either a request or a reply has been received and handled.
 * After this call completes, the caller will be able to receive additional
 * requests or replies.
 */
int32_t api_rpc_ack(void)
{
	struct vm *vm = cpu()->current->vm;
	int32_t ret;

	sl_lock(&vm->lock);
	if (vm->rpc.state != rpc_state_inflight) {
		ret = -1;
	} else {
		ret = 0;
		vm->rpc.state = rpc_state_idle;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}

/**
 * Returns to the primary VM, leaving the current vcpu ready to be scheduled
 * again.
 */
struct vcpu *api_yield(void)
{
	return api_switch_to_primary(HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_YIELD, 0),
				     vcpu_state_ready);
}

/**
 * Puts the current vcpu in wait-for-interrupt mode, and returns to the primary
 * VM.
 */
struct vcpu *api_wait_for_interrupt(void)
{
	return api_switch_to_primary(
		HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0),
		vcpu_state_blocked_interrupt);
}