/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"

#include <assert.h>

#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
              "Currently, a page is mapped for the send and receive buffers "
              "so the maximum request is the size of a page.");

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 *
 * This triggers the scheduling logic to run. Call it in the context of a
 * secondary VM to cause HF_VCPU_RUN to return and the primary VM to regain
 * control of the CPU.
 */
static struct vcpu *api_switch_to_primary(struct hf_vcpu_run_return primary_ret,
                                          enum vcpu_state secondary_state)
{
        struct vcpu *vcpu = cpu()->current;
        struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
        struct vcpu *next = &primary->vcpus[cpu_index(cpu())];

        /* Switch back to primary VM. */
        vm_set_current(primary);

        /* Set the return value for the primary VM's call to HF_VCPU_RUN. */
        arch_regs_set_retval(&next->regs,
                             hf_vcpu_run_return_encode(primary_ret));

        /* Mark the vcpu as waiting. */
        sl_lock(&vcpu->lock);
        vcpu->state = secondary_state;
        sl_unlock(&vcpu->lock);

        return next;
}

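/*
 * Note (illustrative, not part of this file): the value packed by
 * hf_vcpu_run_return_encode() above is what the primary VM sees as the raw
 * return of HF_VCPU_RUN. Assuming the matching decode helper is named
 * hf_vcpu_run_return_decode(), the primary unpacks it roughly as:
 *
 *     struct hf_vcpu_run_return r = hf_vcpu_run_return_decode(raw);
 *     // r.code now selects the scheduling action to take.
 */
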
/**
 * Returns to the primary VM, leaving the current vcpu ready to be scheduled
 * again.
 */
struct vcpu *api_yield(void)
{
        struct hf_vcpu_run_return ret = {
                .code = HF_VCPU_RUN_YIELD,
        };
        return api_switch_to_primary(ret, vcpu_state_ready);
}

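/*
 * Usage sketch: from a secondary VM, yielding is a single hypercall. This
 * assumes the wrapper in "vmapi/hf/call.h" is named hf_vcpu_yield(); the
 * name is an assumption, not confirmed by this file.
 *
 *     while (!work_ready()) {
 *             hf_vcpu_yield();  // let the primary schedule other vcpus
 *     }
 */
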
/**
 * Puts the current vcpu into wait-for-interrupt mode and returns to the
 * primary VM.
 */
struct vcpu *api_wait_for_interrupt(void)
{
        struct hf_vcpu_run_return ret = {
                .code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
        };
        return api_switch_to_primary(ret, vcpu_state_blocked_interrupt);
}

/**
 * Returns the number of VMs configured to run.
 */
int64_t api_vm_get_count(void)
{
        return vm_get_count();
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int64_t api_vcpu_get_count(uint32_t vm_id)
{
        struct vm *vm;

        /* Only the primary VM needs to know about vcpus for scheduling. */
        if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
                return -1;
        }

        vm = vm_get(vm_id);
        if (vm == NULL) {
                return -1;
        }

        return vm->vcpu_count;
}

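/*
 * Usage sketch: the primary VM can size its scheduling tables by walking
 * these counts. Assumes vmapi wrappers named hf_vm_get_count() and
 * hf_vcpu_get_count() in "vmapi/hf/call.h"; the wrapper names are
 * assumptions.
 *
 *     int64_t vm_count = hf_vm_get_count();
 *     for (uint32_t id = 0; id < vm_count; ++id) {
 *             if (id == HF_PRIMARY_VM_ID) {
 *                     continue;  // only secondary vcpus are scheduled
 *             }
 *             int64_t vcpu_count = hf_vcpu_get_count(id);
 *             // Record vcpu_count for this VM in the scheduler's tables.
 *     }
 */
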
/**
 * Runs the given vcpu of the given VM.
 */
struct hf_vcpu_run_return api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx,
                                       struct vcpu **next)
{
        struct vm *vm;
        struct vcpu *vcpu;
        struct hf_vcpu_run_return ret = {
                .code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
        };

        /* Only the primary VM can switch vcpus. */
        if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
                goto out;
        }

        /* Only secondary VM vcpus can be run. */
        if (vm_id == HF_PRIMARY_VM_ID) {
                goto out;
        }

        /* The requested VM must exist. */
        vm = vm_get(vm_id);
        if (vm == NULL) {
                goto out;
        }

        /* The requested vcpu must exist. */
        if (vcpu_idx >= vm->vcpu_count) {
                goto out;
        }

        vcpu = &vm->vcpus[vcpu_idx];

        sl_lock(&vcpu->lock);
        if (vcpu->state != vcpu_state_ready) {
                ret.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
        } else {
                vcpu->state = vcpu_state_running;
                vm_set_current(vm);
                *next = vcpu;
                ret.code = HF_VCPU_RUN_YIELD;
        }
        sl_unlock(&vcpu->lock);

out:
        return ret;
}

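/*
 * Usage sketch: a minimal primary VM scheduling loop around HF_VCPU_RUN.
 * Assumes a vmapi wrapper named hf_vcpu_run(vm_id, vcpu_idx) in
 * "vmapi/hf/call.h" returning a decoded struct hf_vcpu_run_return; the
 * wrapper name and signature are assumptions.
 *
 *     struct hf_vcpu_run_return r = hf_vcpu_run(vm_id, vcpu_idx);
 *     switch (r.code) {
 *     case HF_VCPU_RUN_YIELD:
 *             // The vcpu ran; it is ready to be scheduled again.
 *             break;
 *     case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
 *             // Don't reschedule the vcpu until it is interrupted.
 *             break;
 *     case HF_VCPU_RUN_WAKE_UP:
 *             // Make vcpu r.wake_up.vcpu of VM r.wake_up.vm_id runnable.
 *             break;
 *     case HF_VCPU_RUN_MESSAGE:
 *             // r.message.size bytes are in the primary's receive buffer.
 *             break;
 *     }
 */
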
/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
{
        struct vm *vm = cpu()->current->vm;
        paddr_t pa_send_begin;
        paddr_t pa_send_end;
        paddr_t pa_recv_begin;
        paddr_t pa_recv_end;
        int64_t ret;

        /* Fail if addresses are not page-aligned. */
        if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
            (ipa_addr(recv) & (PAGE_SIZE - 1))) {
                return -1;
        }

        sl_lock(&vm->lock);

        /* We only allow these to be set up once. */
        if (vm->mailbox.send || vm->mailbox.recv) {
                ret = -1;
                goto exit;
        }

        /*
         * TODO: Once memory sharing is implemented, we need to make sure that
         * these pages aren't and won't be shared.
         */

        /*
         * Convert the intermediate physical addresses to physical addresses,
         * which succeeds only if the addresses are accessible from the VM.
         * This ensures the caller isn't trying to use another VM's memory.
         */
        if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
            !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
                ret = -1;
                goto exit;
        }

        /* Fail if the same page is used for the send and receive pages. */
        if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
                ret = -1;
                goto exit;
        }

        pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
        pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

        /* Map the send page as read-only in the hypervisor address space. */
        vm->mailbox.send =
                mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
        if (!vm->mailbox.send) {
                ret = -1;
                goto exit;
        }

        /*
         * Map the receive page as writable in the hypervisor address space. On
         * failure, unmap the send page before returning.
         */
        vm->mailbox.recv =
                mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
        if (!vm->mailbox.recv) {
                vm->mailbox.send = NULL;
                mm_unmap(pa_send_begin, pa_send_end, 0);
                ret = -1;
                goto exit;
        }

        /* TODO: Notify any waiters. */

        ret = 0;
exit:
        sl_unlock(&vm->lock);

        return ret;
}

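/*
 * Usage sketch: a VM donates one page per direction for its mailbox.
 * Assumes a vmapi wrapper named hf_vm_configure(send, recv) in
 * "vmapi/hf/call.h" taking guest physical addresses as plain integers
 * (hf_ipaddr_t); the wrapper name and address type are assumptions.
 *
 *     alignas(PAGE_SIZE) static uint8_t send_page[PAGE_SIZE];
 *     alignas(PAGE_SIZE) static uint8_t recv_page[PAGE_SIZE];
 *
 *     if (hf_vm_configure((hf_ipaddr_t)send_page,
 *                         (hf_ipaddr_t)recv_page) != 0) {
 *             // -1: misaligned, already configured, or inaccessible pages.
 *     }
 */
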
/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 */
int64_t api_mailbox_send(uint32_t vm_id, size_t size, struct vcpu **next)
{
        struct vm *from = cpu()->current->vm;
        struct vm *to;
        const void *from_buf;
        uint16_t vcpu;
        int64_t ret;
        struct hf_vcpu_run_return primary_ret = {
                .code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
        };

        /* Limit the size of the transfer. */
        if (size > HF_MAILBOX_SIZE) {
                return -1;
        }

        /* Disallow reflexive requests as this suggests an error in the VM. */
        if (vm_id == from->id) {
                return -1;
        }

        /* Ensure the target VM exists. */
        to = vm_get(vm_id);
        if (to == NULL) {
                return -1;
        }

        /*
         * Check that the sender has configured its send buffer. It is safe to
         * use from_buf after releasing the lock because the buffer cannot be
         * modified once it's configured.
         */
        sl_lock(&from->lock);
        from_buf = from->mailbox.send;
        sl_unlock(&from->lock);
        if (from_buf == NULL) {
                return -1;
        }

        sl_lock(&to->lock);

        if (to->mailbox.state != mailbox_state_empty ||
            to->mailbox.recv == NULL) {
                /* Fail if the target isn't currently ready to receive data. */
                ret = -1;
                goto out;
        }

        /* Copy data. */
        memcpy(to->mailbox.recv, from_buf, size);
        to->mailbox.recv_bytes = size;
        to->mailbox.recv_from_id = from->id;
        to->mailbox.state = mailbox_state_read;

        /* Messages for the primary VM are delivered directly. */
        if (to->id == HF_PRIMARY_VM_ID) {
                primary_ret.code = HF_VCPU_RUN_MESSAGE;
                primary_ret.message.size = size;
                ret = 0;
                /*
                 * clang-tidy isn't able to prove that
                 * `from->id != HF_PRIMARY_VM_ID`, so cover that specific case
                 * explicitly to avoid hiding other possible bugs. clang-check
                 * is cleverer and finds that this is dead code, so we also
                 * pretend to use the new value.
                 */
                if (from->id == HF_PRIMARY_VM_ID) {
                        vcpu = 0;
                        (void)vcpu;
                }
                goto out;
        }

        /*
         * Try to find a vcpu to handle the message and tell the scheduler to
         * run it.
         */
        if (to->mailbox.recv_waiter == NULL) {
                /*
                 * The scheduler must choose a vcpu to interrupt so it can
                 * handle the message.
                 */
                to->mailbox.state = mailbox_state_received;
                vcpu = HF_INVALID_VCPU;
        } else {
                struct vcpu *to_vcpu = to->mailbox.recv_waiter;

                /*
                 * Take the target vcpu out of the waiter list and mark it as
                 * ready to run again.
                 */
                sl_lock(&to_vcpu->lock);
                to->mailbox.recv_waiter = to_vcpu->mailbox_next;
                to_vcpu->state = vcpu_state_ready;

                /* Return from HF_MAILBOX_RECEIVE. */
                arch_regs_set_retval(&to_vcpu->regs,
                                     hf_mailbox_receive_return_encode((
                                             struct hf_mailbox_receive_return){
                                             .vm_id = to->mailbox.recv_from_id,
                                             .size = size,
                                     }));

                sl_unlock(&to_vcpu->lock);

                vcpu = to_vcpu - to->vcpus;
        }

        /* Return to the primary VM directly or with a switch. */
        primary_ret.code = HF_VCPU_RUN_WAKE_UP;
        primary_ret.wake_up.vm_id = to->id;
        primary_ret.wake_up.vcpu = vcpu;
        ret = 0;

out:
        /*
         * Unlock before routing the return values, as switching to the primary
         * will acquire more locks and nesting the locks is avoidable.
         */
        sl_unlock(&to->lock);

        /* Report errors to the sender. */
        if (ret != 0) {
                return ret;
        }

        /* If the sender is the primary, return the vcpu to schedule. */
        if (from->id == HF_PRIMARY_VM_ID) {
                return primary_ret.wake_up.vcpu;
        }

        /* Switch to primary for scheduling and return success to the sender. */
        *next = api_switch_to_primary(primary_ret, vcpu_state_ready);
        return 0;
}

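/*
 * Usage sketch: sending from a secondary VM, where 0 indicates success (a
 * primary sender instead receives the index of the vcpu to schedule).
 * Assumes a vmapi wrapper named hf_mailbox_send(vm_id, size) in
 * "vmapi/hf/call.h"; the wrapper name is an assumption.
 *
 *     memcpy(send_page, "ping", 4);  // payload goes in the send page
 *     if (hf_mailbox_send(target_vm_id, 4) < 0) {
 *             // -1: bad size/target, unconfigured buffer, or recipient busy.
 *     }
 */
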
/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 */
struct hf_mailbox_receive_return api_mailbox_receive(bool block,
                                                     struct vcpu **next)
{
        struct vcpu *vcpu = cpu()->current;
        struct vm *vm = vcpu->vm;
        struct hf_mailbox_receive_return ret = {
                .vm_id = HF_INVALID_VM_ID,
        };

        /*
         * The primary VM will receive messages as a status code from running
         * vcpus and must not call this function.
         */
        if (vm->id == HF_PRIMARY_VM_ID) {
                return ret;
        }

        sl_lock(&vm->lock);

        /* Return pending messages without blocking. */
        if (vm->mailbox.state == mailbox_state_received) {
                vm->mailbox.state = mailbox_state_read;
                ret.vm_id = vm->mailbox.recv_from_id;
                ret.size = vm->mailbox.recv_bytes;
                goto out;
        }

        /* There is no pending message, so fail if not allowed to block. */
        if (!block) {
                goto out;
        }

        sl_lock(&vcpu->lock);
        vcpu->state = vcpu_state_blocked_mailbox;

        /* Push the vcpu onto the waiter list. */
        vcpu->mailbox_next = vm->mailbox.recv_waiter;
        vm->mailbox.recv_waiter = vcpu;
        sl_unlock(&vcpu->lock);

        /* Switch back to the primary VM to block. */
        *next = api_wait_for_interrupt();

out:
        sl_unlock(&vm->lock);

        return ret;
}

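/*
 * Usage sketch: a secondary VM's blocking receive loop. Assumes vmapi
 * wrappers named hf_mailbox_receive(block) and hf_mailbox_clear() in
 * "vmapi/hf/call.h"; the wrapper names are assumptions.
 *
 *     for (;;) {
 *             struct hf_mailbox_receive_return r = hf_mailbox_receive(true);
 *             if (r.vm_id == HF_INVALID_VM_ID) {
 *                     continue;  // woke up without a message
 *             }
 *             handle_message(recv_page, r.size, r.vm_id);
 *             hf_mailbox_clear();  // allow the next message to be delivered
 *     }
 */
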
/**
 * Clears the caller's mailbox so that a new message can be received. The
 * caller must have copied out all data they wish to preserve, as new messages
 * will overwrite the old and will arrive asynchronously.
 */
int64_t api_mailbox_clear(void)
{
        struct vm *vm = cpu()->current->vm;
        int64_t ret;

        sl_lock(&vm->lock);
        if (vm->mailbox.state == mailbox_state_read) {
                ret = 0;
                vm->mailbox.state = mailbox_state_empty;
        } else {
                ret = -1;
        }
        sl_unlock(&vm->lock);

        if (ret == 0) {
                /* TODO: Notify waiters, if any. */
        }

        return ret;
}
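
/*
 * Usage sketch: since clearing lets the next message overwrite the receive
 * page, callers copy the payload out first (r is the result of the receive
 * sketch above). Assumes the vmapi wrapper is named hf_mailbox_clear(); the
 * name is an assumption.
 *
 *     uint8_t copy[HF_MAILBOX_SIZE];
 *     memcpy(copy, recv_page, r.size);  // preserve before clearing
 *     if (hf_mailbox_clear() != 0) {
 *             // -1: no message had been read, so there was nothing to clear.
 *     }
 */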