#include "hf/api.h"

#include <assert.h>

#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 *
 * This triggers the scheduling logic to run. Called in the context of a
 * secondary VM, it causes HF_VCPU_RUN to return and the primary VM to regain
 * control of the CPU.
 */
static struct vcpu *api_switch_to_primary(size_t primary_retval,
					  enum vcpu_state secondary_state)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
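	/* The primary VM has one vcpu per physical CPU, so use this CPU's. */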
	struct vcpu *next = &primary->vcpus[cpu_index(cpu())];

	/* Switch back to primary VM. */
	vm_set_current(primary);

	/*
	 * Set the return value for the primary VM's call to HF_VCPU_RUN.
	 */
	arch_regs_set_retval(&next->regs, primary_retval);

	/* Mark the vcpu as waiting. */
	sl_lock(&vcpu->lock);
	vcpu->state = secondary_state;
	sl_unlock(&vcpu->lock);

	return next;
}

/**
 * Returns to the primary VM, leaving the current vcpu ready to be scheduled
 * again.
 */
struct vcpu *api_yield(void)
{
	return api_switch_to_primary(
		HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_YIELD, 0, 0),
		vcpu_state_ready);
}

/**
 * Puts the current vcpu into wait-for-interrupt mode and returns to the
 * primary VM.
 */
struct vcpu *api_wait_for_interrupt(void)
{
	return api_switch_to_primary(
		HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0, 0),
		vcpu_state_blocked_interrupt);
}

/**
 * Returns the number of VMs configured to run.
 */
int64_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int64_t api_vcpu_get_count(uint32_t vm_id)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vcpus for scheduling. */
	if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_get(vm_id);
	if (vm == NULL) {
		return -1;
	}

	return vm->vcpu_count;
}

/**
 * Runs the given vcpu of the given VM.
 */
int64_t api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx, struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	int64_t ret;

	/* Only the primary VM can switch vcpus. */
	if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
		goto fail;
	}

	/* Only secondary VM vcpus can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto fail;
	}

	/* The requested VM must exist. */
	vm = vm_get(vm_id);
	if (vm == NULL) {
		goto fail;
	}

	/* The requested vcpu must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto fail;
	}

	vcpu = &vm->vcpus[vcpu_idx];

	sl_lock(&vcpu->lock);
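	/*
	 * A vcpu may only be run when it is ready; otherwise report back to
	 * the primary VM that it should wait for an interrupt.
	 */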
	if (vcpu->state != vcpu_state_ready) {
		ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0,
					   0);
	} else {
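		/* Mark the vcpu as running and switch to it. */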
		vcpu->state = vcpu_state_running;
		vm_set_current(vm);
		*next = vcpu;
		ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_YIELD, 0, 0);
	}
	sl_unlock(&vcpu->lock);

	return ret;

fail:
	return HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0, 0);
}

/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
{
	struct vm *vm = cpu()->current->vm;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int64_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->mailbox.send || vm->mailbox.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/*
	 * Convert the intermediate physical addresses to physical addresses,
	 * provided the addresses were accessible from the VM, which ensures
	 * that the caller isn't trying to use another VM's memory.
	 */
	if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
	    !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

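	/*
	 * Each mailbox buffer is exactly one page (see the static_assert at
	 * the top of this file), so map a single page for each.
	 */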
	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Map the send page as read-only in the hypervisor address space. */
	vm->mailbox.send =
		mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
	if (!vm->mailbox.send) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->mailbox.recv =
		mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
	if (!vm->mailbox.recv) {
		vm->mailbox.send = NULL;
		mm_unmap(pa_send_begin, pa_send_end, 0);
		ret = -1;
		goto exit;
	}

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 */
int64_t api_mailbox_send(uint32_t vm_id, size_t size, struct vcpu **next)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;
	uint16_t vcpu;
	int64_t ret;
	int64_t primary_ret;

	/* Limit the size of the transfer. */
	if (size > HF_MAILBOX_SIZE) {
		return -1;
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (vm_id == from->id) {
		return -1;
	}

	/* Ensure the target VM exists. */
	to = vm_get(vm_id);
	if (to == NULL) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->mailbox.send;
	sl_unlock(&from->lock);
	if (from_buf == NULL) {
		return -1;
	}

	sl_lock(&to->lock);

	if (to->mailbox.state != mailbox_state_empty ||
	    to->mailbox.recv == NULL) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
		goto out;
	}

	/* Copy data. */
	memcpy(to->mailbox.recv, from_buf, size);
	to->mailbox.recv_bytes = size;
	to->mailbox.recv_from_id = from->id;
	to->mailbox.state = mailbox_state_read;

	/* Messages for the primary VM are delivered directly. */
	if (to->id == HF_PRIMARY_VM_ID) {
		primary_ret =
			HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_MESSAGE, 0, size);
		ret = 0;
		goto out;
	}

	/*
	 * Try to find a vcpu to handle the message and tell the scheduler to
	 * run it.
	 */
	if (to->mailbox.recv_waiter == NULL) {
		/*
		 * The scheduler must choose a vcpu to interrupt so it can
		 * handle the message.
		 */
		to->mailbox.state = mailbox_state_received;
		vcpu = HF_INVALID_VCPU;
	} else {
		struct vcpu *to_vcpu = to->mailbox.recv_waiter;

		/*
		 * Take the target vcpu out of the waiter list and mark it as
		 * ready to run again.
		 */
		sl_lock(&to_vcpu->lock);
		to->mailbox.recv_waiter = to_vcpu->mailbox_next;
		to_vcpu->state = vcpu_state_ready;

		/* Return from HF_MAILBOX_RECEIVE. */
		arch_regs_set_retval(&to_vcpu->regs,
				     HF_MAILBOX_RECEIVE_RESPONSE(
					     to->mailbox.recv_from_id, size));

		sl_unlock(&to_vcpu->lock);

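		/* The vcpu index is its offset in the VM's vcpu array. */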
		vcpu = to_vcpu - to->vcpus;
	}

	/* Return to the primary VM directly or with a switch. */
	primary_ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAKE_UP, to->id, vcpu);
	ret = 0;

out:
	/*
	 * Unlock before routing the return values as switching to the primary
	 * will acquire more locks and nesting the locks is avoidable.
	 */
	sl_unlock(&to->lock);

	/* Report errors to the sender. */
	if (ret != 0) {
		return ret;
	}

	/* If the sender is the primary, return the vcpu to schedule. */
	if (from->id == HF_PRIMARY_VM_ID) {
		return vcpu;
	}

	/* Switch to primary for scheduling and return success to the sender. */
	*next = api_switch_to_primary(primary_ret, vcpu_state_ready);
	return 0;
}

/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 */
int64_t api_mailbox_receive(bool block, struct vcpu **next)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *vm = vcpu->vm;
	int64_t ret = 0;

	/*
	 * The primary VM will receive messages as a status code from running
	 * vcpus and must not call this function.
	 */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* Return pending messages without blocking. */
	if (vm->mailbox.state == mailbox_state_received) {
		vm->mailbox.state = mailbox_state_read;
		block = false;
		ret = HF_MAILBOX_RECEIVE_RESPONSE(vm->mailbox.recv_from_id,
						  vm->mailbox.recv_bytes);
		goto out;
	}

	/* No pending message, so fail if not allowed to block. */
	if (!block) {
		ret = -1;
		goto out;
	}

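	/*
	 * Block the caller: mark the vcpu as blocked on the mailbox and add it
	 * to the mailbox's waiter list before handing control back.
	 */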
	sl_lock(&vcpu->lock);
	vcpu->state = vcpu_state_blocked_mailbox;

	/* Push vcpu into waiter list. */
	vcpu->mailbox_next = vm->mailbox.recv_waiter;
	vm->mailbox.recv_waiter = vcpu;
	sl_unlock(&vcpu->lock);

	/* Switch back to primary VM to block. */
	*next = api_wait_for_interrupt();

out:
	sl_unlock(&vm->lock);

	return ret;
}

/**
 * Clears the caller's mailbox so that a new message can be received. The
 * caller must have copied out all data they wish to preserve as new messages
 * will overwrite the old and will arrive asynchronously.
 */
int64_t api_mailbox_clear(void)
{
	struct vm *vm = cpu()->current->vm;
	int64_t ret;

	sl_lock(&vm->lock);
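	/*
	 * The mailbox can only be cleared after the pending message has been
	 * read.
	 */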
	if (vm->mailbox.state == mailbox_state_read) {
		ret = 0;
		vm->mailbox.state = mailbox_state_empty;
	} else {
		ret = -1;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}