/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "hf/abi.h"
#include "hf/types.h"

/* Keep macro alignment */
/* clang-format off */

/* TODO: Define constants below according to spec. */
#define HF_VCPU_RUN        0xff00
#define HF_VM_GET_COUNT    0xff01
#define HF_VCPU_GET_COUNT  0xff02
#define HF_VM_CONFIGURE    0xff03
#define HF_MAILBOX_SEND    0xff04
#define HF_MAILBOX_RECEIVE 0xff05
#define HF_MAILBOX_CLEAR   0xff06

/* The amount of data that can be sent to a mailbox. */
#define HF_MAILBOX_SIZE 4096

/* clang-format on */

/**
 * This function must be implemented to trigger the architecture-specific
 * mechanism used to call into the hypervisor.
 */
int64_t hf_call(size_t arg0, size_t arg1, size_t arg2, size_t arg3);

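/*
 * Illustrative sketch only: on AArch64, hf_call() might be implemented with
 * the HVC conduit, passing the call identifier and arguments in x0-x3 and
 * reading the result back from x0. The register convention, clobber list and
 * "hvc #0" immediate are assumptions here; the authoritative implementation
 * lives in the architecture-specific sources, not in this header.
 *
 *	int64_t hf_call(size_t arg0, size_t arg1, size_t arg2, size_t arg3)
 *	{
 *		register size_t r0 __asm__("x0") = arg0;
 *		register size_t r1 __asm__("x1") = arg1;
 *		register size_t r2 __asm__("x2") = arg2;
 *		register size_t r3 __asm__("x3") = arg3;
 *
 *		// Trap to the hypervisor; it returns its result in x0.
 *		__asm__ volatile("hvc #0"
 *				 : "+r"(r0)
 *				 : "r"(r1), "r"(r2), "r"(r3)
 *				 : "memory");
 *
 *		return (int64_t)r0;
 *	}
 */
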
/**
 * Runs the given vCPU of the given VM.
 */
static inline struct hf_vcpu_run_return hf_vcpu_run(uint32_t vm_id,
                                                    uint32_t vcpu_idx)
{
        return hf_vcpu_run_return_decode(
                hf_call(HF_VCPU_RUN, vm_id, vcpu_idx, 0));
}

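/*
 * Illustrative sketch only: the primary VM's scheduler might drive a secondary
 * vCPU in a loop like the one below. The VM ID (1) and vCPU index (0) are
 * placeholders, and how the returned struct hf_vcpu_run_return is interpreted
 * depends on its definition in "hf/abi.h", which is not repeated here.
 *
 *	for (;;) {
 *		// Run vCPU 0 of (hypothetical) secondary VM 1 until it stops.
 *		struct hf_vcpu_run_return ret = hf_vcpu_run(1, 0);
 *
 *		// Inspect ret (see "hf/abi.h") to decide whether to run the
 *		// vCPU again, switch to another one, or let it sleep.
 *		(void)ret;
 *	}
 */
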
/**
 * Returns the number of secondary VMs.
 */
static inline int64_t hf_vm_get_count(void)
{
        return hf_call(HF_VM_GET_COUNT, 0, 0, 0);
}

/**
 * Returns the number of vCPUs configured in the given secondary VM.
 */
static inline int64_t hf_vcpu_get_count(uint32_t vm_id)
{
        return hf_call(HF_VCPU_GET_COUNT, vm_id, 0, 0);
}

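/*
 * Illustrative sketch only: counting every secondary vCPU in the system. The
 * assumption that secondary VMs are identified by IDs 1..hf_vm_get_count() is
 * made purely for the example; check the hypervisor's actual ID scheme.
 *
 *	int64_t vm_count = hf_vm_get_count();
 *	int64_t total_vcpus = 0;
 *
 *	for (uint32_t vm_id = 1; vm_id <= vm_count; ++vm_id) {
 *		// Each secondary VM reports how many vCPUs it was given.
 *		total_vcpus += hf_vcpu_get_count(vm_id);
 *	}
 */
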
/**
 * Configures the pages to send/receive data through. The pages must not be
 * shared.
 */
static inline int64_t hf_vm_configure(hf_ipaddr_t send, hf_ipaddr_t recv)
{
        return hf_call(HF_VM_CONFIGURE, send, recv, 0);
}

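/*
 * Illustrative sketch only: registering one page-sized send buffer and one
 * page-sized receive buffer with the hypervisor. Passing the buffer addresses
 * straight through as hf_ipaddr_t assumes the caller's addresses are already
 * intermediate physical addresses (e.g. an identity-mapped guest), and
 * treating a non-zero return as failure is also an assumption.
 *
 *	static uint8_t send_page[HF_MAILBOX_SIZE]
 *		__attribute__((aligned(HF_MAILBOX_SIZE)));
 *	static uint8_t recv_page[HF_MAILBOX_SIZE]
 *		__attribute__((aligned(HF_MAILBOX_SIZE)));
 *
 *	if (hf_vm_configure((hf_ipaddr_t)send_page, (hf_ipaddr_t)recv_page)) {
 *		// The hypervisor rejected the buffers; handle the error.
 *	}
 */
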
/**
 * Copies data from the sender's send buffer to the recipient's receive buffer.
 */
static inline int64_t hf_mailbox_send(uint32_t vm_id, size_t size)
{
        return hf_call(HF_MAILBOX_SEND, vm_id, size, 0);
}

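/*
 * Illustrative sketch only: sending a small message. Here send_page is the
 * buffer registered via hf_vm_configure() in the earlier sketch, recipient_id
 * is a placeholder VM ID, memcpy() comes from <string.h>, and treating a
 * negative return value as failure is an assumption. The payload must not
 * exceed HF_MAILBOX_SIZE.
 *
 *	const char msg[] = "ping";
 *
 *	// Stage the payload in the registered send buffer...
 *	memcpy(send_page, msg, sizeof(msg));
 *
 *	// ...then ask the hypervisor to copy it to the recipient's mailbox.
 *	if (hf_mailbox_send(recipient_id, sizeof(msg)) < 0) {
 *		// Delivery failed (e.g. invalid or busy recipient).
 *	}
 */
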
/**
 * Called by secondary VMs to receive a message. The call can optionally block
 * until a message is received.
 *
 * If no message was received, the VM ID will be HF_INVALID_VM_ID.
 *
 * The mailbox must be cleared before a new message can be received.
 */
static inline struct hf_mailbox_receive_return hf_mailbox_receive(bool block)
{
        return hf_mailbox_receive_return_decode(
                hf_call(HF_MAILBOX_RECEIVE, block, 0, 0));
}

/**
 * Clears the mailbox so a new message can be received.
 */
static inline int64_t hf_mailbox_clear(void)
{
        return hf_call(HF_MAILBOX_CLEAR, 0, 0, 0);
}

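/*
 * Illustrative sketch only: a secondary VM's receive loop. The vm_id and size
 * fields on struct hf_mailbox_receive_return are assumptions suggested by the
 * comments above (the real layout is in "hf/abi.h"), recv_page is the receive
 * buffer registered via hf_vm_configure() in the earlier sketch, and
 * handle_message() is a hypothetical helper.
 *
 *	for (;;) {
 *		// Block until a message arrives.
 *		struct hf_mailbox_receive_return msg = hf_mailbox_receive(true);
 *
 *		if (msg.vm_id == HF_INVALID_VM_ID) {
 *			// Nothing was delivered; try again.
 *			continue;
 *		}
 *
 *		// msg.size bytes from the sender are now in recv_page.
 *		handle_message(recv_page, msg.size);
 *
 *		// Free the mailbox so the next message can be delivered.
 *		hf_mailbox_clear();
 *	}
 */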