/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "hf/spci.h"
#include "hf/types.h"

/* Keep macro alignment */
/* clang-format off */

/* TODO: Define constants below according to spec. */
#define HF_VM_GET_COUNT                0xff01
#define HF_VCPU_GET_COUNT              0xff02
#define HF_VM_CONFIGURE                0xff03
#define HF_MAILBOX_CLEAR               0xff04
#define HF_MAILBOX_WRITABLE_GET        0xff05
#define HF_MAILBOX_WAITER_GET          0xff06
#define HF_INTERRUPT_ENABLE            0xff07
#define HF_INTERRUPT_GET               0xff08
#define HF_INTERRUPT_INJECT            0xff09
#define HF_SHARE_MEMORY                0xff0a

/* Custom SPCI-like calls returned from SPCI_RUN. */
#define HF_SPCI_RUN_WAIT_FOR_INTERRUPT 0xff0b
#define HF_SPCI_RUN_WAKE_UP            0xff0c

/* This matches what Trusty and its ATF module currently use. */
#define HF_DEBUG_LOG                   0xbd000000

/* clang-format on */

enum hf_vcpu_run_code {
	/**
	 * The vCPU has been preempted but still has work to do. If the
	 * scheduling quantum has not expired, the scheduler MUST call
	 * `hf_vcpu_run` on the vCPU to allow it to continue.
	 */
	HF_VCPU_RUN_PREEMPTED = 0,

	/**
	 * The vCPU has voluntarily yielded the CPU. The scheduler SHOULD take a
	 * scheduling decision to give cycles to those that need them but MUST
	 * call `hf_vcpu_run` on the vCPU at a later point.
	 */
	HF_VCPU_RUN_YIELD = 1,

	/**
	 * The vCPU is blocked waiting for an interrupt. The scheduler MUST take
	 * it off the run queue and not call `hf_vcpu_run` on the vCPU until it
	 * has injected an interrupt, received `HF_VCPU_RUN_WAKE_UP` for it from
	 * another vCPU, or the timeout provided in `hf_vcpu_run_return.sleep`
	 * is not `HF_SLEEP_INDEFINITE` and the specified duration has expired.
	 */
	HF_VCPU_RUN_WAIT_FOR_INTERRUPT = 2,

	/**
	 * The vCPU is blocked waiting for a message. The scheduler MUST take it
	 * off the run queue and not call `hf_vcpu_run` on the vCPU until it has
	 * injected an interrupt, sent it a message, received
	 * `HF_VCPU_RUN_WAKE_UP` for it from another vCPU, or the timeout
	 * provided in `hf_vcpu_run_return.sleep` is not `HF_SLEEP_INDEFINITE`
	 * and the specified duration has expired.
	 */
	HF_VCPU_RUN_WAIT_FOR_MESSAGE = 3,

	/**
	 * Hafnium would like `hf_vcpu_run` to be called on another vCPU,
	 * specified by `hf_vcpu_run_return.wake_up`. The scheduler MUST either
	 * wake up the vCPU in question if it is blocked, or preempt and re-run
	 * it if it is already running somewhere. This gives Hafnium a chance to
	 * update any CPU state which might have changed.
	 */
	HF_VCPU_RUN_WAKE_UP = 4,

	/**
	 * A message has been sent by the vCPU. The scheduler MUST run a vCPU
	 * from the recipient VM and priority SHOULD be given to those vCPUs
	 * that are waiting for a message.
	 */
	HF_VCPU_RUN_MESSAGE = 5,

	/**
	 * The vCPU has made the mailbox writable and there are pending waiters.
	 * The scheduler MUST call `hf_mailbox_waiter_get()` repeatedly and
	 * notify all waiters by injecting an `HF_MAILBOX_WRITABLE_INTID`
	 * interrupt.
	 */
	HF_VCPU_RUN_NOTIFY_WAITERS = 6,

	/**
	 * The vCPU has aborted, triggering the whole VM to abort. The scheduler
	 * MUST treat this as `HF_VCPU_RUN_WAIT_FOR_INTERRUPT` for this vCPU and
	 * `HF_VCPU_RUN_WAKE_UP` for all the other vCPUs of the VM.
	 */
	HF_VCPU_RUN_ABORTED = 7,
};

struct hf_vcpu_run_return {
	enum hf_vcpu_run_code code;
	union {
		struct {
			spci_vm_id_t vm_id;
			spci_vcpu_index_t vcpu;
		} wake_up;
		struct {
			spci_vm_id_t vm_id;
			uint32_t size;
		} message;
		struct {
			uint64_t ns;
		} sleep;
	};
};

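/*
 * Illustrative sketch only, not part of the Hafnium API: derives from the run
 * code whether the run return carries a sleep duration in `sleep.ns`, per the
 * scheduler contract documented in `enum hf_vcpu_run_code` above. Assumes
 * `bool` is available via "hf/types.h".
 */
static inline bool hf_vcpu_run_return_has_sleep(struct hf_vcpu_run_return res)
{
	switch (res.code) {
	case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
	case HF_VCPU_RUN_WAIT_FOR_MESSAGE:
		/* `res.sleep.ns` bounds how long the vCPU may stay blocked. */
		return true;
	default:
		/* No other code carries a sleep duration. */
		return false;
	}
}
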
enum hf_share {
	/**
	 * Relinquish ownership and access to the memory and pass them to the
	 * recipient.
	 */
	HF_MEMORY_GIVE,

	/**
	 * Retain ownership of the memory but relinquish access to the
	 * recipient.
	 */
	HF_MEMORY_LEND,

	/**
	 * Retain ownership and access but additionally allow access to the
	 * recipient.
	 */
	HF_MEMORY_SHARE,
};

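/*
 * Illustrative sketch only, not part of the Hafnium API: summarises, per share
 * mode, whether the sender keeps access to the pages after the operation, as
 * described by the `enum hf_share` documentation above. Assumes `bool` is
 * available via "hf/types.h".
 */
static inline bool hf_share_sender_keeps_access(enum hf_share share)
{
	switch (share) {
	case HF_MEMORY_GIVE:
	case HF_MEMORY_LEND:
		/* Access passes to the recipient. */
		return false;
	case HF_MEMORY_SHARE:
		/* The sender retains both ownership and access. */
		return true;
	}
	return false;
}
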
/**
 * Encode an hf_vcpu_run_return struct in the SPCI ABI.
 */
static inline struct spci_value hf_vcpu_run_return_encode(
	struct hf_vcpu_run_return res, spci_vm_id_t vm_id,
	spci_vcpu_index_t vcpu_index)
{
	struct spci_value ret = {0};

	switch (res.code) {
	case HF_VCPU_RUN_PREEMPTED:
		ret.func = SPCI_INTERRUPT_32;
		ret.arg1 = (uint32_t)vm_id << 16 | vcpu_index;
		break;
	case HF_VCPU_RUN_YIELD:
		ret.func = SPCI_YIELD_32;
		ret.arg1 = (uint32_t)vcpu_index << 16 | vm_id;
		break;
	case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
		ret.func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT;
		ret.arg1 = (uint32_t)vcpu_index << 16 | vm_id;
		if (res.sleep.ns == HF_SLEEP_INDEFINITE) {
			ret.arg2 = SPCI_SLEEP_INDEFINITE;
		} else if (res.sleep.ns == SPCI_SLEEP_INDEFINITE) {
			/*
			 * The requested finite duration happens to equal the
			 * indefinite-sleep sentinel; encode a different finite
			 * value so it is not read as an indefinite sleep.
			 */
			ret.arg2 = 1;
		} else {
			ret.arg2 = res.sleep.ns;
		}
		break;
	case HF_VCPU_RUN_WAIT_FOR_MESSAGE:
		ret.func = SPCI_MSG_WAIT_32;
		ret.arg1 = (uint32_t)vcpu_index << 16 | vm_id;
		if (res.sleep.ns == HF_SLEEP_INDEFINITE) {
			ret.arg2 = SPCI_SLEEP_INDEFINITE;
		} else if (res.sleep.ns == SPCI_SLEEP_INDEFINITE) {
			/* As above, avoid the indefinite-sleep sentinel. */
			ret.arg2 = 1;
		} else {
			ret.arg2 = res.sleep.ns;
		}
		break;
	case HF_VCPU_RUN_WAKE_UP:
		ret.func = HF_SPCI_RUN_WAKE_UP;
		ret.arg1 = (uint32_t)res.wake_up.vcpu << 16 | res.wake_up.vm_id;
		break;
	case HF_VCPU_RUN_MESSAGE:
		ret.func = SPCI_MSG_SEND_32;
		ret.arg1 = (uint32_t)vm_id << 16 | res.message.vm_id;
		ret.arg3 = res.message.size;
		break;
	case HF_VCPU_RUN_NOTIFY_WAITERS:
		ret.func = SPCI_RX_RELEASE_32;
		break;
	case HF_VCPU_RUN_ABORTED:
		ret.func = SPCI_ERROR_32;
		ret.arg2 = SPCI_ABORTED;
		break;
	}

	return ret;
}

static inline spci_vm_id_t wake_up_get_vm_id(struct spci_value v)
{
	return v.arg1 & 0xffff;
}

static inline spci_vcpu_index_t wake_up_get_vcpu(struct spci_value v)
{
	return (v.arg1 >> 16) & 0xffff;
}

/**
 * Decode an hf_vcpu_run_return struct from the SPCI ABI.
 */
static inline struct hf_vcpu_run_return hf_vcpu_run_return_decode(
	struct spci_value res)
{
	struct hf_vcpu_run_return ret = {.code = HF_VCPU_RUN_PREEMPTED};

	/* Some codes include more data. */
	switch (res.func) {
	case SPCI_INTERRUPT_32:
		ret.code = HF_VCPU_RUN_PREEMPTED;
		break;
	case SPCI_YIELD_32:
		ret.code = HF_VCPU_RUN_YIELD;
		break;
	case HF_SPCI_RUN_WAIT_FOR_INTERRUPT:
		ret.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
		if (res.arg2 == SPCI_SLEEP_INDEFINITE) {
			ret.sleep.ns = HF_SLEEP_INDEFINITE;
		} else {
			ret.sleep.ns = res.arg2;
		}
		break;
	case SPCI_MSG_WAIT_32:
		ret.code = HF_VCPU_RUN_WAIT_FOR_MESSAGE;
		if (res.arg2 == SPCI_SLEEP_INDEFINITE) {
			ret.sleep.ns = HF_SLEEP_INDEFINITE;
		} else {
			ret.sleep.ns = res.arg2;
		}
		break;
	case HF_SPCI_RUN_WAKE_UP:
		ret.code = HF_VCPU_RUN_WAKE_UP;
		ret.wake_up.vcpu = wake_up_get_vcpu(res);
		ret.wake_up.vm_id = wake_up_get_vm_id(res);
		break;
	case SPCI_MSG_SEND_32:
		ret.code = HF_VCPU_RUN_MESSAGE;
		ret.message.vm_id = res.arg1 & 0xffff;
		ret.message.size = res.arg3;
		break;
	case SPCI_RX_RELEASE_32:
		ret.code = HF_VCPU_RUN_NOTIFY_WAITERS;
		break;
	case SPCI_ERROR_32:
		ret.code = HF_VCPU_RUN_ABORTED;
		break;
	default:
		ret.code = HF_VCPU_RUN_ABORTED;
		break;
	}

	return ret;
}
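
/*
 * Illustrative sketch only, not part of the Hafnium API: shows how the encode
 * and decode helpers above round-trip a wake-up request. The VM and vCPU IDs
 * used here are arbitrary example values.
 */
static inline struct hf_vcpu_run_return hf_vcpu_run_roundtrip_example(void)
{
	struct hf_vcpu_run_return original = {
		.code = HF_VCPU_RUN_WAKE_UP,
		.wake_up = {.vm_id = 1, .vcpu = 0},
	};
	/* The caller's own VM ID and vCPU index are the last two arguments. */
	struct spci_value encoded = hf_vcpu_run_return_encode(original, 2, 3);

	/* Decoding recovers both the run code and the wake-up target. */
	return hf_vcpu_run_return_decode(encoded);
}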