blob: 39d6fb6831d8ecd0adf93575be6ff1fde979ac7a [file] [log] [blame]
Andrew Scull6d2db332018-10-10 15:28:17 +01001/*
2 * Copyright 2018 Google LLC
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * https://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#pragma once
18
19#include "hf/types.h"
20
/**
 * Return codes from `hf_vcpu_run`, instructing the primary scheduler what to
 * do next with the vCPU. These values travel through the 64-bit packing ABI
 * (see `hf_vcpu_run_return_encode`), so they must not be renumbered.
 */
enum hf_vcpu_run_code {
	/**
	 * The vCPU has been preempted but still has work to do. If the
	 * scheduling quantum has not expired, the scheduler MUST call
	 * `hf_vcpu_run` on the vCPU to allow it to continue.
	 */
	HF_VCPU_RUN_PREEMPTED = 0,

	/**
	 * The vCPU has voluntarily yielded the CPU. The scheduler SHOULD take a
	 * scheduling decision to give cycles to those that need them but MUST
	 * call `hf_vcpu_run` on the vCPU at a later point.
	 */
	HF_VCPU_RUN_YIELD = 1,

	/**
	 * The vCPU is blocked waiting for an interrupt. The scheduler MUST take
	 * it off the run queue and not call `hf_vcpu_run` on the vCPU until it
	 * has injected an interrupt, sent it a message, or got a
	 * `HF_VCPU_RUN_WAKE_UP` for it from another vCPU.
	 */
	HF_VCPU_RUN_WAIT_FOR_INTERRUPT = 2,

	/**
	 * The vCPU would like `hf_vcpu_run` to be called on another vCPU,
	 * specified by `hf_vcpu_run_return.wake_up`. The scheduler MUST
	 * either wake the vCPU in question up if it is blocked, or preempt and
	 * re-run it if it is already running somewhere. This gives Hafnium a
	 * chance to update any CPU state which might have changed.
	 */
	HF_VCPU_RUN_WAKE_UP = 3,

	/**
	 * A new message is available for the scheduler VM, as specified by
	 * `hf_vcpu_run_return.message`.
	 */
	HF_VCPU_RUN_MESSAGE = 4,

	/**
	 * Like `HF_VCPU_RUN_WAIT_FOR_INTERRUPT`, but for a limited amount of
	 * time, specified by `hf_vcpu_run_return.sleep`. After at least that
	 * amount of time has passed, or any of the events listed for
	 * `HF_VCPU_RUN_WAIT_FOR_INTERRUPT` occur, the scheduler MUST call
	 * `hf_vcpu_run` on it again.
	 */
	HF_VCPU_RUN_SLEEP = 5,

	/**
	 * The vCPU has made the mailbox writable and there are pending waiters.
	 * The scheduler MUST call hf_mailbox_waiter_get() repeatedly and notify
	 * all waiters by injecting an HF_MAILBOX_WRITABLE_INTID interrupt.
	 */
	HF_VCPU_RUN_NOTIFY_WAITERS = 6,

	/**
	 * The vCPU has aborted triggering the whole VM to abort. The scheduler
	 * MUST treat this as `HF_VCPU_RUN_WAIT_FOR_INTERRUPT` for this vCPU and
	 * `HF_VCPU_RUN_WAKE_UP` for all the other vCPUs of the VM.
	 */
	HF_VCPU_RUN_ABORTED = 7,
};
82
/**
 * Result of `hf_vcpu_run`, packed into a single 64-bit value by
 * `hf_vcpu_run_return_encode`. Which union member (if any) carries valid
 * data is determined by `code`.
 */
struct hf_vcpu_run_return {
	enum hf_vcpu_run_code code;
	union {
		/* Valid when code == HF_VCPU_RUN_WAKE_UP. */
		struct {
			uint32_t vm_id;
			uint16_t vcpu;
		} wake_up;
		/* Valid when code == HF_VCPU_RUN_MESSAGE. */
		struct {
			uint32_t size;
		} message;
		/* Valid when code == HF_VCPU_RUN_SLEEP. */
		struct {
			uint64_t ns;
		} sleep;
	};
};
98
/**
 * Result of a mailbox receive, packed into 64 bits by
 * `hf_mailbox_receive_return_encode`. `vm_id` identifies the other VM
 * (presumably the sender — confirm against the hypercall documentation)
 * and `size` is the message size in bytes.
 */
struct hf_mailbox_receive_return {
	uint32_t vm_id;
	uint32_t size;
};
103
/**
 * Memory sharing modes, expressed in terms of which party keeps ownership of
 * and access to the memory after the operation.
 */
enum hf_share {
	/**
	 * Relinquish ownership and access to the memory and pass them to the
	 * recipient.
	 */
	HF_MEMORY_GIVE,

	/**
	 * Retain ownership of the memory but relinquish access to the
	 * recipient.
	 */
	HF_MEMORY_LEND,

	/**
	 * Retain ownership and access but additionally allow access to the
	 * recipient.
	 */
	HF_MEMORY_SHARE,
};
123
Andrew Scull6d2db332018-10-10 15:28:17 +0100124/**
125 * Encode an hf_vcpu_run_return struct in the 64-bit packing ABI.
126 */
127static inline uint64_t hf_vcpu_run_return_encode(struct hf_vcpu_run_return res)
128{
129 uint64_t ret = res.code & 0xff;
Andrew Scullcbefbdb2019-01-11 16:36:26 +0000130
Andrew Scull6d2db332018-10-10 15:28:17 +0100131 switch (res.code) {
132 case HF_VCPU_RUN_WAKE_UP:
133 ret |= (uint64_t)res.wake_up.vm_id << 32;
134 ret |= (uint64_t)res.wake_up.vcpu << 16;
135 break;
136 case HF_VCPU_RUN_MESSAGE:
137 ret |= (uint64_t)res.message.size << 32;
138 break;
139 case HF_VCPU_RUN_SLEEP:
140 ret |= res.sleep.ns << 8;
141 break;
142 default:
143 break;
144 }
Andrew Scullcbefbdb2019-01-11 16:36:26 +0000145
Andrew Scull6d2db332018-10-10 15:28:17 +0100146 return ret;
147}
148
149/**
150 * Decode an hf_vcpu_run_return struct from the 64-bit packing ABI.
151 */
152static inline struct hf_vcpu_run_return hf_vcpu_run_return_decode(uint64_t res)
153{
Wedson Almeida Filhobdcd8362018-12-15 03:26:21 +0000154 struct hf_vcpu_run_return ret = {
155 .code = (enum hf_vcpu_run_code)(res & 0xff),
156 };
Andrew Scull6d2db332018-10-10 15:28:17 +0100157
158 /* Some codes include more data. */
159 switch (ret.code) {
160 case HF_VCPU_RUN_WAKE_UP:
161 ret.wake_up.vm_id = res >> 32;
162 ret.wake_up.vcpu = (res >> 16) & 0xffff;
163 break;
164 case HF_VCPU_RUN_MESSAGE:
165 ret.message.size = res >> 32;
166 break;
167 case HF_VCPU_RUN_SLEEP:
168 ret.sleep.ns = res >> 8;
169 break;
170 default:
171 break;
172 }
173
174 return ret;
175}
176
177/**
178 * Encode an hf_mailbox_receive_return struct in the 64-bit packing ABI.
179 */
180static inline uint64_t hf_mailbox_receive_return_encode(
181 struct hf_mailbox_receive_return res)
182{
183 return res.vm_id | ((uint64_t)res.size << 32);
184}
185
186/**
187 * Decode an hf_mailbox_receive_return struct from the 64-bit packing ABI.
188 */
189static inline struct hf_mailbox_receive_return hf_mailbox_receive_return_decode(
190 uint64_t res)
191{
192 return (struct hf_mailbox_receive_return){
193 .vm_id = (uint32_t)(res & 0xffffffff),
194 .size = (uint32_t)(res >> 32),
195 };
196}