blob: 6f43360e3e4321bb357eef40585c9b91ae717dd2 [file] [log] [blame]
/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#pragma once
18
19#include "hf/types.h"
20
/**
 * Reason codes returned to the scheduler by `hf_vcpu_run`, telling it why the
 * vCPU stopped running and what it is required to do next. Some codes carry
 * extra data in the matching member of `hf_vcpu_run_return`.
 */
enum hf_vcpu_run_code {
	/**
	 * The vCPU has been preempted but still has work to do. If the
	 * scheduling quantum has not expired, the scheduler MUST call
	 * `hf_vcpu_run` on the vCPU to allow it to continue.
	 */
	HF_VCPU_RUN_PREEMPTED,

	/**
	 * The vCPU has voluntarily yielded the CPU. The scheduler SHOULD take a
	 * scheduling decision to give cycles to those that need them but MUST
	 * call `hf_vcpu_run` on the vCPU at a later point.
	 */
	HF_VCPU_RUN_YIELD,

	/**
	 * The vCPU is blocked waiting for an interrupt. The scheduler MUST take
	 * it off the run queue and not call `hf_vcpu_run` on the vCPU until it
	 * has injected an interrupt, sent it a message, or got a
	 * `HF_VCPU_RUN_WAKE_UP` for it from another vCPU.
	 */
	HF_VCPU_RUN_WAIT_FOR_INTERRUPT,

	/**
	 * The vCPU would like `hf_vcpu_run` to be called on another vCPU,
	 * specified by `hf_vcpu_run_return.wake_up`. The scheduler MUST
	 * either wake the vCPU in question up if it is blocked, or preempt and
	 * re-run it if it is already running somewhere. This gives Hafnium a
	 * chance to update any CPU state which might have changed.
	 */
	HF_VCPU_RUN_WAKE_UP,

	/**
	 * A new message is available for the scheduler VM, as specified by
	 * `hf_vcpu_run_return.message`.
	 */
	HF_VCPU_RUN_MESSAGE,

	/**
	 * Like `HF_VCPU_RUN_WAIT_FOR_INTERRUPT`, but for a limited amount of
	 * time, specified by `hf_vcpu_run_return.sleep`. After at least that
	 * amount of time has passed, or any of the events listed for
	 * `HF_VCPU_RUN_WAIT_FOR_INTERRUPT` occur, the scheduler MUST call
	 * `hf_vcpu_run` on it again.
	 */
	HF_VCPU_RUN_SLEEP,
};
68
/**
 * Return value of `hf_vcpu_run`: a reason code plus code-specific payload.
 * Only the union member that matches `code` is meaningful (see the
 * per-constant comments on `enum hf_vcpu_run_code`).
 *
 * Packed to and from a single 64-bit value by `hf_vcpu_run_return_encode` /
 * `hf_vcpu_run_return_decode` below.
 */
struct hf_vcpu_run_return {
	/* Why the vCPU stopped running. */
	enum hf_vcpu_run_code code;
	union {
		/* Valid when code == HF_VCPU_RUN_WAKE_UP. */
		struct {
			/* VM owning the vCPU to wake. */
			uint32_t vm_id;
			/* Index of the vCPU to wake within that VM. */
			uint16_t vcpu;
		} wake_up;
		/* Valid when code == HF_VCPU_RUN_MESSAGE. */
		struct {
			/* Size of the newly available message. */
			uint32_t size;
		} message;
		/* Valid when code == HF_VCPU_RUN_SLEEP. */
		struct {
			/*
			 * Sleep duration; the name suggests nanoseconds —
			 * TODO(review): confirm the unit at the call sites.
			 * Only the low 56 bits survive the 64-bit packing ABI
			 * (encode shifts this left by 8).
			 */
			uint64_t ns;
		} sleep;
	};
};
84
/**
 * Result of a mailbox receive, packed to/from 64 bits by the encode/decode
 * helpers below.
 */
struct hf_mailbox_receive_return {
	/*
	 * Id of the VM at the other end of the mailbox exchange — presumably
	 * the sender; NOTE(review): confirm against the hypercall handler.
	 */
	uint32_t vm_id;
	/* Size of the received message. */
	uint32_t size;
};
89
90/**
91 * Encode an hf_vcpu_run_return struct in the 64-bit packing ABI.
92 */
93static inline uint64_t hf_vcpu_run_return_encode(struct hf_vcpu_run_return res)
94{
95 uint64_t ret = res.code & 0xff;
Andrew Scullcbefbdb2019-01-11 16:36:26 +000096
Andrew Scull6d2db332018-10-10 15:28:17 +010097 switch (res.code) {
98 case HF_VCPU_RUN_WAKE_UP:
99 ret |= (uint64_t)res.wake_up.vm_id << 32;
100 ret |= (uint64_t)res.wake_up.vcpu << 16;
101 break;
102 case HF_VCPU_RUN_MESSAGE:
103 ret |= (uint64_t)res.message.size << 32;
104 break;
105 case HF_VCPU_RUN_SLEEP:
106 ret |= res.sleep.ns << 8;
107 break;
108 default:
109 break;
110 }
Andrew Scullcbefbdb2019-01-11 16:36:26 +0000111
Andrew Scull6d2db332018-10-10 15:28:17 +0100112 return ret;
113}
114
115/**
116 * Decode an hf_vcpu_run_return struct from the 64-bit packing ABI.
117 */
118static inline struct hf_vcpu_run_return hf_vcpu_run_return_decode(uint64_t res)
119{
Wedson Almeida Filhobdcd8362018-12-15 03:26:21 +0000120 struct hf_vcpu_run_return ret = {
121 .code = (enum hf_vcpu_run_code)(res & 0xff),
122 };
Andrew Scull6d2db332018-10-10 15:28:17 +0100123
124 /* Some codes include more data. */
125 switch (ret.code) {
126 case HF_VCPU_RUN_WAKE_UP:
127 ret.wake_up.vm_id = res >> 32;
128 ret.wake_up.vcpu = (res >> 16) & 0xffff;
129 break;
130 case HF_VCPU_RUN_MESSAGE:
131 ret.message.size = res >> 32;
132 break;
133 case HF_VCPU_RUN_SLEEP:
134 ret.sleep.ns = res >> 8;
135 break;
136 default:
137 break;
138 }
139
140 return ret;
141}
142
143/**
144 * Encode an hf_mailbox_receive_return struct in the 64-bit packing ABI.
145 */
146static inline uint64_t hf_mailbox_receive_return_encode(
147 struct hf_mailbox_receive_return res)
148{
149 return res.vm_id | ((uint64_t)res.size << 32);
150}
151
152/**
153 * Decode an hf_mailbox_receive_return struct from the 64-bit packing ABI.
154 */
155static inline struct hf_mailbox_receive_return hf_mailbox_receive_return_decode(
156 uint64_t res)
157{
158 return (struct hf_mailbox_receive_return){
159 .vm_id = (uint32_t)(res & 0xffffffff),
160 .size = (uint32_t)(res >> 32),
161 };
162}