blob: 2e4ec7bb2708a6b1310f2652a81d5bcd33eca6e1 [file] [log] [blame]
/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "hf/types.h"
/**
 * Reason codes reported back to the scheduler after running a vCPU.
 *
 * NOTE(review): the enumerator order is part of the 64-bit packing ABI
 * (the code is encoded as an integer in the low byte) — do not reorder.
 */
enum hf_vcpu_run_code {
	/**
	 * The vCPU has been preempted but still has work to do. If the
	 * scheduling quantum has not expired, the scheduler MUST call
	 * `hf_vcpu_run` on the vCPU to allow it to continue.
	 */
	HF_VCPU_RUN_PREEMPTED,

	/**
	 * The vCPU has voluntarily yielded the CPU. The scheduler SHOULD take a
	 * scheduling decision to give cycles to those that need them but MUST
	 * call `hf_vcpu_run` on the vCPU at a later point.
	 */
	HF_VCPU_RUN_YIELD,

	/**
	 * The vCPU is blocked waiting for an interrupt. The scheduler MUST take
	 * it off the run queue and not call `hf_vcpu_run` on the vCPU until it
	 * has injected an interrupt, sent it a message, or got a
	 * `HF_VCPU_RUN_WAKE_UP` for it from another vCPU.
	 */
	HF_VCPU_RUN_WAIT_FOR_INTERRUPT,

	/**
	 * The vCPU would like `hf_vcpu_run` to be called on another vCPU,
	 * specified by `hf_vcpu_run_return.wake_up`. The scheduler MUST
	 * either wake the vCPU in question up if it is blocked, or preempt and
	 * re-run it if it is already running somewhere. This gives Hafnium a
	 * chance to update any CPU state which might have changed.
	 */
	HF_VCPU_RUN_WAKE_UP,

	/**
	 * A new message is available for the scheduler VM, as specified by
	 * `hf_vcpu_run_return.message`.
	 */
	HF_VCPU_RUN_MESSAGE,

	/**
	 * Like `HF_VCPU_RUN_WAIT_FOR_INTERRUPT`, but for a limited amount of
	 * time, specified by `hf_vcpu_run_return.sleep`. After at least that
	 * amount of time has passed, or any of the events listed for
	 * `HF_VCPU_RUN_WAIT_FOR_INTERRUPT` occur, the scheduler MUST call
	 * `hf_vcpu_run` on it again.
	 */
	HF_VCPU_RUN_SLEEP,
};
68
/**
 * Result of a vCPU run, as consumed by the scheduler.
 *
 * `code` says why the run returned; exactly one union member carries the
 * payload for codes that have one (see the encode/decode helpers below).
 * NOTE(review): field layout is part of the 64-bit packing ABI.
 */
struct hf_vcpu_run_return {
	enum hf_vcpu_run_code code;
	union {
		/* Valid when code == HF_VCPU_RUN_WAKE_UP. */
		struct {
			uint32_t vm_id;
			uint16_t vcpu;
		} wake_up;
		/* Valid when code == HF_VCPU_RUN_MESSAGE. */
		struct {
			uint32_t size;
		} message;
		/* Valid when code == HF_VCPU_RUN_SLEEP. */
		struct {
			uint64_t ns;
		} sleep;
	};
};
84
/**
 * Identifies a received mailbox message: the sending VM and the size of the
 * message payload in the mailbox.
 */
struct hf_mailbox_receive_return {
	uint32_t vm_id;
	uint32_t size;
};
89
90/**
91 * Encode an hf_vcpu_run_return struct in the 64-bit packing ABI.
92 */
93static inline uint64_t hf_vcpu_run_return_encode(struct hf_vcpu_run_return res)
94{
95 uint64_t ret = res.code & 0xff;
96 switch (res.code) {
97 case HF_VCPU_RUN_WAKE_UP:
98 ret |= (uint64_t)res.wake_up.vm_id << 32;
99 ret |= (uint64_t)res.wake_up.vcpu << 16;
100 break;
101 case HF_VCPU_RUN_MESSAGE:
102 ret |= (uint64_t)res.message.size << 32;
103 break;
104 case HF_VCPU_RUN_SLEEP:
105 ret |= res.sleep.ns << 8;
106 break;
107 default:
108 break;
109 }
110 return ret;
111}
112
113/**
114 * Decode an hf_vcpu_run_return struct from the 64-bit packing ABI.
115 */
116static inline struct hf_vcpu_run_return hf_vcpu_run_return_decode(uint64_t res)
117{
Wedson Almeida Filhobdcd8362018-12-15 03:26:21 +0000118 struct hf_vcpu_run_return ret = {
119 .code = (enum hf_vcpu_run_code)(res & 0xff),
120 };
Andrew Scull6d2db332018-10-10 15:28:17 +0100121
122 /* Some codes include more data. */
123 switch (ret.code) {
124 case HF_VCPU_RUN_WAKE_UP:
125 ret.wake_up.vm_id = res >> 32;
126 ret.wake_up.vcpu = (res >> 16) & 0xffff;
127 break;
128 case HF_VCPU_RUN_MESSAGE:
129 ret.message.size = res >> 32;
130 break;
131 case HF_VCPU_RUN_SLEEP:
132 ret.sleep.ns = res >> 8;
133 break;
134 default:
135 break;
136 }
137
138 return ret;
139}
140
141/**
142 * Encode an hf_mailbox_receive_return struct in the 64-bit packing ABI.
143 */
144static inline uint64_t hf_mailbox_receive_return_encode(
145 struct hf_mailbox_receive_return res)
146{
147 return res.vm_id | ((uint64_t)res.size << 32);
148}
149
150/**
151 * Decode an hf_mailbox_receive_return struct from the 64-bit packing ABI.
152 */
153static inline struct hf_mailbox_receive_return hf_mailbox_receive_return_decode(
154 uint64_t res)
155{
156 return (struct hf_mailbox_receive_return){
157 .vm_id = (uint32_t)(res & 0xffffffff),
158 .size = (uint32_t)(res >> 32),
159 };
160}