blob: 14c3fc69b8d69e07fda55851ccdc0300d6631191 [file] [log] [blame]
Andrew Scull6d2db332018-10-10 15:28:17 +01001/*
Andrew Walbran692b3252019-03-07 15:51:31 +00002 * Copyright 2018 The Hafnium Authors.
Andrew Scull6d2db332018-10-10 15:28:17 +01003 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * https://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#pragma once
18
Andrew Walbran95534922019-06-19 11:32:54 +010019#include "hf/spci.h"
Andrew Scull6d2db332018-10-10 15:28:17 +010020#include "hf/types.h"
21
/**
 * Reason codes returned from `hf_vcpu_run`, telling the primary scheduler
 * what to do with the vCPU next. The numeric values are part of the 64-bit
 * packing ABI (they travel in the low 8 bits of the encoded return value)
 * and must not be changed.
 */
enum hf_vcpu_run_code {
	/**
	 * The vCPU has been preempted but still has work to do. If the
	 * scheduling quantum has not expired, the scheduler MUST call
	 * `hf_vcpu_run` on the vCPU to allow it to continue.
	 */
	HF_VCPU_RUN_PREEMPTED = 0,

	/**
	 * The vCPU has voluntarily yielded the CPU. The scheduler SHOULD take a
	 * scheduling decision to give cycles to those that need them but MUST
	 * call `hf_vcpu_run` on the vCPU at a later point.
	 */
	HF_VCPU_RUN_YIELD = 1,

	/**
	 * The vCPU is blocked waiting for an interrupt. The scheduler MUST take
	 * it off the run queue and not call `hf_vcpu_run` on the vCPU until it
	 * has injected an interrupt, received `HF_VCPU_RUN_WAKE_UP` for it
	 * from another vCPU or the timeout provided in
	 * `hf_vcpu_run_return.sleep` is not `HF_SLEEP_INDEFINITE` and the
	 * specified duration has expired.
	 */
	HF_VCPU_RUN_WAIT_FOR_INTERRUPT = 2,

	/**
	 * The vCPU is blocked waiting for a message. The scheduler MUST take it
	 * off the run queue and not call `hf_vcpu_run` on the vCPU until it has
	 * injected an interrupt, sent it a message, or received
	 * `HF_VCPU_RUN_WAKE_UP` for it from another vCPU, or the timeout
	 * provided in `hf_vcpu_run_return.sleep` is not `HF_SLEEP_INDEFINITE`
	 * and the specified duration has expired.
	 */
	HF_VCPU_RUN_WAIT_FOR_MESSAGE = 3,

	/**
	 * Hafnium would like `hf_vcpu_run` to be called on another vCPU,
	 * specified by `hf_vcpu_run_return.wake_up`. The scheduler MUST either
	 * wake the vCPU in question up if it is blocked, or preempt and re-run
	 * it if it is already running somewhere. This gives Hafnium a chance to
	 * update any CPU state which might have changed.
	 */
	HF_VCPU_RUN_WAKE_UP = 4,

	/**
	 * A message has been sent by the vCPU. The scheduler MUST run a vCPU
	 * from the recipient VM and priority SHOULD be given to those vCPUs
	 * that are waiting for a message.
	 */
	HF_VCPU_RUN_MESSAGE = 5,

	/**
	 * The vCPU has made the mailbox writable and there are pending waiters.
	 * The scheduler MUST call hf_mailbox_waiter_get() repeatedly and notify
	 * all waiters by injecting an HF_MAILBOX_WRITABLE_INTID interrupt.
	 */
	HF_VCPU_RUN_NOTIFY_WAITERS = 6,

	/**
	 * The vCPU has aborted triggering the whole VM to abort. The scheduler
	 * MUST treat this as `HF_VCPU_RUN_WAIT_FOR_INTERRUPT` for this vCPU and
	 * `HF_VCPU_RUN_WAKE_UP` for all the other vCPUs of the VM.
	 */
	HF_VCPU_RUN_ABORTED = 7,
};
87
/**
 * Return value of `hf_vcpu_run`. `code` selects which union member (if any)
 * carries valid data; the mapping is established by
 * `hf_vcpu_run_return_encode`/`hf_vcpu_run_return_decode` below.
 */
struct hf_vcpu_run_return {
	enum hf_vcpu_run_code code;
	union {
		/* Valid when code == HF_VCPU_RUN_WAKE_UP. */
		struct {
			spci_vm_id_t vm_id;
			spci_vcpu_index_t vcpu;
		} wake_up;
		/* Valid when code == HF_VCPU_RUN_MESSAGE. */
		struct {
			spci_vm_id_t vm_id;
			uint32_t size;
		} message;
		/*
		 * Valid when code == HF_VCPU_RUN_WAIT_FOR_INTERRUPT or
		 * HF_VCPU_RUN_WAIT_FOR_MESSAGE. Duration in nanoseconds;
		 * only the low 56 bits survive the packing ABI (see encode).
		 */
		struct {
			uint64_t ns;
		} sleep;
	};
};
104
/**
 * Memory-sharing modes, distinguished by which of ownership and access the
 * sharing VM keeps. (NOTE(review): the hypervisor call that consumes this
 * enum is not visible in this header — confirm usage against callers.)
 */
enum hf_share {
	/**
	 * Relinquish ownership and access to the memory and pass them to the
	 * recipient.
	 */
	HF_MEMORY_GIVE,

	/**
	 * Retain ownership of the memory but relinquish access to the
	 * recipient.
	 */
	HF_MEMORY_LEND,

	/**
	 * Retain ownership and access but additionally allow access to the
	 * recipient.
	 */
	HF_MEMORY_SHARE,
};
124
Andrew Scull6d2db332018-10-10 15:28:17 +0100125/**
126 * Encode an hf_vcpu_run_return struct in the 64-bit packing ABI.
127 */
128static inline uint64_t hf_vcpu_run_return_encode(struct hf_vcpu_run_return res)
129{
130 uint64_t ret = res.code & 0xff;
Andrew Scullcbefbdb2019-01-11 16:36:26 +0000131
Andrew Scull6d2db332018-10-10 15:28:17 +0100132 switch (res.code) {
133 case HF_VCPU_RUN_WAKE_UP:
134 ret |= (uint64_t)res.wake_up.vm_id << 32;
135 ret |= (uint64_t)res.wake_up.vcpu << 16;
136 break;
137 case HF_VCPU_RUN_MESSAGE:
Andrew Walbranf1bd6322019-10-03 16:45:11 +0100138 ret |= (uint64_t)res.message.size << 32;
Andrew Scull80232362019-04-01 12:37:41 +0100139 ret |= res.message.vm_id << 8;
Andrew Scull6d2db332018-10-10 15:28:17 +0100140 break;
Andrew Scullb06d1752019-02-04 10:15:48 +0000141 case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
142 case HF_VCPU_RUN_WAIT_FOR_MESSAGE:
Andrew Scull6d2db332018-10-10 15:28:17 +0100143 ret |= res.sleep.ns << 8;
144 break;
145 default:
146 break;
147 }
Andrew Scullcbefbdb2019-01-11 16:36:26 +0000148
Andrew Scull6d2db332018-10-10 15:28:17 +0100149 return ret;
150}
151
152/**
153 * Decode an hf_vcpu_run_return struct from the 64-bit packing ABI.
154 */
155static inline struct hf_vcpu_run_return hf_vcpu_run_return_decode(uint64_t res)
156{
Wedson Almeida Filhobdcd8362018-12-15 03:26:21 +0000157 struct hf_vcpu_run_return ret = {
158 .code = (enum hf_vcpu_run_code)(res & 0xff),
159 };
Andrew Scull6d2db332018-10-10 15:28:17 +0100160
161 /* Some codes include more data. */
162 switch (ret.code) {
163 case HF_VCPU_RUN_WAKE_UP:
164 ret.wake_up.vm_id = res >> 32;
165 ret.wake_up.vcpu = (res >> 16) & 0xffff;
166 break;
167 case HF_VCPU_RUN_MESSAGE:
Andrew Walbranf1bd6322019-10-03 16:45:11 +0100168 ret.message.size = res >> 32;
169 ret.message.vm_id = (res >> 8) & 0xffff;
Andrew Scull6d2db332018-10-10 15:28:17 +0100170 break;
Andrew Scullb06d1752019-02-04 10:15:48 +0000171 case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
172 case HF_VCPU_RUN_WAIT_FOR_MESSAGE:
Andrew Scull6d2db332018-10-10 15:28:17 +0100173 ret.sleep.ns = res >> 8;
174 break;
175 default:
176 break;
177 }
178
179 return ret;
180}