/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "hf/spci.h"
#include "hf/types.h"

enum hf_vcpu_run_code {
	/**
	 * The vCPU has been preempted but still has work to do. If the
	 * scheduling quantum has not expired, the scheduler MUST call
	 * `hf_vcpu_run` on the vCPU to allow it to continue.
	 */
	HF_VCPU_RUN_PREEMPTED = 0,

	/**
	 * The vCPU has voluntarily yielded the CPU. The scheduler SHOULD take a
	 * scheduling decision to give cycles to those that need them but MUST
	 * call `hf_vcpu_run` on the vCPU at a later point.
	 */
	HF_VCPU_RUN_YIELD = 1,

	/**
	 * The vCPU is blocked waiting for an interrupt. The scheduler MUST take
	 * it off the run queue and not call `hf_vcpu_run` on the vCPU until it
	 * has injected an interrupt, received `HF_VCPU_RUN_WAKE_UP` for it
	 * from another vCPU or the timeout provided in
	 * `hf_vcpu_run_return.sleep` is not `HF_SLEEP_INDEFINITE` and the
	 * specified duration has expired.
	 */
	HF_VCPU_RUN_WAIT_FOR_INTERRUPT = 2,

	/**
	 * The vCPU is blocked waiting for a message. The scheduler MUST take it
	 * off the run queue and not call `hf_vcpu_run` on the vCPU until it has
	 * injected an interrupt, sent it a message, or received
	 * `HF_VCPU_RUN_WAKE_UP` for it from another vCPU, or
	 * the timeout provided in `hf_vcpu_run_return.sleep` is not
	 * `HF_SLEEP_INDEFINITE` and the specified duration has expired.
	 */
	HF_VCPU_RUN_WAIT_FOR_MESSAGE = 3,

	/**
	 * Hafnium would like `hf_vcpu_run` to be called on another vCPU,
	 * specified by `hf_vcpu_run_return.wake_up`. The scheduler MUST either
	 * wake the vCPU in question up if it is blocked, or preempt and re-run
	 * it if it is already running somewhere. This gives Hafnium a chance to
	 * update any CPU state which might have changed.
	 */
	HF_VCPU_RUN_WAKE_UP = 4,

	/**
	 * A message has been sent by the vCPU. The scheduler MUST run a vCPU
	 * from the recipient VM and priority SHOULD be given to those vCPUs
	 * that are waiting for a message.
	 */
	HF_VCPU_RUN_MESSAGE = 5,

	/**
	 * The vCPU has made the mailbox writable and there are pending waiters.
	 * The scheduler MUST call `hf_mailbox_waiter_get()` repeatedly and
	 * notify all waiters by injecting an `HF_MAILBOX_WRITABLE_INTID`
	 * interrupt.
	 */
	HF_VCPU_RUN_NOTIFY_WAITERS = 6,

	/**
	 * The vCPU has aborted, triggering the whole VM to abort. The scheduler
	 * MUST treat this as `HF_VCPU_RUN_WAIT_FOR_INTERRUPT` for this vCPU and
	 * `HF_VCPU_RUN_WAKE_UP` for all the other vCPUs of the VM.
	 */
	HF_VCPU_RUN_ABORTED = 7,
};

struct hf_vcpu_run_return {
	enum hf_vcpu_run_code code;
	union {
		struct {
			spci_vm_id_t vm_id;
			uint16_t vcpu;
		} wake_up;
		struct {
			spci_vm_id_t vm_id;
		} message;
		struct {
			uint64_t ns;
		} sleep;
	};
};
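
/*
 * Which union member is meaningful depends on `code` (see the encode/decode
 * helpers below): `wake_up` for HF_VCPU_RUN_WAKE_UP, `message` for
 * HF_VCPU_RUN_MESSAGE, and `sleep` for HF_VCPU_RUN_WAIT_FOR_INTERRUPT and
 * HF_VCPU_RUN_WAIT_FOR_MESSAGE.
 */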

enum hf_share {
	/**
	 * Relinquish ownership and access to the memory and pass them to the
	 * recipient.
	 */
	HF_MEMORY_GIVE,

	/**
	 * Retain ownership of the memory but relinquish access to the
	 * recipient.
	 */
	HF_MEMORY_LEND,

	/**
	 * Retain ownership and access but additionally allow access to the
	 * recipient.
	 */
	HF_MEMORY_SHARE,
};
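
/*
 * Summary of the sharing modes above (an informal aid derived from the
 * descriptions, not a normative table):
 *
 *                      Sender ownership   Sender access   Recipient access
 *   HF_MEMORY_GIVE     relinquished       relinquished    granted
 *   HF_MEMORY_LEND     retained           relinquished    granted
 *   HF_MEMORY_SHARE    retained           retained        granted
 */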

/**
 * Encode an hf_vcpu_run_return struct in the 64-bit packing ABI.
 */
static inline uint64_t hf_vcpu_run_return_encode(struct hf_vcpu_run_return res)
{
	uint64_t ret = res.code & 0xff;

	switch (res.code) {
	case HF_VCPU_RUN_WAKE_UP:
		ret |= (uint64_t)res.wake_up.vm_id << 32;
		ret |= (uint64_t)res.wake_up.vcpu << 16;
		break;
	case HF_VCPU_RUN_MESSAGE:
		ret |= res.message.vm_id << 8;
		break;
	case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
	case HF_VCPU_RUN_WAIT_FOR_MESSAGE:
		ret |= res.sleep.ns << 8;
		break;
	default:
		break;
	}

	return ret;
}
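
/*
 * For reference, the packing produced by `hf_vcpu_run_return_encode` (an
 * informal summary derived from the code above, assuming `spci_vm_id_t` is
 * 16 bits wide; the ABI is defined by the functions themselves):
 *
 *   bits 0-7   : code            (all values)
 *   bits 16-31 : wake_up.vcpu    (HF_VCPU_RUN_WAKE_UP)
 *   bits 32-47 : wake_up.vm_id   (HF_VCPU_RUN_WAKE_UP)
 *   bits 8-23  : message.vm_id   (HF_VCPU_RUN_MESSAGE)
 *   bits 8-63  : sleep.ns        (HF_VCPU_RUN_WAIT_FOR_*, the top 8 bits of
 *                                 the nanosecond value are dropped)
 *
 * Example (illustrative only):
 *
 *   struct hf_vcpu_run_return res = {
 *       .code = HF_VCPU_RUN_WAKE_UP,
 *       .wake_up = {.vm_id = 2, .vcpu = 1},
 *   };
 *   uint64_t packed = hf_vcpu_run_return_encode(res);
 *
 * yields packed == (2ULL << 32) | (1ULL << 16) | HF_VCPU_RUN_WAKE_UP.
 */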

/**
 * Decode an hf_vcpu_run_return struct from the 64-bit packing ABI.
 */
static inline struct hf_vcpu_run_return hf_vcpu_run_return_decode(uint64_t res)
{
	struct hf_vcpu_run_return ret = {
		.code = (enum hf_vcpu_run_code)(res & 0xff),
	};

	/* Some codes include more data. */
	switch (ret.code) {
	case HF_VCPU_RUN_WAKE_UP:
		ret.wake_up.vm_id = res >> 32;
		ret.wake_up.vcpu = (res >> 16) & 0xffff;
		break;
	case HF_VCPU_RUN_MESSAGE:
		ret.message.vm_id = res >> 8;
		break;
	case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
	case HF_VCPU_RUN_WAIT_FOR_MESSAGE:
		ret.sleep.ns = res >> 8;
		break;
	default:
		break;
	}

	return ret;
}
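
/*
 * Example usage (an illustrative sketch, not part of the Hafnium API): a
 * primary scheduler that has obtained the packed 64-bit value returned by the
 * `hf_vcpu_run` hypercall might dispatch on it as follows. `hf_vcpu_run_raw`,
 * `requeue`, `wake` and `sleep_vcpu` are hypothetical helpers, not defined by
 * Hafnium.
 *
 *   uint64_t raw = hf_vcpu_run_raw(vm_id, vcpu);
 *   struct hf_vcpu_run_return ret = hf_vcpu_run_return_decode(raw);
 *
 *   switch (ret.code) {
 *   case HF_VCPU_RUN_PREEMPTED:
 *   case HF_VCPU_RUN_YIELD:
 *           requeue(vm_id, vcpu);
 *           break;
 *   case HF_VCPU_RUN_WAKE_UP:
 *           wake(ret.wake_up.vm_id, ret.wake_up.vcpu);
 *           break;
 *   case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
 *   case HF_VCPU_RUN_WAIT_FOR_MESSAGE:
 *           sleep_vcpu(vm_id, vcpu, ret.sleep.ns);
 *           break;
 *   default:
 *           break;
 *   }
 */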