/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vcpu.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/std.h"
#include "hf/vm.h"

16/**
17 * Locks the given vCPU and updates `locked` to hold the newly locked vCPU.
18 */
19struct vcpu_locked vcpu_lock(struct vcpu *vcpu)
20{
21 struct vcpu_locked locked = {
22 .vcpu = vcpu,
23 };
24
25 sl_lock(&vcpu->lock);
26
27 return locked;
28}
29
30/**
Olivier Deprez0b6f10a2020-08-05 18:21:33 +020031 * Locks two vCPUs ensuring that the locking order is according to the locks'
32 * addresses.
33 */
34struct two_vcpu_locked vcpu_lock_both(struct vcpu *vcpu1, struct vcpu *vcpu2)
35{
36 struct two_vcpu_locked dual_lock;
37
38 sl_lock_both(&vcpu1->lock, &vcpu2->lock);
39 dual_lock.vcpu1.vcpu = vcpu1;
40 dual_lock.vcpu2.vcpu = vcpu2;
41
42 return dual_lock;
43}
44
/**
 * Unlocks a vCPU previously locked with vcpu_lock, and updates `locked` to
 * reflect the fact that the vCPU is no longer locked.
 */
void vcpu_unlock(struct vcpu_locked *locked)
{
	sl_unlock(&locked->vcpu->lock);
	/* Clear the handle so a stale copy cannot be used to unlock again. */
	locked->vcpu = NULL;
}
54
55void vcpu_init(struct vcpu *vcpu, struct vm *vm)
56{
57 memset_s(vcpu, sizeof(*vcpu), 0, sizeof(*vcpu));
58 sl_init(&vcpu->lock);
59 vcpu->regs_available = true;
60 vcpu->vm = vm;
61 vcpu->state = VCPU_STATE_OFF;
62}
63
64/**
65 * Initialise the registers for the given vCPU and set the state to
66 * VCPU_STATE_READY. The caller must hold the vCPU lock while calling this.
67 */
68void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg)
69{
70 arch_regs_set_pc_arg(&vcpu.vcpu->regs, entry, arg);
71 vcpu.vcpu->state = VCPU_STATE_READY;
72}
73
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010074ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu)
Fuad Tabba5c738432019-12-02 11:02:42 +000075{
76 size_t index = vcpu - vcpu->vm->vcpus;
77
78 CHECK(index < UINT16_MAX);
79 return index;
80}
81
82/**
83 * Check whether the given vcpu_state is an off state, for the purpose of
84 * turning vCPUs on and off. Note that aborted still counts as on in this
85 * context.
86 */
87bool vcpu_is_off(struct vcpu_locked vcpu)
88{
89 switch (vcpu.vcpu->state) {
90 case VCPU_STATE_OFF:
91 return true;
92 case VCPU_STATE_READY:
93 case VCPU_STATE_RUNNING:
94 case VCPU_STATE_BLOCKED_MAILBOX:
95 case VCPU_STATE_BLOCKED_INTERRUPT:
96 case VCPU_STATE_ABORTED:
97 /*
98 * Aborted still counts as ON for the purposes of PSCI,
99 * because according to the PSCI specification (section
100 * 5.7.1) a core is only considered to be off if it has
101 * been turned off with a CPU_OFF call or hasn't yet
102 * been turned on with a CPU_ON call.
103 */
104 return false;
105 }
106}
107
108/**
109 * Starts a vCPU of a secondary VM.
110 *
111 * Returns true if the secondary was reset and started, or false if it was
112 * already on and so nothing was done.
113 */
114bool vcpu_secondary_reset_and_start(struct vcpu *vcpu, ipaddr_t entry,
115 uintreg_t arg)
116{
117 struct vcpu_locked vcpu_locked;
118 struct vm *vm = vcpu->vm;
119 bool vcpu_was_off;
120
121 CHECK(vm->id != HF_PRIMARY_VM_ID);
122
123 vcpu_locked = vcpu_lock(vcpu);
124 vcpu_was_off = vcpu_is_off(vcpu_locked);
125 if (vcpu_was_off) {
126 /*
127 * Set vCPU registers to a clean state ready for boot. As this
128 * is a secondary which can migrate between pCPUs, the ID of the
129 * vCPU is defined as the index and does not match the ID of the
130 * pCPU it is running on.
131 */
132 arch_regs_reset(vcpu);
133 vcpu_on(vcpu_locked, entry, arg);
134 }
135 vcpu_unlock(&vcpu_locked);
136
137 return vcpu_was_off;
138}
139
140/**
141 * Handles a page fault. It does so by determining if it's a legitimate or
142 * spurious fault, and recovering from the latter.
143 *
Fuad Tabbaed294af2019-12-20 10:43:01 +0000144 * Returns true if the caller should resume the current vCPU, or false if its VM
Fuad Tabba5c738432019-12-02 11:02:42 +0000145 * should be aborted.
146 */
147bool vcpu_handle_page_fault(const struct vcpu *current,
148 struct vcpu_fault_info *f)
149{
150 struct vm *vm = current->vm;
151 uint32_t mode;
152 uint32_t mask = f->mode | MM_MODE_INVALID;
153 bool resume;
154
155 sl_lock(&vm->lock);
156
157 /*
158 * Check if this is a legitimate fault, i.e., if the page table doesn't
159 * allow the access attempted by the VM.
160 *
161 * Otherwise, this is a spurious fault, likely because another CPU is
162 * updating the page table. It is responsible for issuing global TLB
163 * invalidations while holding the VM lock, so we don't need to do
164 * anything else to recover from it. (Acquiring/releasing the lock
165 * ensured that the invalidations have completed.)
166 */
167 resume = mm_vm_get_mode(&vm->ptable, f->ipaddr, ipa_add(f->ipaddr, 1),
168 &mode) &&
169 (mode & mask) == f->mode;
170
171 sl_unlock(&vm->lock);
172
173 if (!resume) {
Andrew Walbran17eebf92020-02-05 16:35:49 +0000174 dlog_warning(
175 "Stage-2 page fault: pc=%#x, vmid=%u, vcpu=%u, "
176 "vaddr=%#x, ipaddr=%#x, mode=%#x\n",
177 f->pc, vm->id, vcpu_index(current), f->vaddr, f->ipaddr,
178 f->mode);
Fuad Tabba5c738432019-12-02 11:02:42 +0000179 }
180
181 return resume;
182}