/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vcpu.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/std.h"
#include "hf/vm.h"

/**
 * Locks the given vCPU and returns a `vcpu_locked` wrapper that holds it.
 */
struct vcpu_locked vcpu_lock(struct vcpu *vcpu)
{
	struct vcpu_locked locked = {
		.vcpu = vcpu,
	};

	sl_lock(&vcpu->lock);

	return locked;
}

/**
 * Locks two vCPUs, acquiring their locks in address order so that concurrent
 * callers locking the same pair cannot deadlock.
 */
struct two_vcpu_locked vcpu_lock_both(struct vcpu *vcpu1, struct vcpu *vcpu2)
{
	struct two_vcpu_locked dual_lock;

	sl_lock_both(&vcpu1->lock, &vcpu2->lock);
	dual_lock.vcpu1.vcpu = vcpu1;
	dual_lock.vcpu2.vcpu = vcpu2;

	return dual_lock;
}
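
/*
 * Illustrative sketch only, not part of the Hafnium API: what "lock in
 * address order" amounts to for a pair of vCPUs. Taking the lower-addressed
 * lock first means two CPUs locking the same pair can never each hold one
 * lock while waiting on the other. sl_lock_both() is assumed to provide an
 * equivalent ordering internally; the helper name below is hypothetical.
 */
static inline void example_lock_pair_in_address_order(struct vcpu *a,
						      struct vcpu *b)
{
	if (a < b) {
		/* `a` has the lower address, so take its lock first. */
		sl_lock(&a->lock);
		sl_lock(&b->lock);
	} else {
		sl_lock(&b->lock);
		sl_lock(&a->lock);
	}
}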

/**
 * Unlocks a vCPU previously locked with vcpu_lock, and updates `locked` to
 * reflect the fact that the vCPU is no longer locked.
 */
void vcpu_unlock(struct vcpu_locked *locked)
{
	sl_unlock(&locked->vcpu->lock);
	locked->vcpu = NULL;
}
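
/*
 * Illustrative sketch only: the intended pairing of vcpu_lock()/vcpu_unlock()
 * around an update that requires the vCPU lock to be held. The helper name
 * and the particular state transition are hypothetical, not existing Hafnium
 * code.
 */
static inline void example_vcpu_mark_aborted(struct vcpu *vcpu)
{
	struct vcpu_locked locked = vcpu_lock(vcpu);

	/* The lock is held here, so updating the state is safe. */
	locked.vcpu->state = VCPU_STATE_ABORTED;

	/* vcpu_unlock() releases the lock and clears locked.vcpu. */
	vcpu_unlock(&locked);
}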

void vcpu_init(struct vcpu *vcpu, struct vm *vm)
{
	memset_s(vcpu, sizeof(*vcpu), 0, sizeof(*vcpu));
	sl_init(&vcpu->lock);
	vcpu->regs_available = true;
	vcpu->vm = vm;
	vcpu->state = VCPU_STATE_OFF;
	vcpu->direct_request_origin_vm_id = HF_INVALID_VM_ID;
}

/**
 * Initialise the registers for the given vCPU and set the state to
 * VCPU_STATE_READY. The caller must hold the vCPU lock while calling this.
 */
void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg)
{
	arch_regs_set_pc_arg(&vcpu.vcpu->regs, entry, arg);
	vcpu.vcpu->state = VCPU_STATE_READY;
}

ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu)
{
	size_t index = vcpu - vcpu->vm->vcpus;

	CHECK(index < UINT16_MAX);
	return index;
}

/**
 * Check whether the given vcpu_state is an off state, for the purpose of
 * turning vCPUs on and off. Note that aborted still counts as on in this
 * context.
 */
bool vcpu_is_off(struct vcpu_locked vcpu)
{
	switch (vcpu.vcpu->state) {
	case VCPU_STATE_OFF:
		return true;
	case VCPU_STATE_READY:
	case VCPU_STATE_RUNNING:
	case VCPU_STATE_BLOCKED_MAILBOX:
	case VCPU_STATE_BLOCKED_INTERRUPT:
	case VCPU_STATE_ABORTED:
		/*
		 * Aborted still counts as ON for the purposes of PSCI,
		 * because according to the PSCI specification (section
		 * 5.7.1) a core is only considered to be off if it has
		 * been turned off with a CPU_OFF call or hasn't yet
		 * been turned on with a CPU_ON call.
		 */
		return false;
	}
}

/**
 * Starts a vCPU of a secondary VM.
 *
 * Returns true if the secondary was reset and started, or false if it was
 * already on and so nothing was done.
 */
bool vcpu_secondary_reset_and_start(struct vcpu *vcpu, ipaddr_t entry,
				    uintreg_t arg)
{
	struct vcpu_locked vcpu_locked;
	struct vm *vm = vcpu->vm;
	bool vcpu_was_off;

	CHECK(vm->id != HF_PRIMARY_VM_ID);

	vcpu_locked = vcpu_lock(vcpu);
	vcpu_was_off = vcpu_is_off(vcpu_locked);
	if (vcpu_was_off) {
		/*
		 * Set vCPU registers to a clean state ready for boot. As this
		 * is a secondary which can migrate between pCPUs, the ID of
		 * the vCPU is defined as the index and does not match the ID
		 * of the pCPU it is running on.
		 */
		arch_regs_reset(vcpu);
		vcpu_on(vcpu_locked, entry, arg);
	}
	vcpu_unlock(&vcpu_locked);

	return vcpu_was_off;
}
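
/*
 * Illustrative sketch only: how a PSCI CPU_ON handler might map the result of
 * vcpu_secondary_reset_and_start() onto PSCI-style return values. The
 * EXAMPLE_* constants and the helper name are hypothetical stand-ins, not
 * Hafnium's real PSCI return codes or dispatch code.
 */
#define EXAMPLE_PSCI_SUCCESS 0
#define EXAMPLE_PSCI_ALREADY_ON (-4)

static inline int example_psci_cpu_on(struct vcpu *target, ipaddr_t entry,
				      uintreg_t context_id)
{
	/* true means the vCPU was off and has now been reset and started. */
	return vcpu_secondary_reset_and_start(target, entry, context_id)
		       ? EXAMPLE_PSCI_SUCCESS
		       : EXAMPLE_PSCI_ALREADY_ON;
}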

/**
 * Handles a page fault. It does so by determining if it's a legitimate or
 * spurious fault, and recovering from the latter.
 *
 * Returns true if the caller should resume the current vCPU, or false if its
 * VM should be aborted.
 */
bool vcpu_handle_page_fault(const struct vcpu *current,
			    struct vcpu_fault_info *f)
{
	struct vm *vm = current->vm;
	uint32_t mode;
	uint32_t mask = f->mode | MM_MODE_INVALID;
	bool resume;

	sl_lock(&vm->lock);

	/*
	 * Check if this is a legitimate fault, i.e., if the page table doesn't
	 * allow the access attempted by the VM.
	 *
	 * Otherwise, this is a spurious fault, likely because another CPU is
	 * updating the page table. It is responsible for issuing global TLB
	 * invalidations while holding the VM lock, so we don't need to do
	 * anything else to recover from it. (Acquiring/releasing the lock
	 * ensured that the invalidations have completed.)
	 */
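	/*
	 * Concretely: `mask` keeps only the mode bits the fault needs plus
	 * MM_MODE_INVALID, so (mode & mask) == f->mode holds exactly when the
	 * page table grants every access bit the VM attempted and the entry
	 * is not marked invalid. For example, a write fault is spurious (and
	 * the vCPU is resumed) only if the mapping is present and writable.
	 */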
	resume = mm_vm_get_mode(&vm->ptable, f->ipaddr, ipa_add(f->ipaddr, 1),
				&mode) &&
		 (mode & mask) == f->mode;

	sl_unlock(&vm->lock);

	if (!resume) {
		dlog_warning(
			"Stage-2 page fault: pc=%#x, vmid=%u, vcpu=%u, "
			"vaddr=%#x, ipaddr=%#x, mode=%#x\n",
			f->pc, vm->id, vcpu_index(current), f->vaddr, f->ipaddr,
			f->mode);
	}

	return resume;
}