/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/spci.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static spci_vm_count_t vm_count;

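/**
 * Initialises the next available VM slot with the given vCPU count and
 * returns it via `new_vm`. Returns false if the maximum number of VMs has
 * already been reached or if the VM's page table cannot be initialised.
 */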
bool vm_init(spci_vcpu_count_t vcpu_count, struct mpool *ppool,
	     struct vm **new_vm)
{
	uint32_t i;
	struct vm *vm;

	if (vm_count >= MAX_VMS) {
		return false;
	}

	vm = &vms[vm_count];

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	vm->id = vm_count + HF_VM_ID_OFFSET;
	vm->vcpu_count = vcpu_count;
	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);

	if (!mm_vm_init(&vm->ptable, ppool)) {
		return false;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialisation of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	++vm_count;
	*new_vm = vm;

	return true;
}

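/**
 * Returns the number of VMs that have been initialised.
 */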
spci_vm_count_t vm_get_count(void)
{
	return vm_count;
}

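/**
 * Returns the VM with the given ID, or NULL if the ID is reserved or no VM
 * with that ID has been initialised.
 */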
struct vm *vm_find(spci_vm_id_t id)
{
	uint16_t index;

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a vm_locked structure holding a reference to
 * the newly locked VM.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs, acquiring their locks in order of the locks' addresses so
 * that the locking order is always consistent.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, spci_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}