blob: 928de40385c133fbadc6d264cfc0d05f732e2f76 [file] [log] [blame]
Andrew Scull18834872018-10-12 11:48:09 +01001/*
Andrew Walbran692b3252019-03-07 15:51:31 +00002 * Copyright 2018 The Hafnium Authors.
Andrew Scull18834872018-10-12 11:48:09 +01003 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
Andrew Scull18834872018-10-12 11:48:09 +01007 */
8
Andrew Scullfbc938a2018-08-20 14:09:28 +01009#pragma once
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +010010
Andrew Scull9726c252019-01-23 13:44:19 +000011#include <stdatomic.h>
12
Andrew Walbran1f32e722019-06-07 17:57:26 +010013#include "hf/arch/types.h"
14
Andrew Scull18c78fc2018-08-20 12:57:41 +010015#include "hf/cpu.h"
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +000016#include "hf/list.h"
Andrew Scull18c78fc2018-08-20 12:57:41 +010017#include "hf/mm.h"
Wedson Almeida Filho22d5eaa2018-12-16 00:38:49 +000018#include "hf/mpool.h"
Jose Marinho75509b42019-04-09 09:34:59 +010019
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010020#include "vmapi/hf/ffa.h"
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +010021
/** Maximum number of SMC function IDs in a VM's SMC whitelist. */
#define MAX_SMCS 32
/** Size, in bytes, of each VM's debug log buffer. */
#define LOG_BUFFER_SIZE 256
24
/**
 * The state of an RX buffer.
 *
 * EMPTY is the initial state. The following state transitions are possible:
 * * EMPTY => RECEIVED: message sent to the VM.
 * * RECEIVED => READ: secondary VM returns from FFA_MSG_WAIT or
 *   FFA_MSG_POLL, or primary VM returns from FFA_RUN with an FFA_MSG_SEND
 *   where the receiver is itself.
 * * READ => EMPTY: VM called FFA_RX_RELEASE.
 */
enum mailbox_state {
	/** There is no message in the mailbox. */
	MAILBOX_STATE_EMPTY,

	/** There is a message in the mailbox that is waiting for a reader. */
	MAILBOX_STATE_RECEIVED,

	/** There is a message in the mailbox that has been read. */
	MAILBOX_STATE_READ,
};
45
/**
 * Bookkeeping for one VM waiting on another VM's mailbox to become writable.
 * Each VM owns one entry per potential peer (see `struct vm.wait_entries`).
 */
struct wait_entry {
	/** The VM that is waiting for a mailbox to become writable. */
	struct vm *waiting_vm;

	/**
	 * Links used to add entry to a VM's waiter_list. This is protected by
	 * the notifying VM's lock.
	 */
	struct list_entry wait_links;

	/**
	 * Links used to add entry to a VM's ready_list. This is protected by
	 * the waiting VM's lock.
	 */
	struct list_entry ready_links;
};
62
/** A VM's RX/TX mailbox pair and its waiter bookkeeping. */
struct mailbox {
	/** Current state of the RX buffer; see `enum mailbox_state`. */
	enum mailbox_state state;
	/** The receive (RX) buffer. */
	void *recv;
	/**
	 * The send (TX) buffer. Const-qualified: nothing writes through this
	 * pointer on the hypervisor side.
	 */
	const void *send;

	/** The ID of the VM which sent the message currently in `recv`. */
	ffa_vm_id_t recv_sender;

	/** The size of the message currently in `recv`. */
	uint32_t recv_size;

	/**
	 * The FF-A function ID to use to deliver the message currently in
	 * `recv`.
	 */
	uint32_t recv_func;

	/**
	 * List of wait_entry structs representing VMs that want to be notified
	 * when the mailbox becomes writable. Once the mailbox does become
	 * writable, the entry is removed from this list and added to the
	 * waiting VM's ready_list.
	 */
	struct list_entry waiter_list;

	/**
	 * List of wait_entry structs representing VMs whose mailboxes became
	 * writable since the owner of the mailbox registers for notification.
	 */
	struct list_entry ready_list;
};
94
/** Pending-notification tracking for one notification target (global or per-vCPU). */
struct notifications_state {
	/**
	 * To keep track of the notifications pending.
	 * Set on call to FFA_NOTIFICATION_SET, and cleared on call to
	 * FFA_NOTIFICATION_GET.
	 */
	ffa_notifications_bitmap_t pending;

	/**
	 * Set on FFA_NOTIFICATION_INFO_GET to keep track of the notifications
	 * whose information has been retrieved by the referred ABI.
	 * Cleared on call to FFA_NOTIFICATION_GET.
	 */
	ffa_notifications_bitmap_t info_get_retrieved;
};
110
/** Per-VM notification bindings and pending state for one sender world. */
struct notifications {
	/**
	 * The following array maps the notifications to the bound FF-A
	 * endpoint.
	 * The index in the bindings array relates to the notification
	 * ID, and bit position in 'ffa_notifications_bitmap_t'.
	 */
	ffa_vm_id_t bindings_sender_id[MAX_FFA_NOTIFICATIONS];
	/*
	 * NOTE(review): bit i set presumably marks notification i as bound
	 * per-vCPU rather than globally — confirm against the .c file.
	 */
	ffa_notifications_bitmap_t bindings_per_vcpu;

	/* The index of the array below relates to the ID of the VCPU. */
	struct notifications_state per_vcpu[MAX_CPUS];
	struct notifications_state global;
};
125
/**
 * The following enum relates to a state machine to guide the insertion of
 * IDs in the respective list as a result of a FFA_NOTIFICATION_INFO_GET call.
 * As per the FF-A v1.1 specification, the return of the interface
 * FFA_NOTIFICATION_INFO_GET, is a list of 16-bit values, regarding the VM ID
 * and VCPU IDs of those with pending notifications.
 * The overall list, is composed of "sub-lists", that starts with the VM ID, and
 * can follow with up to 3 more VCPU IDs. A VM can have multiple 'sub-lists'.
 * The states are traversed on a per VM basis, and should help with filling the
 * list of IDs.
 *
 * INIT is the initial state. The following state transitions are possible:
 * * INIT => INSERTING: no list has been created for the VM prior. There are
 *   notifications pending and the VM ID should be inserted first. If it is a
 *   per-VCPU notification the VCPU ID should follow. Only VCPU IDs should be
 *   inserted from this point, until reaching the "sub-list" size limit.
 * * INIT => FULL: There is no space in the ID list to insert IDs.
 * * INSERTING => STARTING_NEW: list has been created. Adding only VCPU IDs,
 *   however the "sub-list" limit has been reached. If there are more per-VCPU
 *   notifications pending for the VM, a new list should be created starting
 *   with the VM ID.
 * * INSERTING => FULL: There is no space in the ID list to insert IDs.
 * * STARTING_NEW => INSERTING: Started a new 'sub-list' for the given VM, for
 *   the remaining pending per VCPU notifications, only the VCPU ID should be
 *   inserted.
 * * STARTING_NEW => FULL: There is no space in the ID list to insert IDs.
 */
enum notifications_info_get_state {
	/** No IDs have been inserted yet for the current VM. */
	INIT,
	/** A sub-list is open for this VM; only VCPU IDs may be appended. */
	INSERTING,
	/** Sub-list size limit reached; a new sub-list must be started. */
	STARTING_NEW,
	/** The ID list is out of space; nothing more can be inserted. */
	FULL,
};
159
/** Whitelist of SMC calls a VM is permitted to make. */
struct smc_whitelist {
	/** Allowed SMC function IDs; only the first `smc_count` are valid. */
	uint32_t smcs[MAX_SMCS];
	/** Number of valid entries in `smcs`. */
	uint16_t smc_count;
	/*
	 * NOTE(review): presumably, when true, SMCs not in the list are also
	 * forwarded rather than blocked — confirm in the SMC handling code.
	 */
	bool permissive;
};
165
/** Hypervisor-side representation of one VM / FF-A partition. */
struct vm {
	/** FF-A ID identifying this VM (see vm_find()). */
	ffa_vm_id_t id;
	/** FF-A UUID of the partition. */
	struct ffa_uuid uuid;
	/** SMC calls this VM is allowed to make. */
	struct smc_whitelist smc_whitelist;

	/** See api.c for the partial ordering on locks. */
	struct spinlock lock;
	/** Number of valid entries in `vcpus`. */
	ffa_vcpu_count_t vcpu_count;
	struct vcpu vcpus[MAX_CPUS];
	/** Stage-2 page table for this VM's address space. */
	struct mm_ptable ptable;
	/** RX/TX mailbox state. */
	struct mailbox mailbox;

	struct {
		/**
		 * State structures for notifications coming from VMs or coming
		 * from SPs. Both fields are maintained by the SPMC.
		 * The hypervisor ignores the 'from_sp' field, given VM
		 * notifications from SPs are managed by the SPMC.
		 */
		struct notifications from_vm;
		struct notifications from_sp;
		/* TODO: include framework notifications */
		/** Whether notification support is enabled for this VM. */
		bool enabled;
	} notifications;

	/** Buffer for characters logged by the VM; see log_buffer_length. */
	char log_buffer[LOG_BUFFER_SIZE];
	/** Number of valid characters currently in `log_buffer`. */
	uint16_t log_buffer_length;

	/**
	 * Wait entries to be used when waiting on other VM mailboxes. See
	 * comments on `struct wait_entry` for the lock discipline of these.
	 */
	struct wait_entry wait_entries[MAX_VMS];

	/** Whether the VM is aborting; atomic so it can be read lock-free. */
	atomic_bool aborting;

	/**
	 * Booting parameters (FF-A SP partitions).
	 */
	bool initialized;
	/*
	 * NOTE(review): position in the boot chain built by vm_update_boot();
	 * confirm ordering direction (lower vs higher boots first) in vm.c.
	 */
	uint16_t boot_order;
	/* Messaging methods supported by the partition (manifest field). */
	uint8_t messaging_method;
	/** Whether the partition supports managed exit. */
	bool managed_exit;
	/** Next VM in the boot chain; see vm_get_first_boot(). */
	struct vm *next_boot;

	/**
	 * Secondary entry point supplied by FFA_SECONDARY_EP_REGISTER used
	 * for cold and warm boot of SP execution contexts.
	 */
	ipaddr_t secondary_ep;

	/** Arch-specific VM information. */
	struct arch_vm arch;
	/** True if this partition runs at EL0 (rather than EL1). */
	bool el0_partition;
};
221
/** Encapsulates a VM whose lock is held. */
struct vm_locked {
	struct vm *vm;
};
226
/** Container for two vm_locked structures, as returned by vm_lock_both(). */
struct two_vm_locked {
	struct vm_locked vm1;
	struct vm_locked vm2;
};
232
/* VM lifecycle, lookup and locking. */
struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition);
bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition);
ffa_vm_count_t vm_get_count(void);
struct vm *vm_find(ffa_vm_id_t id);
struct vm_locked vm_find_locked(ffa_vm_id_t id);
struct vm *vm_find_index(uint16_t index);
struct vm_locked vm_lock(struct vm *vm);
/* Locks both VMs in a canonical order to avoid deadlock; see vm.c. */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2);
void vm_unlock(struct vm_locked *locked);
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index);
/* Mailbox wait-entry accessors; see `struct wait_entry`. */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm);
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry);
bool vm_id_is_current_world(ffa_vm_id_t vm_id);

/* Stage-2 address-space management for a locked VM. */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool);
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool);
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool);
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool);

/* Boot-order chain maintenance; see `struct vm.next_boot`. */
void vm_update_boot(struct vm *vm);
struct vm *vm_get_first_boot(void);

bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode);

/* FF-A notification bindings, state and info-get list construction. */
void vm_notifications_init_bindings(struct notifications *n);
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications);
bool vm_are_notifications_enabled(struct vm_locked vm_locked);
bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif);
bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vm_id_t sender_id,
	ffa_notifications_bitmap_t notifications);
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_vm_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu);
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_vm_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu);
void vm_notifications_set(struct vm_locked vm_locked, bool is_from_vm,
			  ffa_notifications_bitmap_t notifications,
			  ffa_vcpu_index_t vcpu_id, bool is_per_vcpu);
ffa_notifications_bitmap_t vm_notifications_get_pending_and_clear(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_vcpu_index_t cur_vcpu_id);
/* Builds the FFA_NOTIFICATION_INFO_GET ID list; see
 * `enum notifications_info_get_state` for the insertion state machine. */
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state);
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count);