blob: b9d69ff04b86e05f8e3cfb2ca9bb5ec5b603e15f [file] [log] [blame]
Andrew Scull18834872018-10-12 11:48:09 +01001/*
Andrew Walbran692b3252019-03-07 15:51:31 +00002 * Copyright 2018 The Hafnium Authors.
Andrew Scull18834872018-10-12 11:48:09 +01003 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
Andrew Scull18834872018-10-12 11:48:09 +01007 */
8
Andrew Scullfbc938a2018-08-20 14:09:28 +01009#pragma once
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +010010
Andrew Scull9726c252019-01-23 13:44:19 +000011#include <stdatomic.h>
12
Andrew Walbran1f32e722019-06-07 17:57:26 +010013#include "hf/arch/types.h"
14
Andrew Scull18c78fc2018-08-20 12:57:41 +010015#include "hf/cpu.h"
Madhukar Pappireddy464f2462021-08-03 11:23:07 -050016#include "hf/interrupt_desc.h"
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +000017#include "hf/list.h"
Andrew Scull18c78fc2018-08-20 12:57:41 +010018#include "hf/mm.h"
Wedson Almeida Filho22d5eaa2018-12-16 00:38:49 +000019#include "hf/mpool.h"
Jose Marinho75509b42019-04-09 09:34:59 +010020
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010021#include "vmapi/hf/ffa.h"
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +010022
Andrew Scullae9962e2019-10-03 16:51:16 +010023#define MAX_SMCS 32
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +010024#define LOG_BUFFER_SIZE 256
Madhukar Pappireddy464f2462021-08-03 11:23:07 -050025#define VM_MANIFEST_MAX_INTERRUPTS 32
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +010026
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -060027/** Action for Other-Secure interrupts by SPMC. */
28#define OTHER_S_INT_ACTION_QUEUED 0
29#define OTHER_S_INT_ACTION_SIGNALED 1
30
/**
 * Power management bitfields stating which messages a VM is willing to be
 * notified about.
 */
35#define VM_POWER_MANAGEMENT_CPU_OFF_SHIFT (0)
36#define VM_POWER_MANAGEMENT_CPU_ON_SHIFT (3)
37
38/**
Andrew Walbrana36f7592019-12-13 18:43:38 +000039 * The state of an RX buffer.
40 *
41 * EMPTY is the initial state. The follow state transitions are possible:
Olivier Deprezcf6e3862021-01-18 10:24:58 +010042 * * EMPTY => RECEIVED: message sent to the VM.
Federico Recanati6c1e05c2022-04-20 11:37:26 +020043 * * RECEIVED => READ: secondary VM receives an RX buffer full notification
44 * or primary VM returns from FFA_RUN with an FFA_MSG_SEND where the receiver
45 * is itself.
Olivier Deprezcf6e3862021-01-18 10:24:58 +010046 * * READ => EMPTY: VM called FFA_RX_RELEASE.
Andrew Walbrana36f7592019-12-13 18:43:38 +000047 */
enum mailbox_state {
	/** There is no message in the mailbox. */
	MAILBOX_STATE_EMPTY,

	/** There is a message in the mailbox that is waiting for a reader. */
	MAILBOX_STATE_RECEIVED,

	/** There is a message in the mailbox that has been read. */
	MAILBOX_STATE_READ,
};
58
/** Entry used to queue a VM on another VM's mailbox-writable wait lists. */
struct wait_entry {
	/** The VM that is waiting for a mailbox to become writable. */
	struct vm *waiting_vm;

	/**
	 * Links used to add entry to a VM's waiter_list. This is protected by
	 * the notifying VM's lock.
	 */
	struct list_entry wait_links;

	/**
	 * Links used to add entry to a VM's ready_list. This is protected by
	 * the waiting VM's lock.
	 */
	struct list_entry ready_links;
};
75
struct mailbox {
	/** Current state of the RX buffer; see enum mailbox_state. */
	enum mailbox_state state;
	/* RX buffer: page where messages for this VM are delivered. */
	void *recv;
	/* TX buffer: page this VM sends messages from; read-only to us. */
	const void *send;

	/** The ID of the VM which sent the message currently in `recv`. */
	ffa_vm_id_t recv_sender;

	/** The size of the message currently in `recv`. */
	uint32_t recv_size;

	/**
	 * The FF-A function ID to use to deliver the message currently in
	 * `recv`.
	 */
	uint32_t recv_func;

	/**
	 * List of wait_entry structs representing VMs that want to be notified
	 * when the mailbox becomes writable. Once the mailbox does become
	 * writable, the entry is removed from this list and added to the
	 * waiting VM's ready_list.
	 */
	struct list_entry waiter_list;

	/**
	 * List of wait_entry structs representing VMs whose mailboxes became
	 * writable since the owner of the mailbox registered for notification.
	 */
	struct list_entry ready_list;
};
107
/** Pending/retrieved state for one set of FF-A notifications. */
struct notifications_state {
	/**
	 * To keep track of the notifications pending.
	 * Set on call to FFA_NOTIFICATION_SET, and cleared on call to
	 * FFA_NOTIFICATION_GET.
	 */
	ffa_notifications_bitmap_t pending;

	/**
	 * Set on FFA_NOTIFICATION_INFO_GET to keep track of the notifications
	 * whose information has been retrieved by the referred ABI.
	 * Cleared on call to FFA_NOTIFICATION_GET.
	 */
	ffa_notifications_bitmap_t info_get_retrieved;
};
123
/** Per-receiver notification bindings and per-vCPU/global pending state. */
struct notifications {
	/**
	 * The following array maps the notifications to the bound FF-A
	 * endpoint.
	 * The index in the bindings array relates to the notification
	 * ID, and bit position in 'ffa_notifications_bitmap_t'.
	 */
	ffa_vm_id_t bindings_sender_id[MAX_FFA_NOTIFICATIONS];
	/* Bitmap of notifications that were bound as per-vCPU. */
	ffa_notifications_bitmap_t bindings_per_vcpu;

	/*
	 * The index of the array below relates to the ID of the VCPU.
	 * This is a dynamically allocated array of struct
	 * notifications_state and has as many entries as vcpu_count.
	 */
	struct notifications_state *per_vcpu;
	/* State of notifications targeting the VM rather than a vCPU. */
	struct notifications_state global;
};
141
/**
 * The following enum relates to a state machine to guide the insertion of
 * IDs in the respective list as a result of a FFA_NOTIFICATION_INFO_GET call.
 * As per the FF-A v1.1 specification, the return of the interface
 * FFA_NOTIFICATION_INFO_GET is a list of 16-bit values, regarding the VM ID
 * and VCPU IDs of those with pending notifications.
 * The overall list is composed of "sub-lists" that start with the VM ID, and
 * can follow with up to 3 more VCPU IDs. A VM can have multiple 'sub-lists'.
 * The states are traversed on a per VM basis, and should help with filling
 * the list of IDs.
 *
 * INIT is the initial state. The following state transitions are possible:
 * * INIT => INSERTING: no list has been created for the VM prior. There are
 * notifications pending and VM ID should be inserted first. If it regards a
 * per VCPU notification the VCPU ID should follow. Only VCPU IDs should be
 * inserted from this point, until reaching "sub-list" size limit.
 * * INIT => FULL: There is no space in the ID list to insert IDs.
 * * INSERTING => STARTING_NEW: list has been created. Adding only VCPU IDs,
 * however "sub-list" limit has been reached. If there are more per VCPU
 * notifications pending for the VM, a new list should be created starting with
 * VM ID.
 * * INSERTING => FULL: There is no space in the ID list to insert IDs.
 * * STARTING_NEW => INSERTING: Started a new 'sub-list' for the given VM, for
 * the remaining pending per VCPU notifications, only the VCPU ID should be
 * inserted.
 * * STARTING_NEW => FULL: There is no space in the ID list to insert IDs.
 */
enum notifications_info_get_state {
	INIT,
	INSERTING,
	STARTING_NEW,
	FULL,
};
175
/** Set of SMC function IDs a VM is allowed to invoke. */
struct smc_whitelist {
	/** Allowed SMC function IDs; only the first `smc_count` are valid. */
	uint32_t smcs[MAX_SMCS];
	/** Number of valid entries in `smcs`. */
	uint16_t smc_count;
	/*
	 * NOTE(review): presumably, when set, SMCs absent from the list are
	 * still permitted rather than refused — confirm in the SMC handler.
	 */
	bool permissive;
};
181
/** Main per-VM/partition bookkeeping structure. */
struct vm {
	ffa_vm_id_t id;
	struct ffa_uuid uuid;
	/** FF-A version negotiated with/declared for this partition. */
	uint32_t ffa_version;
	struct smc_whitelist smc_whitelist;

	/** See api.c for the partial ordering on locks. */
	struct spinlock lock;
	ffa_vcpu_count_t vcpu_count;
	/* Dynamically allocated array of `vcpu_count` execution contexts. */
	struct vcpu *vcpus;
	/** Stage-2 (or EL0 stage-1) page table for this VM. */
	struct mm_ptable ptable;
	struct mailbox mailbox;

	struct {
		/**
		 * State structures for notifications coming from VMs or coming
		 * from SPs. Both fields are maintained by the SPMC.
		 * The hypervisor ignores the 'from_sp' field, given VM
		 * notifications from SPs are managed by the SPMC.
		 */
		struct notifications from_vm;
		struct notifications from_sp;
		/* Notifications originated by the framework itself. */
		struct notifications_state framework;
		bool enabled;
		/* Tracks whether the notification pending interrupt (NPI)
		 * has been injected; see vm_notifications_set_npi_injected. */
		bool npi_injected;
	} notifications;

	/* Ring/accumulation buffer for partition log output. */
	char log_buffer[LOG_BUFFER_SIZE];
	uint16_t log_buffer_length;

	/**
	 * Wait entries to be used when waiting on other VM mailboxes. See
	 * comments on `struct wait_entry` for the lock discipline of these.
	 */
	struct wait_entry wait_entries[MAX_VMS];

	/* Set when the VM aborts; atomic as it may be read unlocked. */
	atomic_bool aborting;

	/**
	 * Booting parameters (FF-A SP partitions).
	 */
	uint16_t boot_order;

	/** Entries to pass boot data to the VM. */
	struct {
		uint32_t gp_register_num;
		ipaddr_t blob_addr;
	} boot_info;

	/* Messaging methods supported, as declared in the manifest. */
	uint8_t messaging_method;

	/**
	 * Action specified by a Partition through the manifest in response to
	 * non secure interrupt.
	 */
	uint8_t ns_interrupts_action;

	/**
	 * Action specified by a Partition through the manifest in response to
	 * Other-S-Int.
	 */
	uint8_t other_s_interrupts_action;
	/* Whether managed exit is signaled through a virtual IRQ. */
	bool me_signal_virq;

	/**
	 * Bitmask reporting the power management events that a partition
	 * requests to be signaled about.
	 */
	uint32_t power_management;

	/**
	 * Secondary entry point supplied by FFA_SECONDARY_EP_REGISTER used
	 * for cold and warm boot of SP execution contexts.
	 */
	ipaddr_t secondary_ep;

	/** Arch-specific VM information. */
	struct arch_vm arch;
	/** True if this partition runs at EL0 (S-EL0) rather than EL1. */
	bool el0_partition;

	/** Interrupt descriptors parsed from the partition manifest. */
	struct interrupt_descriptor interrupt_desc[VM_MANIFEST_MAX_INTERRUPTS];
};
265
/** Encapsulates a VM whose lock is held. */
struct vm_locked {
	struct vm *vm;
};
270
/** Container for two vm_locked structures. */
struct two_vm_locked {
	struct vm_locked vm1;
	struct vm_locked vm2;
};
276
/* VM lifecycle, lookup and locking. */
struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition);
bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition);
ffa_vm_count_t vm_get_count(void);
struct vm *vm_find(ffa_vm_id_t id);
struct vm_locked vm_find_locked(ffa_vm_id_t id);
struct vm *vm_find_index(uint16_t index);
struct vm_locked vm_lock(struct vm *vm);
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2);
void vm_unlock(struct vm_locked *locked);
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index);
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm);
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry);
bool vm_id_is_current_world(ffa_vm_id_t vm_id);
bool vm_is_mailbox_busy(struct vm_locked to);
/* Stage-2 memory mapping operations on a locked VM. */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool);
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool);
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool);
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool);

bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode);
/* FF-A notification state management. */
void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,
			   struct mpool *ppool);
bool vm_mailbox_state_busy(struct vm_locked vm_locked);
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications);
bool vm_are_global_notifications_pending(struct vm_locked vm_locked);
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
					   ffa_vcpu_index_t vcpu_id);
bool vm_are_notifications_enabled(struct vm *vm);
bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked);
bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif);
bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vm_id_t sender_id,
	ffa_notifications_bitmap_t notifications);
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_vm_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu);
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_vm_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu);
void vm_notifications_partition_set_pending(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_notifications_bitmap_t notifications, ffa_vcpu_index_t vcpu_id,
	bool is_per_vcpu);
ffa_notifications_bitmap_t vm_notifications_partition_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vcpu_index_t vcpu_id);
void vm_notifications_framework_set_pending(
	struct vm_locked vm_locked, ffa_notifications_bitmap_t notifications);
ffa_notifications_bitmap_t vm_notifications_framework_get_pending(
	struct vm_locked vm_locked);
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state);
bool vm_notifications_pending_not_retrieved_by_scheduler(void);
bool vm_is_notifications_pending_count_zero(void);
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count);
bool vm_supports_messaging_method(struct vm *vm, uint8_t messaging_method);
void vm_notifications_set_npi_injected(struct vm_locked vm_locked,
				       bool npi_injected);
bool vm_notifications_is_npi_injected(struct vm_locked vm_locked);
/* Boot information handling. */
void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu);
Olivier Deprez20137752023-02-07 10:55:35 +0100356
357/**
358 * Returns true if the VM requested to receive cpu on power management
359 * events.
360 */
361static inline bool vm_power_management_cpu_on_requested(struct vm *vm)
362{
363 return (vm->power_management &
364 (UINT32_C(1) << VM_POWER_MANAGEMENT_CPU_ON_SHIFT)) != 0;
365}
366
367/**
368 * Returns true if the VM requested to receive cpu off power management
369 * events.
370 */
371static inline bool vm_power_management_cpu_off_requested(struct vm *vm)
372{
373 return (vm->power_management &
374 (UINT32_C(1) << VM_POWER_MANAGEMENT_CPU_OFF_SHIFT)) != 0;
375}