/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include <stdatomic.h>

#include "hf/arch/types.h"

#include "hf/cpu.h"
#include "hf/list.h"
#include "hf/mm.h"
#include "hf/mpool.h"

#include "vmapi/hf/ffa.h"

#define MAX_SMCS 32
#define LOG_BUFFER_SIZE 256

/**
 * The state of an RX buffer.
 *
 * EMPTY is the initial state. The following state transitions are possible:
 * * EMPTY → RECEIVED: message sent to the VM.
 * * RECEIVED → READ: secondary VM returns from FFA_MSG_WAIT or
 *   FFA_MSG_POLL, or primary VM returns from FFA_RUN with an FFA_MSG_SEND
 *   where the receiver is itself.
 * * READ → EMPTY: VM called FFA_RX_RELEASE.
 */
enum mailbox_state {
	/** There is no message in the mailbox. */
	MAILBOX_STATE_EMPTY,

	/** There is a message in the mailbox that is waiting for a reader. */
	MAILBOX_STATE_RECEIVED,

	/** There is a message in the mailbox that has been read. */
	MAILBOX_STATE_READ,
};
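
/*
 * Illustrative sketch of the READ → EMPTY transition above, roughly what a
 * handler for FFA_RX_RELEASE might do. This is not part of the header's API:
 * the helper name `rx_release` is hypothetical and the real handler does more
 * work, such as notifying waiters (see `struct mailbox` below).
 *
 *	static bool rx_release(struct vm_locked locked)
 *	{
 *		struct mailbox *mb = &locked.vm->mailbox;
 *
 *		if (mb->state != MAILBOX_STATE_READ) {
 *			return false;  // No read message to release.
 *		}
 *		mb->state = MAILBOX_STATE_EMPTY;  // READ → EMPTY
 *		return true;
 *	}
 */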

struct wait_entry {
	/** The VM that is waiting for a mailbox to become writable. */
	struct vm *waiting_vm;

	/**
	 * Links used to add entry to a VM's waiter_list. This is protected by
	 * the notifying VM's lock.
	 */
	struct list_entry wait_links;

	/**
	 * Links used to add entry to a VM's ready_list. This is protected by
	 * the waiting VM's lock.
	 */
	struct list_entry ready_links;
};
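
/*
 * Illustrative sketch of how a wait_entry might be queued (not part of this
 * header): VM `waiter` failed to deliver to `target` because the target's RX
 * buffer was busy, and asks to be notified when it becomes writable. The
 * helper name `register_waiter` is hypothetical, `list_empty`/`list_append`
 * are assumed helpers from "hf/list.h", and the locking of both VMs is
 * omitted.
 *
 *	static void register_waiter(struct vm *waiter, struct vm *target)
 *	{
 *		struct wait_entry *entry =
 *			vm_get_wait_entry(waiter, target->id);
 *
 *		// Only queue the entry if it is not already on a list.
 *		if (list_empty(&entry->wait_links)) {
 *			list_append(&target->mailbox.waiter_list,
 *				    &entry->wait_links);
 *		}
 *	}
 */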

struct mailbox {
	enum mailbox_state state;
	void *recv;
	const void *send;

	/** The ID of the VM which sent the message currently in `recv`. */
	ffa_vm_id_t recv_sender;

	/** The size of the message currently in `recv`. */
	uint32_t recv_size;

	/**
	 * The FF-A function ID to use to deliver the message currently in
	 * `recv`.
	 */
	uint32_t recv_func;

	/**
	 * List of wait_entry structs representing VMs that want to be notified
	 * when the mailbox becomes writable. Once the mailbox does become
	 * writable, the entry is removed from this list and added to the
	 * waiting VM's ready_list.
	 */
	struct list_entry waiter_list;

	/**
	 * List of wait_entry structs representing VMs whose mailboxes became
	 * writable since the owner of the mailbox registered for notification.
	 */
	struct list_entry ready_list;
};
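
/*
 * Illustrative sketch of how the `recv_*` fields might be filled in when a
 * message is delivered (the EMPTY → RECEIVED transition). Not part of this
 * header: `deliver_to` is a hypothetical helper, the payload is assumed to
 * have already been copied into `recv`, and locking is omitted.
 *
 *	static bool deliver_to(struct vm_locked to, ffa_vm_id_t from,
 *			       uint32_t size, uint32_t func)
 *	{
 *		struct mailbox *mb = &to.vm->mailbox;
 *
 *		if (mb->state != MAILBOX_STATE_EMPTY) {
 *			return false;  // Receiver busy; sender must wait.
 *		}
 *		mb->recv_sender = from;
 *		mb->recv_size = size;
 *		mb->recv_func = func;  // e.g. FFA_MSG_SEND_32
 *		mb->state = MAILBOX_STATE_RECEIVED;  // EMPTY → RECEIVED
 *		return true;
 *	}
 */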

struct smc_whitelist {
	uint32_t smcs[MAX_SMCS];
	uint16_t smc_count;
	bool permissive;
};
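
/*
 * Illustrative sketch of how the whitelist might be consulted when a VM
 * issues an SMC (not part of this header; `smc_is_allowed` is a hypothetical
 * name): the call is allowed if its function ID is listed, or if the
 * whitelist is permissive.
 *
 *	static bool smc_is_allowed(const struct smc_whitelist *wl,
 *				   uint32_t func)
 *	{
 *		uint16_t i;
 *
 *		if (wl->permissive) {
 *			return true;
 *		}
 *		for (i = 0; i < wl->smc_count; ++i) {
 *			if (wl->smcs[i] == func) {
 *				return true;
 *			}
 *		}
 *		return false;
 *	}
 */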

struct vm {
	ffa_vm_id_t id;
	struct smc_whitelist smc_whitelist;

	/** See api.c for the partial ordering on locks. */
	struct spinlock lock;
	ffa_vcpu_count_t vcpu_count;
	struct vcpu vcpus[MAX_CPUS];
	struct mm_ptable ptable;
	struct mailbox mailbox;
	char log_buffer[LOG_BUFFER_SIZE];
	uint16_t log_buffer_length;

	/**
	 * Wait entries to be used when waiting on other VM mailboxes. See
	 * comments on `struct wait_entry` for the lock discipline of these.
	 */
	struct wait_entry wait_entries[MAX_VMS];

	atomic_bool aborting;

	/** Arch-specific VM information. */
	struct arch_vm arch;
};

/** Encapsulates a VM whose lock is held. */
struct vm_locked {
	struct vm *vm;
};

/** Container for two vm_locked structures. */
struct two_vm_locked {
	struct vm_locked vm1;
	struct vm_locked vm2;
};
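
/*
 * Illustrative usage of the lock wrappers (see the declarations below). A
 * `vm_locked`/`two_vm_locked` value serves as evidence that the corresponding
 * lock(s) are held, which is why functions that need a locked VM take these
 * by value. The variable names here are hypothetical and error handling is
 * omitted.
 *
 *	struct two_vm_locked both = vm_lock_both(sender, receiver);
 *
 *	// ... operate on both.vm1.vm and both.vm2.vm while both are locked ...
 *
 *	vm_unlock(&both.vm1);
 *	vm_unlock(&both.vm2);
 */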

struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool);
bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm);
ffa_vm_count_t vm_get_count(void);
struct vm *vm_find(ffa_vm_id_t id);
struct vm_locked vm_lock(struct vm *vm);
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2);
void vm_unlock(struct vm_locked *locked);
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index);
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm);
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry);
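
/*
 * Illustrative sketch of looking up a VM and walking its vCPUs with the
 * accessors above (not part of this header; `visit_vcpus` is a hypothetical
 * helper, the loop body is left empty, and `vm_find` is assumed to return
 * NULL for an unknown ID).
 *
 *	static ffa_vcpu_count_t visit_vcpus(ffa_vm_id_t target_id)
 *	{
 *		struct vm *vm = vm_find(target_id);
 *		ffa_vcpu_index_t i;
 *
 *		if (vm == NULL) {
 *			return 0;  // Unknown VM ID.
 *		}
 *		for (i = 0; i < vm->vcpu_count; ++i) {
 *			struct vcpu *vcpu = vm_get_vcpu(vm, i);
 *
 *			// ... inspect or update *vcpu here ...
 *			(void)vcpu;
 *		}
 *		return vm->vcpu_count;
 *	}
 */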

bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool);
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool);
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool);
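
/*
 * Illustrative sketch of the prepare/commit pair above (not part of this
 * header): `vm_identity_prepare` is intended to reserve the page-table memory
 * the update needs from `ppool`, so that the following `vm_identity_commit`
 * does not fail partway through. The helper name `map_region` is
 * hypothetical, and passing NULL for the `ipa` output is assumed to be
 * allowed when the caller does not need it.
 *
 *	static bool map_region(struct vm_locked vm_locked, paddr_t begin,
 *			       paddr_t end, uint32_t mode, struct mpool *ppool)
 *	{
 *		if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
 *			return false;  // Allocation failed; nothing changed.
 *		}
 *		vm_identity_commit(vm_locked, begin, end, mode, ppool, NULL);
 *		return true;
 *	}
 */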