blob: f4c9dab5f73e6324be4560be750514c563f54993 [file] [log] [blame]
Daniel Boulbyb2fb80e2021-02-03 15:09:23 +00001/*
2 * Copyright 2021 The Hafnium Authors.
3 *
4 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
7 */
8
9#pragma once
10
11#include "hf/ffa.h"
Raghu Krishnamurthyb49549e2021-07-02 08:27:38 -070012#include "hf/manifest.h"
Olivier Deprez55a189e2021-06-09 15:45:27 +020013#include "hf/vcpu.h"
J-Alvesc003a7a2021-03-18 13:06:53 +000014#include "hf/vm.h"
Daniel Boulbyb2fb80e2021-02-03 15:09:23 +000015
/**
 * State machine used to synchronize the sending and handling of the
 * Schedule Receiver Interrupt (SRI).
 *
 * The SRI signals the receiver scheduler that there are pending
 * notifications for the receiver; it is sent when there is a valid call
 * to FFA_NOTIFICATION_SET. The SRI handler must call
 * FFA_NOTIFICATION_INFO_GET, after which the FF-A driver processes the
 * returned list and requests the receiver scheduler to give the
 * receiver CPU cycles to process the notification.
 *
 * The state machine keeps the sending and handling of the SRI
 * synchronized and avoids spurious SRIs — ones for which a subsequent
 * FFA_NOTIFICATION_INFO_GET would return the error FFA_NO_DATA, which
 * is plausible in an MP system. It also implements SRI delay: when the
 * set call carries the flag FFA_NOTIFICATIONS_FLAG_DELAY_SRI, the SRI
 * is sent at the context switch to the primary endpoint. The SPMC is
 * implemented under the assumption that the receiver scheduler is an
 * NWd endpoint, hence the delayed SRI is triggered at the world switch.
 * If concurrently another notification is set that requires immediate
 * action, the SRI is triggered immediately within that same execution
 * context.
 *
 * HANDLED is the initial state and means a new SRI can be sent. The
 * possible state transitions are:
 * * HANDLED => DELAYED: setting notification, and requesting SRI delay.
 * * HANDLED => TRIGGERED: setting notification, without requesting SRI
 *   delay.
 * * DELAYED => TRIGGERED: the SRI was delayed, and the context switch
 *   to the receiver scheduler is being done.
 * * DELAYED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
 * * TRIGGERED => HANDLED: the scheduler called
 *   FFA_NOTIFICATION_INFO_GET.
 */
enum plat_ffa_sri_state {
	HANDLED = 0,
	DELAYED,
	TRIGGERED,
};
53
Daniel Boulby87b2dc82021-08-04 14:07:43 +010054/** Returns information on features that are specific to the platform. */
J-Alves6f72ca82021-11-01 12:34:58 +000055struct ffa_value plat_ffa_features(uint32_t function_feature_id);
Daniel Boulbyb2fb80e2021-02-03 15:09:23 +000056/** Returns the SPMC ID. */
57struct ffa_value plat_ffa_spmc_id_get(void);
Olivier Deprez55a189e2021-06-09 15:45:27 +020058
59void plat_ffa_log_init(void);
J-Alvesa09ac2d2022-06-07 13:46:59 +010060void plat_ffa_set_tee_enabled(bool tee_enabled);
61void plat_ffa_init(void);
Maksims Svecovsa3d570c2021-12-08 11:16:32 +000062bool plat_ffa_is_memory_send_valid(ffa_vm_id_t receiver_vm_id,
63 uint32_t share_func);
64
Olivier Deprez55a189e2021-06-09 15:45:27 +020065bool plat_ffa_is_direct_request_valid(struct vcpu *current,
66 ffa_vm_id_t sender_vm_id,
67 ffa_vm_id_t receiver_vm_id);
68bool plat_ffa_is_direct_response_valid(struct vcpu *current,
69 ffa_vm_id_t sender_vm_id,
70 ffa_vm_id_t receiver_vm_id);
J-Alves439ac972021-11-18 17:32:03 +000071bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
72 struct vm *receiver_vm);
Olivier Deprez55a189e2021-06-09 15:45:27 +020073bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
74 struct ffa_value args,
75 struct ffa_value *ret);
J-Alvesa0f317d2021-06-09 13:31:59 +010076bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
77 ffa_vm_id_t vm_id);
J-Alvesc003a7a2021-03-18 13:06:53 +000078
79bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
80 ffa_vm_id_t sender_id,
81 ffa_vm_id_t receiver_id);
J-Alvesb15e9402021-09-08 11:44:42 +010082bool plat_ffa_notifications_update_bindings_forward(
83 ffa_vm_id_t receiver_id, ffa_vm_id_t sender_id, uint32_t flags,
84 ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret);
J-Alvesc003a7a2021-03-18 13:06:53 +000085
J-Alvesaa79c012021-07-09 14:29:45 +010086bool plat_ffa_is_notification_set_valid(struct vcpu *current,
87 ffa_vm_id_t sender_id,
88 ffa_vm_id_t receiver_id);
89
J-Alvesde7bd2f2021-09-09 19:54:35 +010090bool plat_ffa_notification_set_forward(ffa_vm_id_t sender_vm_id,
91 ffa_vm_id_t receiver_vm_id,
92 uint32_t flags,
93 ffa_notifications_bitmap_t bitmap,
94 struct ffa_value *ret);
95
J-Alvesaa79c012021-07-09 14:29:45 +010096bool plat_ffa_is_notification_get_valid(struct vcpu *current,
J-Alvesfc95a302022-04-22 14:18:23 +010097 ffa_vm_id_t receiver_id,
98 uint32_t flags);
J-Alvesaa79c012021-07-09 14:29:45 +010099
J-Alves98ff9562021-09-09 14:39:41 +0100100bool plat_ffa_notifications_get_from_sp(struct vm_locked receiver_locked,
101 ffa_vcpu_index_t vcpu_id,
102 ffa_notifications_bitmap_t *from_sp,
103 struct ffa_value *ret);
104
J-Alvesd605a092022-03-28 14:20:48 +0100105bool plat_ffa_notifications_get_framework_notifications(
106 struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
107 uint32_t flags, ffa_vcpu_index_t vcpu_id, struct ffa_value *ret);
108
Federico Recanati8d8b1cf2022-04-14 13:16:00 +0200109void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked);
110
Federico Recanati10bd06c2022-02-23 17:32:59 +0100111void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked);
112
Federico Recanati8da9e332022-02-10 11:00:17 +0100113void plat_ffa_rxtx_unmap_forward(ffa_vm_id_t id);
114
Olivier Deprez55a189e2021-06-09 15:45:27 +0200115/**
Maksims Svecovs9ddf86a2021-05-06 17:17:21 +0100116 * Checks whether managed exit is supported by given SP.
117 */
118bool plat_ffa_vm_managed_exit_supported(struct vm *vm);
119
120/**
Olivier Deprez55a189e2021-06-09 15:45:27 +0200121 * Encodes memory handle according to section 5.10.2 of the FF-A v1.0 spec.
122 */
123ffa_memory_handle_t plat_ffa_memory_handle_make(uint64_t index);
124
125/**
126 * Checks whether given handle was allocated by current world, according to
127 * handle encoding rules.
128 */
129bool plat_ffa_memory_handle_allocated_by_current_world(
130 ffa_memory_handle_t handle);
Maksims Svecovsb596eab2021-04-27 00:52:27 +0100131
132/**
J-Alves7db32002021-12-14 14:44:50 +0000133 * For non-secure memory, retrieve the NS mode if the partition manager supports
134 * it. The SPMC will return MM_MODE_NS, and the hypervisor 0 as it only deals
135 * with NS accesses by default.
136 */
137uint32_t plat_ffa_other_world_mode(void);
138
139/**
140 * For memory management operations between SWd and NWd, the SPMC might need
141 * to operate NS-memory. The function below returns the mode to use in the mm.h
142 * library, depending on the memory ownder's id.
143 */
144uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id);
145
146/**
Maksims Svecovsb596eab2021-04-27 00:52:27 +0100147 * Return the FF-A partition info VM/SP properties given the VM id.
148 */
149ffa_partition_properties_t plat_ffa_partition_properties(
J-Alvesfa782092021-10-13 16:02:16 +0100150 ffa_vm_id_t vm_id, const struct vm *target);
J-Alvesa0f317d2021-06-09 13:31:59 +0100151
152/**
153 * Get NWd VM's structure.
154 */
155struct vm_locked plat_ffa_vm_find_locked(ffa_vm_id_t vm_id);
156
Federico Recanati8d8b1cf2022-04-14 13:16:00 +0200157struct vm_locked plat_ffa_vm_find_locked_create(ffa_vm_id_t vm_id);
158
J-Alvesa0f317d2021-06-09 13:31:59 +0100159/**
160 * Creates a bitmap for the VM of the given ID.
161 */
162struct ffa_value plat_ffa_notifications_bitmap_create(
163 ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count);
164
165/**
J-Alvesa9c7cba2021-08-25 16:26:11 +0100166 * Issues a FFA_NOTIFICATION_BITMAP_CREATE.
J-Alvesa4730db2021-11-02 10:31:01 +0000167 * Returns true if the call goes well, and false if call returns with
168 * FFA_ERROR_32.
J-Alvesa9c7cba2021-08-25 16:26:11 +0100169 */
170bool plat_ffa_notifications_bitmap_create_call(ffa_vm_id_t vm_id,
J-Alvesa4730db2021-11-02 10:31:01 +0000171 ffa_vcpu_count_t vcpu_count);
J-Alvesa9c7cba2021-08-25 16:26:11 +0100172
173/**
J-Alvesa0f317d2021-06-09 13:31:59 +0100174 * Destroys the notifications bitmap for the given VM ID.
175 */
176struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id);
J-Alvesc003a7a2021-03-18 13:06:53 +0000177
178/**
179 * Helper to get the struct notifications, depending on the sender's id.
180 */
181struct notifications *plat_ffa_vm_get_notifications_senders_world(
182 struct vm_locked vm_locked, ffa_vm_id_t sender_id);
183
184/**
185 * Helper to check if FF-A ID is a VM ID.
186 */
187bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id);
Raghu Krishnamurthy62f97a72021-07-27 02:14:59 -0700188
189/**
190 * Forward normal world calls of FFA_RUN ABI to other world.
191 */
192bool plat_ffa_run_forward(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
193 struct ffa_value *ret);
J-Alvesc8e8a222021-06-08 17:33:52 +0100194
195bool plat_ffa_notification_info_get_call(struct ffa_value *ret);
196
197bool plat_ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
198 uint32_t *lists_sizes,
199 uint32_t *lists_count,
200 const uint32_t ids_count_max);
Raghu Krishnamurthyea6d25f2021-09-14 15:27:06 -0700201
J-Alves13394022021-06-30 13:48:49 +0100202/** Helper to set SRI current state. */
203void plat_ffa_sri_state_set(enum plat_ffa_sri_state state);
204
205/**
206 * Helper to send SRI and safely update `ffa_sri_state`, if there has been
207 * a call to FFA_NOTIFICATION_SET, and the SRI has been delayed.
208 * To be called at a context switch to the NWd.
209 */
210void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu);
211
212/**
213 * Helper to send SRI and safely update `ffa_sri_state`, if it hasn't been
214 * delayed in call to FFA_NOTIFICATION_SET.
215 */
216void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu);
217
218/**
219 * Initialize Schedule Receiver Interrupts needed in the context of
220 * notifications support.
221 */
222void plat_ffa_sri_init(struct cpu *cpu);
223
J-Alvesca058c22021-09-10 14:02:07 +0100224void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
225 uint32_t *lists_sizes,
226 uint32_t *lists_count,
227 const uint32_t ids_count_max);
228
Raghu Krishnamurthyea6d25f2021-09-14 15:27:06 -0700229bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current);
230bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current);
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -0500231
Madhukar Pappireddy5522c672021-12-17 16:35:51 -0600232bool plat_ffa_msg_wait_prepare(struct vcpu *current, struct vcpu **next,
233 struct ffa_value *ret_args);
234
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -0500235/**
236 * Check if current SP can resume target VM/SP using FFA_RUN ABI.
237 */
238bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
Raghu Krishnamurthy048d63f2021-12-11 12:45:41 -0800239 ffa_vcpu_index_t vcpu_idx, struct ffa_value *run_ret,
240 struct vcpu **next);
Madhukar Pappireddyf675bb62021-08-03 12:57:10 -0500241
242/**
243 * Deactivate interrupt.
244 */
245int64_t plat_ffa_interrupt_deactivate(uint32_t pint_id, uint32_t vint_id,
246 struct vcpu *current);
Madhukar Pappireddycbecc962021-08-03 13:11:57 -0500247
248void plat_ffa_secure_interrupt(struct vcpu *current, struct vcpu **next);
Madhukar Pappireddy9e7a11f2021-08-03 13:59:42 -0500249struct ffa_value plat_ffa_delegate_ffa_interrupt(struct vcpu *current,
250 struct vcpu **next);
Madhukar Pappireddyed4ab942021-08-03 14:22:53 -0500251struct ffa_value plat_ffa_normal_world_resume(struct vcpu *current,
252 struct vcpu **next);
253struct ffa_value plat_ffa_preempted_vcpu_resume(struct vcpu *current,
254 struct vcpu **next);
J-Alves7461ef22021-10-18 17:21:33 +0100255
J-Alves6e2abc62021-12-02 14:58:56 +0000256bool plat_ffa_inject_notification_pending_interrupt(
257 struct vcpu_locked next_locked, struct vcpu *current,
258 struct vm_locked receiver_locked);
Olivier Depreze562e542020-06-11 17:31:54 +0200259
260void plat_ffa_partition_info_get_forward(const struct ffa_uuid *uuid,
Daniel Boulbyb46cad12021-12-13 17:47:21 +0000261 const uint32_t flags,
Olivier Depreze562e542020-06-11 17:31:54 +0200262 struct ffa_partition_info *partitions,
263 ffa_vm_count_t *ret_count);
Raghu Krishnamurthyb49549e2021-07-02 08:27:38 -0700264
265void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
266 paddr_t fdt_addr,
267 size_t fdt_allocated_size,
268 const struct manifest_vm *manifest_vm,
269 struct mpool *ppool);
Olivier Deprezd614d322021-06-18 15:21:00 +0200270
271/**
272 * Returns true if the FFA_SECONDARY_EP_REGISTER interface is supported at
273 * the virtual FF-A instance.
274 */
275bool plat_ffa_is_secondary_ep_register_supported(void);