blob: d8e68449e7dc2957545c1072e77c047dae54c16c [file] [log] [blame]
/*
 * Copyright 2021 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */
8
9#pragma once
10
11#include "hf/ffa.h"
Raghu Krishnamurthyb49549e2021-07-02 08:27:38 -070012#include "hf/manifest.h"
Olivier Deprez55a189e2021-06-09 15:45:27 +020013#include "hf/vcpu.h"
J-Alvesc003a7a2021-03-18 13:06:53 +000014#include "hf/vm.h"
Daniel Boulbyb2fb80e2021-02-03 15:09:23 +000015
/**
 * The following enum relates to a state machine to guide the handling of the
 * Scheduler Receiver Interrupt (SRI).
 * The SRI is used to signal the receiver scheduler that there are pending
 * notifications for the receiver, and it is sent when there is a valid call to
 * FFA_NOTIFICATION_SET.
 * The FFA_NOTIFICATION_INFO_GET interface must be called in the SRI handler,
 * after which the FF-A driver should process the returned list, and request
 * the receiver scheduler to give the receiver CPU cycles to process the
 * notification.
 * The use of the following state machine allows for synchronized sending
 * and handling of the SRI, as well as avoiding the occurrence of spurious
 * SRI. A spurious SRI would be one such that upon handling a call to
 * FFA_NOTIFICATION_INFO_GET would return error FFA_NO_DATA, which is plausible
 * in an MP system.
 * The state machine also aims at resolving the delay of the SRI by setting
 * flag FFA_NOTIFICATIONS_FLAG_DELAY_SRI in the arguments of the set call. By
 * delaying, the SRI is sent in context switching to the primary endpoint.
 * The SPMC is implemented under the assumption the receiver scheduler is a
 * NWd endpoint, hence the SRI is triggered at the world switch.
 * If concurrently another notification is set that requires immediate action,
 * the SRI is triggered immediately within that same execution context.
 *
 * HANDLED is the initial state, and means a new SRI can be sent. The following
 * state transitions are possible:
 * * HANDLED => DELAYED: Setting notification, and requesting SRI delay.
 * * HANDLED => TRIGGERED: Setting notification, and not requesting SRI delay.
 * * DELAYED => TRIGGERED: SRI was delayed, and the context switch to the
 *   receiver scheduler is being done.
 * * DELAYED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
 * * TRIGGERED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
 */
enum plat_ffa_sri_state {
	HANDLED = 0,
	DELAYED,
	TRIGGERED,
};
53
Daniel Boulby87b2dc82021-08-04 14:07:43 +010054/** Returns information on features that are specific to the platform. */
J-Alves6f72ca82021-11-01 12:34:58 +000055struct ffa_value plat_ffa_features(uint32_t function_feature_id);
Daniel Boulbyb2fb80e2021-02-03 15:09:23 +000056/** Returns the SPMC ID. */
57struct ffa_value plat_ffa_spmc_id_get(void);
Olivier Deprez55a189e2021-06-09 15:45:27 +020058
59void plat_ffa_log_init(void);
J-Alvesa09ac2d2022-06-07 13:46:59 +010060void plat_ffa_set_tee_enabled(bool tee_enabled);
Raghu Krishnamurthyf5fec202022-09-30 07:25:10 -070061void plat_ffa_init(struct mpool *ppool);
Maksims Svecovsa3d570c2021-12-08 11:16:32 +000062bool plat_ffa_is_memory_send_valid(ffa_vm_id_t receiver_vm_id,
63 uint32_t share_func);
64
Olivier Deprez55a189e2021-06-09 15:45:27 +020065bool plat_ffa_is_direct_request_valid(struct vcpu *current,
66 ffa_vm_id_t sender_vm_id,
67 ffa_vm_id_t receiver_vm_id);
68bool plat_ffa_is_direct_response_valid(struct vcpu *current,
69 ffa_vm_id_t sender_vm_id,
70 ffa_vm_id_t receiver_vm_id);
J-Alves439ac972021-11-18 17:32:03 +000071bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
72 struct vm *receiver_vm);
Olivier Deprez55a189e2021-06-09 15:45:27 +020073bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
74 struct ffa_value args,
75 struct ffa_value *ret);
Federico Recanati25053ee2022-03-14 15:01:53 +010076
Federico Recanati7bef0b92022-03-17 14:56:22 +010077bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
78 struct ffa_value *ret);
79
80bool plat_ffa_rx_release_forwarded(struct vm_locked vm_locked);
81
82bool plat_ffa_acquire_receiver_rx(struct vm_locked locked,
Federico Recanati644f0462022-03-17 12:04:00 +010083 struct ffa_value *ret);
84
Federico Recanati25053ee2022-03-14 15:01:53 +010085bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
86 struct vm_locked receiver_locked);
87
88bool plat_ffa_msg_send2_forward(ffa_vm_id_t receiver_vm_id,
89 ffa_vm_id_t sender_vm_id,
90 struct ffa_value *ret);
91
J-Alvesa0f317d2021-06-09 13:31:59 +010092bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
93 ffa_vm_id_t vm_id);
J-Alvesc003a7a2021-03-18 13:06:53 +000094
95bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
96 ffa_vm_id_t sender_id,
97 ffa_vm_id_t receiver_id);
J-Alvesb15e9402021-09-08 11:44:42 +010098bool plat_ffa_notifications_update_bindings_forward(
99 ffa_vm_id_t receiver_id, ffa_vm_id_t sender_id, uint32_t flags,
100 ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret);
J-Alvesc003a7a2021-03-18 13:06:53 +0000101
J-Alvesaa79c012021-07-09 14:29:45 +0100102bool plat_ffa_is_notification_set_valid(struct vcpu *current,
103 ffa_vm_id_t sender_id,
104 ffa_vm_id_t receiver_id);
105
J-Alvesde7bd2f2021-09-09 19:54:35 +0100106bool plat_ffa_notification_set_forward(ffa_vm_id_t sender_vm_id,
107 ffa_vm_id_t receiver_vm_id,
108 uint32_t flags,
109 ffa_notifications_bitmap_t bitmap,
110 struct ffa_value *ret);
111
J-Alvesaa79c012021-07-09 14:29:45 +0100112bool plat_ffa_is_notification_get_valid(struct vcpu *current,
J-Alvesfc95a302022-04-22 14:18:23 +0100113 ffa_vm_id_t receiver_id,
114 uint32_t flags);
J-Alvesaa79c012021-07-09 14:29:45 +0100115
J-Alves98ff9562021-09-09 14:39:41 +0100116bool plat_ffa_notifications_get_from_sp(struct vm_locked receiver_locked,
117 ffa_vcpu_index_t vcpu_id,
118 ffa_notifications_bitmap_t *from_sp,
119 struct ffa_value *ret);
120
J-Alvesd605a092022-03-28 14:20:48 +0100121bool plat_ffa_notifications_get_framework_notifications(
122 struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
123 uint32_t flags, ffa_vcpu_index_t vcpu_id, struct ffa_value *ret);
124
/** Forwards an FFA_RXTX_MAP for the locked VM to the other world. */
void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked);

/** Tears down platform FF-A state for the locked VM being destroyed. */
void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked);

/** Forwards an FFA_RXTX_UNMAP for the locked VM to the other world. */
void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked);
Federico Recanati8da9e332022-02-10 11:00:17 +0100130
Olivier Deprez55a189e2021-06-09 15:45:27 +0200131/**
Maksims Svecovs9ddf86a2021-05-06 17:17:21 +0100132 * Checks whether managed exit is supported by given SP.
133 */
134bool plat_ffa_vm_managed_exit_supported(struct vm *vm);
135
136/**
Olivier Deprez55a189e2021-06-09 15:45:27 +0200137 * Encodes memory handle according to section 5.10.2 of the FF-A v1.0 spec.
138 */
139ffa_memory_handle_t plat_ffa_memory_handle_make(uint64_t index);
140
141/**
142 * Checks whether given handle was allocated by current world, according to
143 * handle encoding rules.
144 */
145bool plat_ffa_memory_handle_allocated_by_current_world(
146 ffa_memory_handle_t handle);
Maksims Svecovsb596eab2021-04-27 00:52:27 +0100147
148/**
J-Alves7db32002021-12-14 14:44:50 +0000149 * For non-secure memory, retrieve the NS mode if the partition manager supports
150 * it. The SPMC will return MM_MODE_NS, and the hypervisor 0 as it only deals
151 * with NS accesses by default.
152 */
153uint32_t plat_ffa_other_world_mode(void);
154
155/**
156 * For memory management operations between SWd and NWd, the SPMC might need
157 * to operate NS-memory. The function below returns the mode to use in the mm.h
158 * library, depending on the memory ownder's id.
159 */
160uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id);
161
162/**
Maksims Svecovsb596eab2021-04-27 00:52:27 +0100163 * Return the FF-A partition info VM/SP properties given the VM id.
164 */
165ffa_partition_properties_t plat_ffa_partition_properties(
J-Alvesfa782092021-10-13 16:02:16 +0100166 ffa_vm_id_t vm_id, const struct vm *target);
J-Alvesa0f317d2021-06-09 13:31:59 +0100167
168/**
169 * Get NWd VM's structure.
170 */
171struct vm_locked plat_ffa_vm_find_locked(ffa_vm_id_t vm_id);
172
Federico Recanati8d8b1cf2022-04-14 13:16:00 +0200173struct vm_locked plat_ffa_vm_find_locked_create(ffa_vm_id_t vm_id);
174
J-Alvesa0f317d2021-06-09 13:31:59 +0100175/**
176 * Creates a bitmap for the VM of the given ID.
177 */
178struct ffa_value plat_ffa_notifications_bitmap_create(
179 ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count);
180
181/**
J-Alvesa9c7cba2021-08-25 16:26:11 +0100182 * Issues a FFA_NOTIFICATION_BITMAP_CREATE.
J-Alvesa4730db2021-11-02 10:31:01 +0000183 * Returns true if the call goes well, and false if call returns with
184 * FFA_ERROR_32.
J-Alvesa9c7cba2021-08-25 16:26:11 +0100185 */
186bool plat_ffa_notifications_bitmap_create_call(ffa_vm_id_t vm_id,
J-Alvesa4730db2021-11-02 10:31:01 +0000187 ffa_vcpu_count_t vcpu_count);
J-Alvesa9c7cba2021-08-25 16:26:11 +0100188
189/**
J-Alvesa0f317d2021-06-09 13:31:59 +0100190 * Destroys the notifications bitmap for the given VM ID.
191 */
192struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id);
J-Alvesc003a7a2021-03-18 13:06:53 +0000193
194/**
195 * Helper to get the struct notifications, depending on the sender's id.
196 */
197struct notifications *plat_ffa_vm_get_notifications_senders_world(
198 struct vm_locked vm_locked, ffa_vm_id_t sender_id);
199
200/**
201 * Helper to check if FF-A ID is a VM ID.
202 */
203bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id);
Raghu Krishnamurthy62f97a72021-07-27 02:14:59 -0700204
205/**
206 * Forward normal world calls of FFA_RUN ABI to other world.
207 */
208bool plat_ffa_run_forward(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
209 struct ffa_value *ret);
J-Alvesc8e8a222021-06-08 17:33:52 +0100210
211bool plat_ffa_notification_info_get_call(struct ffa_value *ret);
212
213bool plat_ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
214 uint32_t *lists_sizes,
215 uint32_t *lists_count,
216 const uint32_t ids_count_max);
Raghu Krishnamurthyea6d25f2021-09-14 15:27:06 -0700217
J-Alves13394022021-06-30 13:48:49 +0100218/** Helper to set SRI current state. */
219void plat_ffa_sri_state_set(enum plat_ffa_sri_state state);
220
221/**
222 * Helper to send SRI and safely update `ffa_sri_state`, if there has been
223 * a call to FFA_NOTIFICATION_SET, and the SRI has been delayed.
224 * To be called at a context switch to the NWd.
225 */
226void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu);
227
228/**
229 * Helper to send SRI and safely update `ffa_sri_state`, if it hasn't been
230 * delayed in call to FFA_NOTIFICATION_SET.
231 */
232void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu);
233
234/**
235 * Initialize Schedule Receiver Interrupts needed in the context of
236 * notifications support.
237 */
238void plat_ffa_sri_init(struct cpu *cpu);
239
J-Alvesca058c22021-09-10 14:02:07 +0100240void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
241 uint32_t *lists_sizes,
242 uint32_t *lists_count,
243 const uint32_t ids_count_max);
244
Raghu Krishnamurthyea6d25f2021-09-14 15:27:06 -0700245bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current);
246bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current);
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -0500247
Madhukar Pappireddydd883202022-10-24 16:49:28 -0500248struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu *current,
249 struct vcpu **next);
Madhukar Pappireddy5522c672021-12-17 16:35:51 -0600250
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -0500251/**
252 * Check if current SP can resume target VM/SP using FFA_RUN ABI.
253 */
254bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
Raghu Krishnamurthy048d63f2021-12-11 12:45:41 -0800255 ffa_vcpu_index_t vcpu_idx, struct ffa_value *run_ret,
256 struct vcpu **next);
Madhukar Pappireddyf675bb62021-08-03 12:57:10 -0500257
258/**
259 * Deactivate interrupt.
260 */
261int64_t plat_ffa_interrupt_deactivate(uint32_t pint_id, uint32_t vint_id,
262 struct vcpu *current);
Madhukar Pappireddycbecc962021-08-03 13:11:57 -0500263
Madhukar Pappireddydc0c8012022-06-21 15:23:14 -0500264struct ffa_value plat_ffa_handle_secure_interrupt(struct vcpu *current,
265 struct vcpu **next,
266 bool from_normal_world);
Madhukar Pappireddyed4ab942021-08-03 14:22:53 -0500267struct ffa_value plat_ffa_normal_world_resume(struct vcpu *current,
268 struct vcpu **next);
269struct ffa_value plat_ffa_preempted_vcpu_resume(struct vcpu *current,
270 struct vcpu **next);
J-Alves7461ef22021-10-18 17:21:33 +0100271
J-Alves6e2abc62021-12-02 14:58:56 +0000272bool plat_ffa_inject_notification_pending_interrupt(
273 struct vcpu_locked next_locked, struct vcpu *current,
274 struct vm_locked receiver_locked);
Olivier Depreze562e542020-06-11 17:31:54 +0200275
276void plat_ffa_partition_info_get_forward(const struct ffa_uuid *uuid,
Daniel Boulbyb46cad12021-12-13 17:47:21 +0000277 const uint32_t flags,
Olivier Depreze562e542020-06-11 17:31:54 +0200278 struct ffa_partition_info *partitions,
279 ffa_vm_count_t *ret_count);
Raghu Krishnamurthyb49549e2021-07-02 08:27:38 -0700280
281void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
282 paddr_t fdt_addr,
283 size_t fdt_allocated_size,
284 const struct manifest_vm *manifest_vm,
285 struct mpool *ppool);
Olivier Deprezd614d322021-06-18 15:21:00 +0200286
287/**
288 * Returns true if the FFA_SECONDARY_EP_REGISTER interface is supported at
289 * the virtual FF-A instance.
290 */
291bool plat_ffa_is_secondary_ep_register_supported(void);
Madhukar Pappireddy0ea239a2022-06-21 17:26:57 -0500292
293/**
294 * Perform checks for the state transition being requested by the Partition
295 * based on it's runtime model and return false if an illegal transition is
296 * being performed.
297 */
298bool plat_ffa_check_runtime_state_transition(struct vcpu *current,
299 ffa_vm_id_t vm_id,
300 ffa_vm_id_t receiver_vm_id,
301 struct vcpu *vcpu, uint32_t func,
302 enum vcpu_state *next_state);
Madhukar Pappireddy49fe6702022-06-21 17:52:23 -0500303
Madhukar Pappireddyd46c06e2022-06-21 18:14:52 -0500304struct vcpu *plat_ffa_unwind_nwd_call_chain_interrupt(struct vcpu *current);
Madhukar Pappireddy1480fce2022-06-21 18:09:25 -0500305void plat_ffa_init_schedule_mode_ffa_run(struct vcpu *current,
306 struct vcpu_locked target_locked);
307
Madhukar Pappireddy49fe6702022-06-21 17:52:23 -0500308void plat_ffa_wind_call_chain_ffa_direct_req(
309 struct vcpu_locked current_locked,
310 struct vcpu_locked receiver_vcpu_locked);
Madhukar Pappireddyc0fb87e2022-06-21 17:59:15 -0500311
312void plat_ffa_unwind_call_chain_ffa_direct_resp(struct vcpu *current,
313 struct vcpu *next);
Madhukar Pappireddy046dad02022-06-21 18:43:33 -0500314
Madhukar Pappireddy486360d2022-09-06 15:32:24 -0500315void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked,
316 struct vm_locked vm_locked);
J-Alves66652252022-07-06 09:49:51 +0100317
J-Alvesb5084cf2022-07-06 14:20:12 +0100318bool plat_ffa_intercept_direct_response(struct vcpu_locked current_locked,
319 struct vcpu **next,
320 struct ffa_value to_ret,
321 struct ffa_value *signal_interrupt);
J-Alves66652252022-07-06 09:49:51 +0100322/*
323 * Handles FF-A memory share calls with recipients from the other world.
324 */
325struct ffa_value plat_ffa_other_world_mem_send(
326 struct vm *from, uint32_t share_func,
327 struct ffa_memory_region **memory_region, uint32_t length,
328 uint32_t fragment_length, struct mpool *page_pool);
J-Alvesfc19b372022-07-06 12:17:35 +0100329
330/**
331 * Handles the memory reclaim if a memory handle from the other world is
332 * provided.
333 */
334struct ffa_value plat_ffa_other_world_mem_reclaim(
335 struct vm *to, ffa_memory_handle_t handle,
336 ffa_memory_region_flags_t flags, struct mpool *page_pool);
Madhukar Pappireddy2f76e492022-09-06 15:21:59 -0500337
J-Alvesb5084cf2022-07-06 14:20:12 +0100338/**
339 * Handles the memory retrieve request if the specified memory handle belongs
340 * to the other world.
341 */
342struct ffa_value plat_ffa_other_world_mem_retrieve(
343 struct vm_locked to_locked, struct ffa_memory_region *retrieve_request,
344 uint32_t length, struct mpool *page_pool);