/*
 * Copyright 2021 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include "hf/ffa.h"
#include "hf/manifest.h"
#include "hf/vcpu.h"
#include "hf/vm.h"

/**
 * The following enum relates to a state machine used to guide the handling of
 * the Scheduler Receiver Interrupt (SRI).
 * The SRI is used to signal the receiver scheduler that there are pending
 * notifications for the receiver, and it is sent when there is a valid call to
 * FFA_NOTIFICATION_SET.
 * The FFA_NOTIFICATION_INFO_GET interface must be called in the SRI handler,
 * after which the FF-A driver should process the returned list and request
 * the receiver scheduler to give the receiver CPU cycles to process the
 * notification.
 * The use of the following state machine allows for synchronized sending
 * and handling of the SRI, as well as avoiding the occurrence of spurious
 * SRIs. A spurious SRI is one for which, upon handling, a call to
 * FFA_NOTIFICATION_INFO_GET would return the error FFA_NO_DATA, which is
 * plausible in an MP system.
 * The state machine also handles delaying the SRI when the flag
 * FFA_NOTIFICATIONS_FLAG_DELAY_SRI is set in the arguments of the set call.
 * When delayed, the SRI is sent on the context switch to the primary endpoint.
 * The SPMC is implemented under the assumption that the receiver scheduler is
 * a NWd endpoint, hence the SRI is triggered at the world switch.
 * If, concurrently, another notification is set that requires immediate
 * action, the SRI is triggered immediately within that same execution context.
 *
 * HANDLED is the initial state, and means a new SRI can be sent. The following
 * state transitions are possible:
 * * HANDLED => DELAYED: Setting notification, and requesting SRI delay.
 * * HANDLED => TRIGGERED: Setting notification, and not requesting SRI delay.
 * * DELAYED => TRIGGERED: SRI was delayed, and the context switch to the
 * receiver scheduler is being done.
 * * DELAYED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
 * * TRIGGERED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
 * An illustrative sketch of these transitions follows the enum below.
 */
enum plat_ffa_sri_state {
	HANDLED = 0,
	DELAYED,
	TRIGGERED,
};
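
/*
 * Illustrative only: a minimal sketch of the transitions described above,
 * written as a helper that checks whether a requested SRI state change is
 * allowed. The function name is an assumption made for the example; it is
 * not part of the plat_ffa interface.
 */
static inline bool plat_ffa_example_sri_transition_is_valid(
	enum plat_ffa_sri_state current, enum plat_ffa_sri_state next)
{
	switch (current) {
	case HANDLED:
		/* A new SRI can be sent, delayed or not. */
		return next == DELAYED || next == TRIGGERED;
	case DELAYED:
		/*
		 * Either the world switch to the receiver scheduler sends the
		 * delayed SRI, or the scheduler has already called
		 * FFA_NOTIFICATION_INFO_GET.
		 */
		return next == TRIGGERED || next == HANDLED;
	case TRIGGERED:
		/* The scheduler called FFA_NOTIFICATION_INFO_GET. */
		return next == HANDLED;
	default:
		return false;
	}
}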

/** Returns information on features that are specific to the platform. */
struct ffa_value plat_ffa_features(uint32_t function_feature_id);
/** Returns the SPMC ID. */
struct ffa_value plat_ffa_spmc_id_get(void);

void plat_ffa_log_init(void);
void plat_ffa_init(bool tee_enabled);
bool plat_ffa_is_memory_send_valid(ffa_vm_id_t receiver_vm_id,
				   uint32_t share_func);

bool plat_ffa_is_direct_request_valid(struct vcpu *current,
				      ffa_vm_id_t sender_vm_id,
				      ffa_vm_id_t receiver_vm_id);
bool plat_ffa_is_direct_response_valid(struct vcpu *current,
				       ffa_vm_id_t sender_vm_id,
				       ffa_vm_id_t receiver_vm_id);
bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
					  struct vm *receiver_vm);
bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
				     struct ffa_value args,
				     struct ffa_value *ret);
bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
					    ffa_vm_id_t vm_id);

bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
					  ffa_vm_id_t sender_id,
					  ffa_vm_id_t receiver_id);
bool plat_ffa_notifications_update_bindings_forward(
	ffa_vm_id_t receiver_id, ffa_vm_id_t sender_id, uint32_t flags,
	ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret);

bool plat_ffa_is_notification_set_valid(struct vcpu *current,
					ffa_vm_id_t sender_id,
					ffa_vm_id_t receiver_id);

bool plat_ffa_notification_set_forward(ffa_vm_id_t sender_vm_id,
				       ffa_vm_id_t receiver_vm_id,
				       uint32_t flags,
				       ffa_notifications_bitmap_t bitmap,
				       struct ffa_value *ret);

bool plat_ffa_is_notification_get_valid(struct vcpu *current,
					ffa_vm_id_t receiver_id);

bool plat_ffa_notifications_get_from_sp(struct vm_locked receiver_locked,
					ffa_vcpu_index_t vcpu_id,
					ffa_notifications_bitmap_t *from_sp,
					struct ffa_value *ret);

bool plat_ffa_notifications_get_call(ffa_vm_id_t receiver_id, uint32_t vcpu_id,
				     uint32_t flags, struct ffa_value *ret);

/**
 * Checks whether managed exit is supported by the given SP.
 */
bool plat_ffa_vm_managed_exit_supported(struct vm *vm);

/**
 * Encodes memory handle according to section 5.10.2 of the FF-A v1.0 spec.
 */
ffa_memory_handle_t plat_ffa_memory_handle_make(uint64_t index);

/**
 * Checks whether given handle was allocated by current world, according to
 * handle encoding rules.
 */
bool plat_ffa_memory_handle_allocated_by_current_world(
	ffa_memory_handle_t handle);
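
/*
 * Illustrative only: a minimal sketch of one possible handle encoding,
 * assuming a scheme in which one bit of the 64-bit handle records the
 * allocator (the relayer in the SPMC or in the hypervisor) and the remaining
 * bits carry an allocation index. The macro and function names below are
 * assumptions made for the example, not part of this interface.
 */
#define PLAT_FFA_EXAMPLE_ALLOCATOR_BIT ((ffa_memory_handle_t)1 << 63)

static inline ffa_memory_handle_t plat_ffa_example_handle_make(
	uint64_t index, bool allocated_by_hypervisor)
{
	ffa_memory_handle_t handle = (ffa_memory_handle_t)index;

	/* Tag the handle with the world that allocated it. */
	if (allocated_by_hypervisor) {
		handle |= PLAT_FFA_EXAMPLE_ALLOCATOR_BIT;
	}

	return handle;
}

static inline bool plat_ffa_example_handle_from_hypervisor(
	ffa_memory_handle_t handle)
{
	return (handle & PLAT_FFA_EXAMPLE_ALLOCATOR_BIT) != 0;
}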

/**
 * For non-secure memory, retrieves the NS mode if the partition manager
 * supports it. The SPMC returns MM_MODE_NS, and the hypervisor returns 0 as
 * it only deals with NS accesses by default.
 */
uint32_t plat_ffa_other_world_mode(void);

/**
 * For memory management operations between the SWd and the NWd, the SPMC
 * might need to operate on NS memory. The function below returns the mode to
 * use in the mm.h library, depending on the memory owner's ID.
 */
uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id);
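
/*
 * Illustrative only: a minimal sketch of how an SPMC-side implementation
 * might pick the mode, assuming NWd-owned memory must be mapped with the
 * NS mode returned by plat_ffa_other_world_mode() and SWd-owned memory with
 * the default (secure) mode. The function name and the boolean parameter are
 * assumptions made for the example, not part of this interface.
 */
static inline uint32_t plat_ffa_example_owner_world_mode(bool owner_is_nwd_vm)
{
	/* Memory owned by a NWd endpoint is accessed as non-secure. */
	return owner_is_nwd_vm ? plat_ffa_other_world_mode() : 0U;
}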

/**
 * Returns the FF-A partition info VM/SP properties given the VM ID.
 */
ffa_partition_properties_t plat_ffa_partition_properties(
	ffa_vm_id_t vm_id, const struct vm *target);

/**
 * Get NWd VM's structure.
 */
struct vm_locked plat_ffa_vm_find_locked(ffa_vm_id_t vm_id);

/**
 * Creates a bitmap for the VM of the given ID.
 */
struct ffa_value plat_ffa_notifications_bitmap_create(
	ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count);

/**
 * Issues an FFA_NOTIFICATION_BITMAP_CREATE call.
 * Returns true if the call succeeds, and false if it returns FFA_ERROR_32.
 */
bool plat_ffa_notifications_bitmap_create_call(ffa_vm_id_t vm_id,
					       ffa_vcpu_count_t vcpu_count);

/**
 * Destroys the notifications bitmap for the given VM ID.
 */
struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id);

/**
 * Helper to get the struct notifications, depending on the sender's id.
 */
struct notifications *plat_ffa_vm_get_notifications_senders_world(
	struct vm_locked vm_locked, ffa_vm_id_t sender_id);

/**
 * Helper to check if an FF-A ID is a VM ID.
 */
bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id);
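
/*
 * Illustrative only: a minimal sketch of the VM ID check, assuming the FF-A
 * convention that secure partition IDs have bit 15 of the 16-bit ID set,
 * while NWd VM IDs have it clear. The macro and function names are
 * assumptions made for the example, not part of this interface.
 */
#define PLAT_FFA_EXAMPLE_SP_ID_BIT 0x8000U

static inline bool plat_ffa_example_is_vm_id(ffa_vm_id_t vm_id)
{
	/* An ID without the SP bit set belongs to a NWd VM. */
	return (vm_id & PLAT_FFA_EXAMPLE_SP_ID_BIT) == 0;
}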

/**
 * Forwards normal world calls of the FFA_RUN ABI to the other world.
 */
bool plat_ffa_run_forward(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
			  struct ffa_value *ret);

bool plat_ffa_notification_info_get_call(struct ffa_value *ret);

bool plat_ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
					uint32_t *lists_sizes,
					uint32_t *lists_count,
					const uint32_t ids_count_max);

/** Helper to set the SRI current state. */
void plat_ffa_sri_state_set(enum plat_ffa_sri_state state);

/**
 * Helper to send the SRI and safely update `ffa_sri_state`, if there has been
 * a call to FFA_NOTIFICATION_SET and the SRI has been delayed.
 * To be called at a context switch to the NWd.
 */
void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu);

/**
 * Helper to send the SRI and safely update `ffa_sri_state`, if it hasn't been
 * delayed in the call to FFA_NOTIFICATION_SET.
 */
void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu);
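
/*
 * Illustrative only: a minimal sketch of the delayed-SRI path taken on the
 * context switch to the NWd, with locking omitted and the state and the
 * interrupt-sending routine passed in as parameters. The names used here are
 * assumptions made for the example, not part of this interface.
 */
static inline void plat_ffa_example_sri_trigger_if_delayed(
	enum plat_ffa_sri_state *state, void (*send_sri)(struct cpu *cpu),
	struct cpu *cpu)
{
	/* Only send the SRI if a set call requested it to be delayed. */
	if (*state == DELAYED) {
		send_sri(cpu);
		*state = TRIGGERED;
	}
}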

/**
 * Initialize Schedule Receiver Interrupts needed in the context of
 * notifications support.
 */
void plat_ffa_sri_init(struct cpu *cpu);

void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
					    uint32_t *lists_sizes,
					    uint32_t *lists_count,
					    const uint32_t ids_count_max);

bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current);
bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current);

/**
 * Check if current SP can resume target VM/SP using FFA_RUN ABI.
 */
bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
			 ffa_vcpu_index_t vcpu_idx, struct ffa_value *run_ret,
			 struct vcpu **next);

/**
 * Deactivate interrupt.
 */
int64_t plat_ffa_interrupt_deactivate(uint32_t pint_id, uint32_t vint_id,
				      struct vcpu *current);

void plat_ffa_secure_interrupt(struct vcpu *current, struct vcpu **next);
struct ffa_value plat_ffa_delegate_ffa_interrupt(struct vcpu *current,
						 struct vcpu **next);
struct ffa_value plat_ffa_normal_world_resume(struct vcpu *current,
					      struct vcpu **next);
struct ffa_value plat_ffa_preempted_vcpu_resume(struct vcpu *current,
						struct vcpu **next);

void plat_ffa_inject_notification_pending_interrupt_context_switch(
	struct vcpu *next, struct vcpu *current);

void plat_ffa_partition_info_get_forward(const struct ffa_uuid *uuid,
					 const uint32_t flags,
					 struct ffa_partition_info *partitions,
					 ffa_vm_count_t *ret_count);

void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
				       paddr_t fdt_addr,
				       size_t fdt_allocated_size,
				       const struct manifest_vm *manifest_vm,
				       struct mpool *ppool);