/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa/notifications.h"

#include "hf/arch/other_world.h"

#include "hf/ffa_internal.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "hypervisor.h"

/**
 * Check the validity of FFA_NOTIFICATION_BITMAP_CREATE and
 * FFA_NOTIFICATION_BITMAP_DESTROY calls.
 */
struct ffa_value plat_ffa_is_notifications_bitmap_access_valid(
	struct vcpu *current, ffa_id_t vm_id)
{
	/*
	 * This call should only be used by the Hypervisor, so any attempted
	 * invocation from NWd FF-A endpoints should fail.
	 */
	(void)current;
	(void)vm_id;

	return ffa_error(FFA_NOT_SUPPORTED);
}

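/**
 * Validate the sender and receiver of a notification bind request: when
 * Hafnium is the hypervisor, the receiver must be the current VM and the
 * sender must be a different endpoint.
 */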
bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
					  ffa_id_t sender_id,
					  ffa_id_t receiver_id)
{
	ffa_id_t current_vm_id = current->vm->id;

	/* If Hafnium is the hypervisor, receiver must be the current VM. */
	return sender_id != receiver_id && current_vm_id == receiver_id;
}

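/**
 * Forward an FFA_NOTIFICATION_BIND/FFA_NOTIFICATION_UNBIND call to the SPMC
 * when the receiver is a VM and the sender is an SP. Returns true if the call
 * was forwarded, with the SPMC's response written to `ret`.
 */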
bool plat_ffa_notifications_update_bindings_forward(
	ffa_id_t receiver_id, ffa_id_t sender_id,
	ffa_notification_flags_t flags, ffa_notifications_bitmap_t bitmap,
	bool is_bind, struct ffa_value *ret)
{
	CHECK(ret != NULL);

	if (vm_id_is_current_world(receiver_id) &&
	    !vm_id_is_current_world(sender_id)) {
		dlog_verbose(
			"Forward notifications bind/unbind to other world.\n");
		*ret = arch_other_world_call((struct ffa_value){
			.func = is_bind ? FFA_NOTIFICATION_BIND_32
					: FFA_NOTIFICATION_UNBIND_32,
			.arg1 = (sender_id << 16) | (receiver_id),
			.arg2 = is_bind ? flags : 0U,
			.arg3 = (uint32_t)(bitmap),
			.arg4 = (uint32_t)(bitmap >> 32),
		});
		return true;
	}
	return false;
}

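/**
 * Validate the sender and receiver of an FFA_NOTIFICATION_SET request: when
 * Hafnium is the hypervisor, the sender must be the current VM and must
 * differ from the receiver.
 */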
bool plat_ffa_is_notification_set_valid(struct vcpu *current,
					ffa_id_t sender_id,
					ffa_id_t receiver_id)
{
	ffa_id_t current_vm_id = current->vm->id;

	/* If Hafnium is the hypervisor, sender must be the current VM. */
	return sender_id == current_vm_id && sender_id != receiver_id;
}

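/**
 * Forward an FFA_NOTIFICATION_SET call to the SPMC when the receiver is an
 * SP. Returns true if the call was forwarded, with the SPMC's response
 * written to `ret`.
 */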
bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
				       ffa_id_t receiver_vm_id, uint32_t flags,
				       ffa_notifications_bitmap_t bitmap,
				       struct ffa_value *ret)
{
	/* Forward only if the receiver is an SP. */
	if (vm_id_is_current_world(receiver_vm_id)) {
		return false;
	}

	dlog_verbose("Forwarding notification set to SPMC.\n");

	*ret = arch_other_world_call((struct ffa_value){
		.func = FFA_NOTIFICATION_SET_32,
		.arg1 = (sender_vm_id << 16) | receiver_vm_id,
		.arg2 = flags & ~FFA_NOTIFICATIONS_FLAG_DELAY_SRI,
		.arg3 = (uint32_t)(bitmap),
		.arg4 = (uint32_t)(bitmap >> 32),
	});

	if (ret->func == FFA_ERROR_32) {
		dlog_verbose("Failed to set notifications in the SPMC.\n");
	}

	return true;
}

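/**
 * Validate the receiver of an FFA_NOTIFICATION_GET request: when Hafnium is
 * the hypervisor, the receiver must be the current VM.
 */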
bool plat_ffa_is_notification_get_valid(struct vcpu *current,
					ffa_id_t receiver_id,
					ffa_notification_flags_t flags)
{
	ffa_id_t current_vm_id = current->vm->id;

	(void)flags;

	/* If Hafnium is the hypervisor, receiver must be the current VM. */
	return (current_vm_id == receiver_id);
}

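/**
 * The hypervisor does not serve FFA_NOTIFICATION_BITMAP_CREATE for its own
 * endpoints; bitmaps for VMs are created in the SPMC via
 * plat_ffa_notifications_bitmap_create_call below.
 */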
struct ffa_value plat_ffa_notifications_bitmap_create(
	ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
{
	(void)vm_id;
	(void)vcpu_count;

	return ffa_error(FFA_NOT_SUPPORTED);
}

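/**
 * Likewise, FFA_NOTIFICATION_BITMAP_DESTROY is not served by the hypervisor.
 */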
struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
{
	(void)vm_id;

	return ffa_error(FFA_NOT_SUPPORTED);
}

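/**
 * Request the SPMC to create a notifications bitmap for the given VM.
 * Returns false if the SPMC reports an error, true otherwise (including when
 * no TEE is enabled and there is nothing to forward).
 */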
bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
					       ffa_vcpu_count_t vcpu_count)
{
	struct ffa_value ret;

	if (plat_ffa_is_tee_enabled()) {
		ret = arch_other_world_call((struct ffa_value){
			.func = FFA_NOTIFICATION_BITMAP_CREATE_32,
			.arg1 = vm_id,
			.arg2 = vcpu_count,
		});

		if (ret.func == FFA_ERROR_32) {
			dlog_error(
				"Failed to create notifications bitmap "
				"for VM %#x; error: %#x.\n",
				vm_id, ffa_error_code(ret));
			return false;
		}
	}

	return true;
}

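/**
 * Forward FFA_NOTIFICATION_INFO_GET to the SPMC and unpack the returned lists
 * of endpoint/vCPU IDs into the caller's arguments. If the SPMC's response is
 * inconsistent, the counts are reset to 0 so the hypervisor's own handling
 * can proceed without the SPMC's values.
 */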
void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
					    uint32_t *lists_sizes,
					    uint32_t *lists_count,
					    const uint32_t ids_count_max)
{
	CHECK(ids != NULL);
	CHECK(ids_count != NULL);
	CHECK(lists_sizes != NULL);
	CHECK(lists_count != NULL);
	CHECK(ids_count_max == FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);

	uint32_t local_lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS];
	struct ffa_value ret;

	dlog_verbose("Forwarding notification info get to SPMC.\n");

	ret = arch_other_world_call((struct ffa_value){
		.func = FFA_NOTIFICATION_INFO_GET_64,
	});

	if (ret.func == FFA_ERROR_32) {
		dlog_verbose("No notifications returned by SPMC.\n");
		return;
	}

	*lists_count = ffa_notification_info_get_lists_count(ret);

	if (*lists_count > ids_count_max) {
		*lists_count = 0;
		return;
	}

	/*
	 * The count of IDs should be at least the number of lists, to
	 * account for at least the IDs of the FF-A endpoints. Each list
	 * size is between 0 and 3, and is the count of vCPUs of that
	 * endpoint that have pending notifications.
	 * If `lists_count` is already ids_count_max, each list size
	 * must be 0.
	 */
	*ids_count = *lists_count;

	for (uint32_t i = 0; i < *lists_count; i++) {
		local_lists_sizes[i] =
			ffa_notification_info_get_list_size(ret, i + 1);

		/*
		 * ... and add the size of each list that is part of
		 * the main list.
		 */
		*ids_count += local_lists_sizes[i];
	}

	/*
	 * Sanity check the returned `lists_count` and the determined
	 * `ids_count`. If something is wrong, reset the arguments to 0
	 * so that the hypervisor's handling of FFA_NOTIFICATION_INFO_GET
	 * can proceed without the SPMC's values.
	 */
	if (*ids_count > ids_count_max) {
		*ids_count = 0;
		return;
	}

	/* Copy the list sizes now, as they have been validated. */
	memcpy_s(lists_sizes, sizeof(lists_sizes[0]) * ids_count_max,
		 local_lists_sizes, sizeof(local_lists_sizes));

	/* Unpack the notifications info from the return. */
	memcpy_s(ids, sizeof(ids[0]) * ids_count_max, &ret.arg3,
		 sizeof(ret.arg3) * FFA_NOTIFICATIONS_INFO_GET_REGS_RET);
}

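/**
 * Query the SPMC for the notifications pending from SPs for the given
 * receiver vCPU, writing the resulting bitmap to `from_sp`.
 */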
struct ffa_value plat_ffa_notifications_get_from_sp(
	struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id,
	ffa_notifications_bitmap_t *from_sp)
{
	struct ffa_value ret = {.func = FFA_SUCCESS_32};
	ffa_id_t receiver_id = receiver_locked.vm->id;

	assert(from_sp != NULL);

	ret = arch_other_world_call((struct ffa_value){
		.func = FFA_NOTIFICATION_GET_32,
		.arg1 = (vcpu_id << 16) | receiver_id,
		.arg2 = FFA_NOTIFICATION_FLAG_BITMAP_SP,
	});

	if (ret.func == FFA_ERROR_32) {
		return ret;
	}

	*from_sp = ffa_notification_get_from_sp(ret);

	return ret;
}

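/**
 * Get the framework notifications pending for the given receiver, merging the
 * bitmap reported by the SPMC (when a TEE is enabled) with the one tracked by
 * the hypervisor.
 */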
struct ffa_value plat_ffa_notifications_get_framework_notifications(
	struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
	ffa_notification_flags_t flags, ffa_vcpu_index_t vcpu_id)
{
	struct ffa_value ret = {.func = FFA_SUCCESS_32};
	ffa_id_t receiver_id = receiver_locked.vm->id;
	ffa_notifications_bitmap_t spm_notifications = 0;

	(void)flags;

	assert(from_fwk != NULL);

	/* Get SPMC notifications. */
	if (plat_ffa_is_tee_enabled()) {
		ret = arch_other_world_call((struct ffa_value){
			.func = FFA_NOTIFICATION_GET_32,
			.arg1 = (vcpu_id << 16) | receiver_id,
			.arg2 = FFA_NOTIFICATION_FLAG_BITMAP_SPM,
		});

		if (ffa_func_id(ret) == FFA_ERROR_32) {
			return ret;
		}

		spm_notifications = ffa_notification_get_from_framework(ret);
	}

	/* Merge notifications from SPMC and Hypervisor. */
	*from_fwk = spm_notifications |
		    vm_notifications_framework_get_pending(receiver_locked);

	return ret;
}

/**
 * A hypervisor should send the SRI to the Primary Endpoint. This is not
 * implemented here, as the hypervisor is only relevant to us as part of a
 * test infrastructure that encompasses the NWd, and we are not interested in
 * testing notification flows that involve only VMs.
 */
void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
{
	(void)cpu;
}

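/**
 * Not implemented; see the note on plat_ffa_sri_trigger_if_delayed above.
 */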
void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
{
	(void)cpu;
}

/**
 * Track that a notification was set with the delay SRI flag on the current
 * CPU.
 */
void plat_ffa_sri_set_delayed(struct cpu *cpu)
{
	(void)cpu;
}