/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa/notifications.h"

#include "hf/arch/other_world.h"

#include "hf/ffa_internal.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "hypervisor.h"

/**
 * Check the validity of the calls:
 * FFA_NOTIFICATION_BITMAP_CREATE/FFA_NOTIFICATION_BITMAP_DESTROY.
 */
struct ffa_value plat_ffa_is_notifications_bitmap_access_valid(
	struct vcpu *current, ffa_id_t vm_id)
{
	/*
	 * These calls should only be used by the Hypervisor, so any attempt
	 * to invoke them from NWd FF-A endpoints must fail.
	 */
	(void)current;
	(void)vm_id;

	return ffa_error(FFA_NOT_SUPPORTED);
}

bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
					  ffa_id_t sender_id,
					  ffa_id_t receiver_id)
{
	ffa_id_t current_vm_id = current->vm->id;

	/* If Hafnium is the hypervisor, the receiver must be the current VM. */
	return sender_id != receiver_id && current_vm_id == receiver_id;
}

bool plat_ffa_notifications_update_bindings_forward(
	ffa_id_t receiver_id, ffa_id_t sender_id,
	ffa_notification_flags_t flags, ffa_notifications_bitmap_t bitmap,
	bool is_bind, struct ffa_value *ret)
{
	CHECK(ret != NULL);

	if (vm_id_is_current_world(receiver_id) &&
	    !vm_id_is_current_world(sender_id)) {
		dlog_verbose(
			"Forward notifications bind/unbind to other world.\n");
		*ret = arch_other_world_call((struct ffa_value){
			.func = is_bind ? FFA_NOTIFICATION_BIND_32
					: FFA_NOTIFICATION_UNBIND_32,
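			/*
			 * Per the FF-A notification ABI, arg1 packs the
			 * sender ID in bits [31:16] and the receiver ID in
			 * bits [15:0].
			 */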
			.arg1 = (sender_id << 16) | (receiver_id),
			.arg2 = is_bind ? flags : 0U,
			.arg3 = (uint32_t)(bitmap),
			.arg4 = (uint32_t)(bitmap >> 32),
		});
		return true;
	}
	return false;
}
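
/*
 * Illustrative values only (not taken from a real run): binding
 * notification 0 of sender 0x8001 (an SP) to receiver 0x0001 (a VM) would be
 * forwarded as
 *
 *	.func = FFA_NOTIFICATION_BIND_32,
 *	.arg1 = 0x80010001,	sender 0x8001 << 16 | receiver 0x0001
 *	.arg3 = 0x00000001,	low 32 bits of the notification bitmap
 *	.arg4 = 0x00000000,	high 32 bits of the notification bitmap
 */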

bool plat_ffa_is_notification_set_valid(struct vcpu *current,
					ffa_id_t sender_id,
					ffa_id_t receiver_id)
{
	ffa_id_t current_vm_id = current->vm->id;

	/* If Hafnium is the hypervisor, the sender must be the current VM. */
	return sender_id == current_vm_id && sender_id != receiver_id;
}

bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
				       ffa_id_t receiver_vm_id, uint32_t flags,
				       ffa_notifications_bitmap_t bitmap,
				       struct ffa_value *ret)
{
	/* Forward only if receiver is an SP. */
	if (vm_id_is_current_world(receiver_vm_id)) {
		return false;
	}

	dlog_verbose("Forwarding notification set to SPMC.\n");

	*ret = arch_other_world_call((struct ffa_value){
		.func = FFA_NOTIFICATION_SET_32,
		.arg1 = (sender_vm_id << 16) | receiver_vm_id,
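		/* Strip the delay-SRI hint before forwarding to the SPMC. */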
		.arg2 = flags & ~FFA_NOTIFICATIONS_FLAG_DELAY_SRI,
		.arg3 = (uint32_t)(bitmap),
		.arg4 = (uint32_t)(bitmap >> 32),
	});

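	/*
	 * The SPMC's result is propagated through `ret`; returning true only
	 * means the call was forwarded, not that it succeeded.
	 */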
	if (ret->func == FFA_ERROR_32) {
		dlog_verbose("Failed to set notifications from SPMC.\n");
	}

	return true;
}

bool plat_ffa_is_notification_get_valid(struct vcpu *current,
					ffa_id_t receiver_id,
					ffa_notification_flags_t flags)
{
	ffa_id_t current_vm_id = current->vm->id;

	(void)flags;

	/* If Hafnium is the hypervisor, the receiver must be the current VM. */
	return (current_vm_id == receiver_id);
}

struct ffa_value plat_ffa_notifications_bitmap_create(
	ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
{
	(void)vm_id;
	(void)vcpu_count;

	return ffa_error(FFA_NOT_SUPPORTED);
}

struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
{
	(void)vm_id;

	return ffa_error(FFA_NOT_SUPPORTED);
}

bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
					       ffa_vcpu_count_t vcpu_count)
{
	struct ffa_value ret;

	if (plat_ffa_is_tee_enabled()) {
		ret = arch_other_world_call((struct ffa_value){
			.func = FFA_NOTIFICATION_BITMAP_CREATE_32,
			.arg1 = vm_id,
			.arg2 = vcpu_count,
		});

		if (ret.func == FFA_ERROR_32) {
			dlog_error(
				"Failed to create notifications bitmap "
				"for VM: %#x; error: %#x.\n",
				vm_id, ffa_error_code(ret));
			return false;
		}
	}

	return true;
}

void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
					    uint32_t *lists_sizes,
					    uint32_t *lists_count,
					    const uint32_t ids_count_max)
{
	CHECK(ids != NULL);
	CHECK(ids_count != NULL);
	CHECK(lists_sizes != NULL);
	CHECK(lists_count != NULL);
	CHECK(ids_count_max == FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);

	uint32_t local_lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS];
	struct ffa_value ret;

	dlog_verbose("Forwarding notification info get to SPMC.\n");

	ret = arch_other_world_call((struct ffa_value){
		.func = FFA_NOTIFICATION_INFO_GET_64,
	});

	if (ret.func == FFA_ERROR_32) {
		dlog_verbose("No notifications returned by SPMC.\n");
		return;
	}

	*lists_count = ffa_notification_info_get_lists_count(ret);

	if (*lists_count > ids_count_max) {
		*lists_count = 0;
		return;
	}

	/*
	 * The count of IDs must be at least the number of lists, so that it
	 * covers at least the IDs of the FF-A endpoints. Each list size is
	 * between 0 and 3 and counts the vCPUs of that endpoint which have
	 * pending notifications. If `lists_count` is already ids_count_max,
	 * each list size must be 0.
	 */
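	/*
	 * Worked example (hypothetical values): two lists with sizes 1 and 3
	 * give ids_count = 2 + (1 + 3) = 6, i.e. two endpoint IDs plus four
	 * vCPU IDs.
	 */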
	*ids_count = *lists_count;

	for (uint32_t i = 0; i < *lists_count; i++) {
		local_lists_sizes[i] =
			ffa_notification_info_get_list_size(ret, i + 1);

		/*
		 * ... and add the size of each list that is part of the
		 * main list.
		 */
		*ids_count += local_lists_sizes[i];
	}

	/*
	 * Sanity check the returned `lists_count` and the derived
	 * `ids_count`. If something is wrong, reset the arguments to 0 so
	 * that the hypervisor's handling of FFA_NOTIFICATION_INFO_GET can
	 * proceed without the SPMC's values.
	 */
	if (*ids_count > ids_count_max) {
		*ids_count = 0;
		return;
	}

	/* Now copy the list sizes, as the returned sizes have been validated. */
	memcpy_s(lists_sizes, sizeof(lists_sizes[0]) * ids_count_max,
		 local_lists_sizes,
		 sizeof(local_lists_sizes[0]) *
			 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);

	/* Unpack the notifications info from the return. */
	memcpy_s(ids, sizeof(ids[0]) * ids_count_max, &ret.arg3,
		 sizeof(ret.arg3) * FFA_NOTIFICATIONS_INFO_GET_REGS_RET);
}

struct ffa_value plat_ffa_notifications_get_from_sp(
	struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id,
	ffa_notifications_bitmap_t *from_sp)
{
	struct ffa_value ret = {.func = FFA_SUCCESS_32};
	ffa_id_t receiver_id = receiver_locked.vm->id;

	assert(from_sp != NULL);

	ret = arch_other_world_call((struct ffa_value){
		.func = FFA_NOTIFICATION_GET_32,
		.arg1 = (vcpu_id << 16) | receiver_id,
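		/* Request only the notifications pending from SPs. */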
		.arg2 = FFA_NOTIFICATION_FLAG_BITMAP_SP,
	});

	if (ret.func == FFA_ERROR_32) {
		return ret;
	}

	*from_sp = ffa_notification_get_from_sp(ret);

	return ret;
}

struct ffa_value plat_ffa_notifications_get_framework_notifications(
	struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
	ffa_notification_flags_t flags, ffa_vcpu_index_t vcpu_id)
{
	struct ffa_value ret = {.func = FFA_SUCCESS_32};
	ffa_id_t receiver_id = receiver_locked.vm->id;
	ffa_notifications_bitmap_t spm_notifications = 0;

	(void)flags;

	assert(from_fwk != NULL);

	/* Get SPMC notifications. */
	if (plat_ffa_is_tee_enabled()) {
		ret = arch_other_world_call((struct ffa_value){
			.func = FFA_NOTIFICATION_GET_32,
			.arg1 = (vcpu_id << 16) | receiver_id,
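			/* Request only the framework (SPM) notifications bitmap. */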
			.arg2 = FFA_NOTIFICATION_FLAG_BITMAP_SPM,
		});

		if (ffa_func_id(ret) == FFA_ERROR_32) {
			return ret;
		}

		spm_notifications = ffa_notification_get_from_framework(ret);
	}

	/* Merge notifications from SPMC and Hypervisor. */
	*from_fwk = spm_notifications |
		    vm_notifications_framework_get_pending(receiver_locked);

	return ret;
}

/**
 * A hypervisor should send the SRI to the Primary Endpoint. Not implemented,
 * as the hypervisor is only of interest to us for providing a test
 * infrastructure that encompasses the NWd; we are not interested in testing
 * the flow of notifications between VMs only.
 */
void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
{
	(void)cpu;
}

void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
{
	(void)cpu;
}

/**
 * Track that a notification was set on the current CPU with the delay SRI
 * flag.
 */
void plat_ffa_sri_set_delayed(struct cpu *cpu)
{
	(void)cpu;
}
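
/*
 * Rough sketch of the NWd notification flow these hooks take part in,
 * assuming a VM receiver and an SP sender (illustrative, not a definitive
 * sequence):
 *
 *	FFA_NOTIFICATION_BITMAP_CREATE	hypervisor asks the SPMC to create
 *					the receiver VM's bitmap
 *	FFA_NOTIFICATION_BIND		receiver binds the SP's notifications,
 *					forwarded to the SPMC
 *	FFA_NOTIFICATION_SET		SP sets a notification in the SWd
 *	FFA_NOTIFICATION_INFO_GET	hypervisor retrieves the pending IDs,
 *					merging the SPMC's lists with its own
 *	FFA_NOTIFICATION_GET		receiver retrieves the bitmaps,
 *					forwarded here with the SP/SPM flags
 */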