/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/hf_ipi.h"

#include "hf/cpu.h"
#include "hf/ffa/notifications.h"
#include "hf/plat/interrupts.h"

/** Interrupt priority for Inter-Processor Interrupt. */
#define IPI_PRIORITY 0x0U

/**
 * Initialize the IPI SGI.
 */
void hf_ipi_init_interrupt(void)
{
	/* Configure as a Secure SGI. */
	struct interrupt_descriptor ipi_desc = {
		.interrupt_id = HF_IPI_INTID,
		.type = INT_DESC_TYPE_SGI,
		.sec_state = INT_DESC_SEC_STATE_S,
		.priority = IPI_PRIORITY,
		.valid = true,
		.enabled = true,
	};

	plat_interrupts_configure_interrupt(ipi_desc);
}

/**
 * Returns the next target vCPU with a pending IPI and removes it from
 * the current CPU's list to show it has been retrieved.
 * The running vCPU is prioritised to prevent it from being put into
 * the PREEMPTED state before it has handled its IPI; this could happen
 * when a vCPU in the WAITING state also has a pending IPI.
 * Returns NULL in the case of a spurious IPI physical interrupt, where
 * the target vCPUs have already handled their pending IPIs.
 */
struct vcpu *hf_ipi_get_pending_target_vcpu(struct vcpu *current)
{
	struct list_entry *list;
	struct vcpu *target_vcpu;

	/* Lock the CPU the list belongs to. */
	sl_lock(&current->cpu->lock);

	/*
	 * Check if the current vCPU has a pending IPI and, if so,
	 * prioritise it.
	 */
	if (!list_empty(&current->ipi_list_node)) {
		list = &current->ipi_list_node;
	} else {
		/*
		 * If the current vCPU doesn't have a pending IPI, check the
		 * other vCPUs on the current CPU.
		 */
		list = &current->cpu->pending_ipis;

		if (list_empty(list)) {
			target_vcpu = NULL;
			goto out;
		}

		/*
		 * The list is circular: the root element does not belong to a
		 * vCPU but is used to track whether the list is empty and, if
		 * not, to point to the first vCPU with a pending IPI.
		 */
		list = list->next;
	}

	/*
	 * The next vCPU with a pending IPI has been retrieved to be handled,
	 * so remove it from the list.
	 */
	list_remove(list);
	target_vcpu = CONTAINER_OF(list, struct vcpu, ipi_list_node);

out:
	sl_unlock(&current->cpu->lock);
	return target_vcpu;
}
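
/*
 * For illustration only (a sketch derived from the comments above, not taken
 * from the list headers): the per-CPU pending-IPI list is circular, with the
 * root entry embedded in struct cpu and every other node being the
 * ipi_list_node embedded in a vCPU pinned to that CPU, e.g.:
 *
 *	cpu->pending_ipis <-> vcpuA->ipi_list_node <-> vcpuB->ipi_list_node
 *	        ^                                               |
 *	        +-----------------------------------------------+
 */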

/**
 * Send and record the IPI for the target vCPU.
 */
void hf_ipi_send_interrupt(struct vm *vm, ffa_vcpu_index_t target_vcpu_index)
{
	struct vcpu *target_vcpu = vm_get_vcpu(vm, target_vcpu_index);
	struct cpu *target_cpu = target_vcpu->cpu;

	sl_lock(&target_cpu->lock);
	/*
	 * Since vCPUs are pinned to a physical CPU, they can only belong
	 * to one list. Therefore check whether the vCPU is already in a
	 * list; if not, add it and send the IPI SGI.
	 */
	if (list_empty(&target_vcpu->ipi_list_node)) {
		list_prepend(&target_cpu->pending_ipis,
			     &target_vcpu->ipi_list_node);

		plat_interrupts_send_sgi(HF_IPI_INTID, target_cpu, true);
	}

	sl_unlock(&target_cpu->lock);
}
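
/*
 * Usage sketch (a hypothetical caller, for illustration only; real call sites
 * perform their own validation of the target VM and vCPU index):
 *
 *	struct vm *target_vm = vm_find(target_vm_id);
 *
 *	if (target_vm != NULL && target_vcpu_index < target_vm->vcpu_count) {
 *		hf_ipi_send_interrupt(target_vm, target_vcpu_index);
 *	}
 *
 * vm_find() and vcpu_count are assumed here; the point is simply that the
 * caller resolves the VM and a valid vCPU index before requesting the IPI.
 */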

/**
 * Enum to track the next SRI action that should be performed for an IPI to
 * a vCPU in the WAITING state.
 */
enum ipi_sri_action {
	/* First entry into the handling function. */
	IPI_SRI_ACTION_INIT,
	/* For a vCPU in the WAITING state, trigger an SRI that is not
	 * delayed. */
	IPI_SRI_ACTION_NOT_DELAYED,
	/*
	 * For a vCPU in the WAITING state, set a delayed SRI to prioritize a
	 * running vCPU, preventing the running vCPU from being preempted.
	 */
	IPI_SRI_ACTION_DELAYED,
	/* SRI already set. */
	IPI_SRI_ACTION_NONE,
};
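
/*
 * Sketch of the ipi_sri_action transitions performed by
 * hf_ipi_handle_list_element() below, derived from its documentation (not an
 * exhaustive state machine):
 *
 *	INIT ------------- head of list is RUNNING ----------------> DELAYED
 *	INIT ------------- head of list is PREEMPTED/BLOCKED ------> NOT_DELAYED
 *	INIT/NOT_DELAYED -- WAITING vCPU, SRI triggered now -------> NONE
 *	DELAYED ----------- WAITING vCPU, delayed SRI set ----------> NONE
 */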

/**
 * IPI IRQ handling for each vCPU state; the ipi_sri_action is used to know
 * which SRI action to take when there is a vCPU in the WAITING state.
 * The elements of the list of vCPUs on the CPU with pending IPIs are
 * traversed and, depending on the state of each, the IPI-specific handling
 * is performed:
 * - RUNNING: Set the ipi_sri_action to IPI_SRI_ACTION_DELAYED, so that if an
 *   SRI is required for a different vCPU, the running (current) vCPU will
 *   still handle the IPI. Return false so that the normal secure interrupt
 *   handling continues.
 * - WAITING: If the ipi_sri_action is IPI_SRI_ACTION_NONE, an SRI has either
 *   already been triggered or set to delayed, so nothing more needs to be
 *   done. Otherwise:
 *   - If the running vCPU has a pending IPI, the ipi_sri_action will be
 *     IPI_SRI_ACTION_DELAYED, so set the SRI to delayed. This means the SRI
 *     will be triggered on the next world switch to the NWd and the running
 *     vCPU will not be stopped before it has handled its IPI. Set the
 *     ipi_sri_action to IPI_SRI_ACTION_NONE, as the SRI only needs to be set
 *     once.
 *   - If the running vCPU does not have a pending IPI, the ipi_sri_action
 *     will be either IPI_SRI_ACTION_INIT, if we are at the head of the list,
 *     or IPI_SRI_ACTION_NOT_DELAYED. In these cases trigger the SRI
 *     immediately, so the NWd can schedule the target vCPU to handle the
 *     IPI. Set the ipi_sri_action to IPI_SRI_ACTION_NONE, as the SRI only
 *     needs to be triggered once.
 * - PREEMPTED/BLOCKED:
 *   - If it is the head of the list (indicated by IPI_SRI_ACTION_INIT),
 *     return false and allow the normal secure interrupt handling to handle
 *     the interrupt as usual.
 *   - Otherwise queue the interrupt for the vCPU.
 * Returns true if the IPI SGI has been fully handled.
 * Returns false if further secure interrupt handling is required; this will
 * only be the case for the target vCPU at the head of the pending IPI list,
 * when it is in the RUNNING, PREEMPTED or BLOCKED state.
 */
static bool hf_ipi_handle_list_element(struct vcpu_locked target_vcpu_locked,
				       enum ipi_sri_action *ipi_sri_action)
{
	bool ret = true;
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;

	assert(ipi_sri_action != NULL);

	vcpu_virt_interrupt_inject(target_vcpu_locked, HF_IPI_INTID);

	switch (target_vcpu->state) {
	case VCPU_STATE_RUNNING:
		if (*ipi_sri_action != IPI_SRI_ACTION_INIT) {
			panic("%s: If present the RUNNING vCPU should be the "
			      "first to be handled.\n",
			      __func__);
		}
		/*
		 * Any SRI should be delayed to prioritize the running vCPU,
		 * preventing the SRI from putting it into the PREEMPTED state
		 * before the IPI is handled.
		 */
		*ipi_sri_action = IPI_SRI_ACTION_DELAYED;
		ret = false;
		break;
	case VCPU_STATE_WAITING:
		if (*ipi_sri_action == IPI_SRI_ACTION_INIT ||
		    *ipi_sri_action == IPI_SRI_ACTION_NOT_DELAYED) {
			/*
			 * The current target vCPU is either the first element
			 * in the pending list or there is no running vCPU in
			 * the list, so it is safe to trigger the SRI
			 * immediately.
			 */
			ffa_notifications_sri_trigger_not_delayed(
				target_vcpu->cpu);
		} else if (*ipi_sri_action == IPI_SRI_ACTION_DELAYED) {
			/*
			 * Otherwise a running vCPU has a pending IPI, so set a
			 * delayed SRI so as not to preempt the running vCPU
			 * before it is able to handle its IPI.
			 */
			ffa_notifications_sri_set_delayed(target_vcpu->cpu);
		}
		*ipi_sri_action = IPI_SRI_ACTION_NONE;
		break;
	case VCPU_STATE_BLOCKED:
	case VCPU_STATE_PREEMPTED:
		if (*ipi_sri_action == IPI_SRI_ACTION_INIT) {
			/*
			 * The current target vCPU is at the head of the list
			 * of pending IPIs, so allow it to be handled by the
			 * default secure interrupt handling. Change the state
			 * to IPI_SRI_ACTION_NOT_DELAYED since there now can't
			 * be any running vCPUs with pending IPIs (such a vCPU
			 * would have been the head of the list), so it is safe
			 * to trigger the SRI for any waiting vCPUs
			 * immediately.
			 */
			*ipi_sri_action = IPI_SRI_ACTION_NOT_DELAYED;
			ret = false;
		}
		break;
	default:
		dlog_error(
			"%s: unexpected state: %u handling an IPI for [%x %u]",
			__func__, target_vcpu->state, target_vcpu->vm->id,
			vcpu_index(target_vcpu));
	}

	return ret;
}
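
/*
 * Worked example (for illustration, derived from the rules above): suppose
 * the pending list on this CPU holds vCPU0 (RUNNING, the current vCPU) and
 * vCPU1 (WAITING).
 * - vCPU0 is handled first: ipi_sri_action becomes IPI_SRI_ACTION_DELAYED and
 *   false is returned, so the normal secure interrupt handling lets vCPU0
 *   take its injected IPI.
 * - vCPU1 is handled next: because the action is DELAYED, only a delayed SRI
 *   is set, so the NWd is notified on the next world switch rather than by
 *   preempting vCPU0. The action then becomes IPI_SRI_ACTION_NONE.
 */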

/**
 * IPI IRQ specific handling for the secure interrupt.
 */
bool hf_ipi_handle(struct vcpu_locked target_vcpu_locked)
{
	enum ipi_sri_action ipi_sri_action = IPI_SRI_ACTION_INIT;
	bool ret = true;

	ret = hf_ipi_handle_list_element(target_vcpu_locked, &ipi_sri_action);

	/*
	 * Clear the pending IPI list, handling the IPI for the remaining
	 * target vCPUs.
	 */
	for (struct vcpu *target_vcpu =
		     hf_ipi_get_pending_target_vcpu(target_vcpu_locked.vcpu);
	     target_vcpu != NULL;
	     target_vcpu = hf_ipi_get_pending_target_vcpu(target_vcpu)) {
		target_vcpu_locked = vcpu_lock(target_vcpu);
		hf_ipi_handle_list_element(target_vcpu_locked, &ipi_sri_action);
		vcpu_unlock(&target_vcpu_locked);
	}

	return ret;
}