Set or clear VI bit in hvc_handler just before returning, using the count.

A count of interrupts that are both enabled and pending is maintained for
this purpose, and is updated whenever the enabled or pending bits change.
This handles the case where the target vCPU was already running on a
different physical CPU when an interrupt was injected, and so had to be
kicked by the primary.
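
As a sketch, the invariant being maintained is the following (paraphrasing
the diff below; popcount() is a stand-in for whatever bit-counting helper
would actually be used, e.g. __builtin_popcount, and vcpu is any
struct vcpu):

    uint32_t count = 0;
    uint8_t i;

    for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
            /* Count the interrupt IDs that are both enabled and pending. */
            count += popcount(vcpu->interrupts.interrupt_enabled[i] &
                              vcpu->interrupts.interrupt_pending[i]);
    }

    /* The cached count must always agree with the bitmaps. */
    assert(count == vcpu->interrupts.enabled_and_pending_count);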

Also add comments explaining what happens when the target of an injected
interrupt is already running on a different physical CPU.
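
Based on the return values documented in the diff below, the primary's side
of this would look roughly like the following (a sketch only;
hf_interrupt_inject() stands in for however the hypercall is issued, and
wake_up_or_kick() for the primary's mechanism of waking or interrupting
another physical CPU):

    int64_t ret = hf_interrupt_inject(target_vm_id, target_vcpu_idx, intid);

    if (ret == 1) {
            /*
             * The target vCPU now has its first enabled and pending
             * interrupt: run it, or kick it (e.g. with an IPI) if it is
             * already running on a different physical CPU.
             */
            wake_up_or_kick(target_vm_id, target_vcpu_idx);
    }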

Finally, add a test for injecting an interrupt ID which is not yet enabled
and then enabling it later.
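
Roughly, the new test does the following (a sketch only; the helper and
constant names, such as hf_interrupt_inject, hf_interrupt_enable,
hf_interrupt_get, SERVICE_VM0 and INTID, are assumptions rather than the
exact test API):

    /* From the primary: inject an interrupt the target has not enabled. */
    hf_interrupt_inject(SERVICE_VM0, 0, INTID);

    /*
     * In the target: nothing is delivered while the ID is disabled, but
     * enabling it later...
     */
    hf_interrupt_enable(INTID, true);

    /* ...delivers the already-pending interrupt. */
    EXPECT_EQ(hf_interrupt_get(), INTID);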

Bug: 117270899
Change-Id: I200f547a5a72332a5e24b5109a3e6e7b66c0b59e
diff --git a/src/api.c b/src/api.c
index 7e6e4e6..e122107 100644
--- a/src/api.c
+++ b/src/api.c
@@ -556,14 +556,26 @@
 
 	sl_lock(&current->lock);
 	if (enable) {
+		/*
+		 * If it is pending and was not enabled before, increment the
+		 * count.
+		 */
+		if (current->interrupts.interrupt_pending[intid_index] &
+		    ~current->interrupts.interrupt_enabled[intid_index] &
+		    intid_mask) {
+			current->interrupts.enabled_and_pending_count++;
+		}
 		current->interrupts.interrupt_enabled[intid_index] |=
 			intid_mask;
-		/* If it is pending, change state and trigger a virtual IRQ. */
-		if (current->interrupts.interrupt_pending[intid_index] &
-		    intid_mask) {
-			arch_regs_set_virtual_interrupt(&current->regs, true);
-		}
 	} else {
+		/*
+		 * If it is pending and was enabled before, decrement the count.
+		 */
+		if (current->interrupts.interrupt_pending[intid_index] &
+		    current->interrupts.interrupt_enabled[intid_index] &
+		    intid_mask) {
+			current->interrupts.enabled_and_pending_count--;
+		}
 		current->interrupts.interrupt_enabled[intid_index] &=
 			~intid_mask;
 	}
@@ -581,7 +593,6 @@
 {
 	uint8_t i;
 	uint32_t first_interrupt = HF_INVALID_INTID;
-	bool interrupts_remain = false;
 
 	/*
 	 * Find the first enabled and pending interrupt ID, return it, and
@@ -592,32 +603,19 @@
 		uint32_t enabled_and_pending =
 			current->interrupts.interrupt_enabled[i] &
 			current->interrupts.interrupt_pending[i];
-		if (enabled_and_pending == 0) {
-			continue;
-		}
-
-		if (first_interrupt != HF_INVALID_INTID) {
-			interrupts_remain = true;
-			break;
-		}
-
-		uint8_t bit_index = ctz(enabled_and_pending);
-		/* Mark it as no longer pending. */
-		current->interrupts.interrupt_pending[i] &= ~(1u << bit_index);
-		first_interrupt = i * INTERRUPT_REGISTER_BITS + bit_index;
-
-		enabled_and_pending = current->interrupts.interrupt_enabled[i] &
-				      current->interrupts.interrupt_pending[i];
 		if (enabled_and_pending != 0) {
-			interrupts_remain = true;
+			uint8_t bit_index = ctz(enabled_and_pending);
+			/*
+			 * Mark it as no longer pending and decrement the count.
+			 */
+			current->interrupts.interrupt_pending[i] &=
+				~(1u << bit_index);
+			current->interrupts.enabled_and_pending_count--;
+			first_interrupt =
+				i * INTERRUPT_REGISTER_BITS + bit_index;
 			break;
 		}
 	}
-	/*
-	 * If there are no more enabled and pending interrupts left, clear the
-	 * VI bit.
-	 */
-	arch_regs_set_virtual_interrupt(&current->regs, interrupts_remain);
 
 	sl_unlock(&current->lock);
 	return first_interrupt;
@@ -644,9 +642,13 @@
  * This doesn't cause the vCPU to actually be run immediately; it will be taken
  * when the vCPU is next run, which is up to the scheduler.
  *
- * Returns 0 on success, or -1 if the target VM or vCPU doesn't exist, the
- * interrupt ID is invalid, or the current VM is not allowed to inject
- * interrupts to the target VM.
+ * Returns:
+ *  - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
+ *    ID is invalid, or the current VM is not allowed to inject interrupts to
+ *    the target VM.
+ *  - 0 on success if no further action is needed.
+ *  - 1 if it was called by the primary VM and the primary VM now needs to wake
+ *    up or kick the target vCPU.
  */
 int64_t api_inject_interrupt(uint32_t target_vm_id, uint32_t target_vcpu_idx,
 			     uint32_t intid, struct vcpu *current,
@@ -657,6 +659,7 @@
 	struct vcpu *target_vcpu;
 	struct vm *target_vm = vm_get(target_vm_id);
 	bool need_vm_lock;
+	int64_t ret = 0;
 
 	if (intid >= HF_NUM_INTIDS) {
 		return -1;
@@ -688,86 +691,105 @@
 	 * lock until we are done, so nothing should change in such a way that
 	 * we need the VM lock after all.
 	 */
-	need_vm_lock = (target_vcpu->interrupts.interrupt_enabled[intid_index] &
-			intid_mask) &&
-		       target_vcpu->state == vcpu_state_blocked_mailbox;
+	need_vm_lock =
+		(target_vcpu->interrupts.interrupt_enabled[intid_index] &
+		 ~target_vcpu->interrupts.interrupt_pending[intid_index] &
+		 intid_mask) &&
+		target_vcpu->state == vcpu_state_blocked_mailbox;
 	if (need_vm_lock) {
 		sl_unlock(&target_vcpu->lock);
 		sl_lock(&target_vm->lock);
 		sl_lock(&target_vcpu->lock);
 	}
 
-	/* Make it pending. */
-	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;
+	/*
+	 * We only need to change state and (maybe) trigger a virtual IRQ if it
+	 * is enabled and was not previously pending. Otherwise we can skip
+	 * everything except setting the pending bit.
+	 *
+	 * If you change this logic make sure to update the need_vm_lock logic
+	 * above to match.
+	 */
+	if (!(target_vcpu->interrupts.interrupt_enabled[intid_index] &
+	      ~target_vcpu->interrupts.interrupt_pending[intid_index] &
+	      intid_mask)) {
+		goto out;
+	}
+
+	/* Increment the count. */
+	target_vcpu->interrupts.enabled_and_pending_count++;
 
 	/*
-	 * If it is enabled, change state and trigger a virtual IRQ. If you
-	 * change this logic make sure to update the need_vm_lock logic above to
-	 * match.
+	 * Only need to update state if there was not already an
+	 * interrupt enabled and pending.
 	 */
-	if (target_vcpu->interrupts.interrupt_enabled[intid_index] &
-	    intid_mask) {
-		dlog("IRQ %d is enabled for VM %d VCPU %d, setting VI.\n",
-		     intid, target_vm_id, target_vcpu_idx);
-		arch_regs_set_virtual_interrupt(&target_vcpu->regs, true);
+	if (target_vcpu->interrupts.enabled_and_pending_count != 1) {
+		goto out;
+	}
 
-		if (target_vcpu->state == vcpu_state_blocked_interrupt) {
-			target_vcpu->state = vcpu_state_ready;
-		} else if (target_vcpu->state == vcpu_state_blocked_mailbox) {
-			/*
-			 * If you change this logic make sure to update the
-			 * need_vm_lock logic above to match.
-			 */
-			target_vcpu->state = vcpu_state_ready;
+	if (target_vcpu->state == vcpu_state_blocked_interrupt) {
+		target_vcpu->state = vcpu_state_ready;
+	} else if (target_vcpu->state == vcpu_state_blocked_mailbox) {
+		/*
+		 * If you change this logic make sure to update the need_vm_lock
+		 * logic above to match.
+		 */
+		target_vcpu->state = vcpu_state_ready;
 
-			/* Take target vCPU out of mailbox recv_waiter list. */
+		/* Take target vCPU out of mailbox recv_waiter list. */
+		/*
+		 * TODO: Consider using a doubly-linked list for
+		 * the receive waiter list to avoid the linear
+		 * search here.
+		 */
+		struct vcpu **previous_next_pointer =
+			&target_vm->mailbox.recv_waiter;
+		while (*previous_next_pointer != NULL &&
+		       *previous_next_pointer != target_vcpu) {
 			/*
-			 * TODO: Consider using a doubly-linked list for the
-			 * receive waiter list to avoid the linear search here.
+			 * TODO(qwandor): Do we need to lock the vCPUs somehow
+			 * while we walk the linked list, or is the VM lock
+			 * enough?
 			 */
-			struct vcpu **previous_next_pointer =
-				&target_vm->mailbox.recv_waiter;
-			while (*previous_next_pointer != NULL &&
-			       *previous_next_pointer != target_vcpu) {
-				/*
-				 * TODO(qwandor): Do we need to lock the vCPUs
-				 * somehow while we walk the linked list, or is
-				 * the VM lock enough?
-				 */
-				previous_next_pointer =
-					&(*previous_next_pointer)->mailbox_next;
-			}
-			if (*previous_next_pointer == NULL) {
-				dlog("Target VCPU state is "
-				     "vcpu_state_blocked_mailbox but is not in "
-				     "VM mailbox waiter list. This should "
-				     "never happen.\n");
-			} else {
-				*previous_next_pointer =
-					target_vcpu->mailbox_next;
-			}
+			previous_next_pointer =
+				&(*previous_next_pointer)->mailbox_next;
 		}
-
-		if (current->vm->id != HF_PRIMARY_VM_ID &&
-		    current != target_vcpu) {
-			/*
-			 * Switch to the primary so that it can switch to the
-			 * target.
-			 */
-			struct hf_vcpu_run_return ret = {
-				.code = HF_VCPU_RUN_WAKE_UP,
-				.wake_up.vm_id = target_vm_id,
-				.wake_up.vcpu = target_vcpu_idx,
-			};
-			*next = api_switch_to_primary(current, ret,
-						      vcpu_state_ready);
+		if (*previous_next_pointer == NULL) {
+			dlog("Target VCPU state is vcpu_state_blocked_mailbox "
+			     "but is not in VM mailbox waiter list. This "
+			     "should never happen.\n");
+		} else {
+			*previous_next_pointer = target_vcpu->mailbox_next;
 		}
 	}
 
+	if (current->vm->id == HF_PRIMARY_VM_ID) {
+		/*
+		 * If the call came from the primary VM, let it know that it
+		 * should run or kick the target vCPU.
+		 */
+		ret = 1;
+	} else if (current != target_vcpu) {
+		/*
+		 * Switch to the primary so that it can switch to the target, or
+		 * kick it if it is already running on a different physical CPU.
+		 */
+		struct hf_vcpu_run_return ret = {
+			.code = HF_VCPU_RUN_WAKE_UP,
+			.wake_up.vm_id = target_vm_id,
+			.wake_up.vcpu = target_vcpu_idx,
+		};
+		*next = api_switch_to_primary(current, ret, vcpu_state_ready);
+	}
+
+out:
+	/* Either way, make it pending. */
+	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;
+
 	sl_unlock(&target_vcpu->lock);
 	if (need_vm_lock) {
 		sl_unlock(&target_vm->lock);
 	}
 
-	return 0;
+	return ret;
 }