Differentiate preemption from yielding.

Yielding is voluntary and indicates that something else should be given the
chance to run. Preemption is involuntary and indicates that the vCPU still
wants to run.
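
For context, a minimal sketch of how the primary VM's scheduler might act on
the two codes (handle_run_return(), requeue_now(), requeue_later() and
struct run_slot are hypothetical placeholders; only the HF_VCPU_RUN_* codes,
struct hf_vcpu_run_return and hf_vcpu_run_return_decode() come from the ABI):

	static void handle_run_return(struct run_slot *slot, uint64_t raw)
	{
		struct hf_vcpu_run_return ret = hf_vcpu_run_return_decode(raw);

		switch (ret.code) {
		case HF_VCPU_RUN_PREEMPTED:
			/* Involuntary: the vCPU still wants to run now. */
			requeue_now(slot);
			break;
		case HF_VCPU_RUN_YIELD:
			/* Voluntary: let something else run first. */
			requeue_later(slot);
			break;
		default:
			break;
		}
	}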

Change-Id: I9ba2445c3d67ea140bf33005a08084b29457e42f
diff --git a/src/abi_test.cc b/src/abi_test.cc
index d6bff21..11ab032 100644
--- a/src/abi_test.cc
+++ b/src/abi_test.cc
@@ -49,13 +49,33 @@
 }
 
 /**
+ * Encode a preempted response without leaking.
+ */
+TEST(abi, hf_vcpu_run_return_encode_preempted)
+{
+	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
+	res.code = HF_VCPU_RUN_PREEMPTED;
+	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0));
+}
+
+/**
+ * Decode a preempted response ignoring the irrelevant bits.
+ */
+TEST(abi, hf_vcpu_run_return_decode_preempted)
+{
+	struct hf_vcpu_run_return res =
+		hf_vcpu_run_return_decode(0x1a1a1a1a2b2b2b00);
+	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_PREEMPTED));
+}
+
+/**
  * Encode a yield response without leaking.
  */
 TEST(abi, hf_vcpu_run_return_encode_yield)
 {
 	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
 	res.code = HF_VCPU_RUN_YIELD;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0));
+	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(1));
 }
 
 /**
@@ -64,7 +84,7 @@
 TEST(abi, hf_vcpu_run_return_decode_yield)
 {
 	struct hf_vcpu_run_return res =
-		hf_vcpu_run_return_decode(0x1a1a1a1a2b2b2b00);
+		hf_vcpu_run_return_decode(0x1a1a1a1a2b2b2b01);
 	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_YIELD));
 }
 
@@ -75,7 +95,7 @@
 {
 	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
 	res.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(1));
+	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(2));
 }
 
 /**
@@ -84,7 +104,7 @@
 TEST(abi, hf_vcpu_run_return_decode_wait_for_interrupt)
 {
 	struct hf_vcpu_run_return res =
-		hf_vcpu_run_return_decode(0x1234abcdbadb0101);
+		hf_vcpu_run_return_decode(0x1234abcdbadb0102);
 	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAIT_FOR_INTERRUPT));
 }
 
@@ -97,7 +117,7 @@
 	res.code = HF_VCPU_RUN_WAKE_UP;
 	res.wake_up.vm_id = 0x12345678;
 	res.wake_up.vcpu = 0xabcd;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0x12345678abcd0002));
+	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0x12345678abcd0003));
 }
 
 /**
@@ -106,7 +126,7 @@
 TEST(abi, hf_vcpu_run_return_decode_wake_up)
 {
 	struct hf_vcpu_run_return res =
-		hf_vcpu_run_return_decode(0xbeefd00df00daf02);
+		hf_vcpu_run_return_decode(0xbeefd00df00daf03);
 	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAKE_UP));
 	EXPECT_THAT(res.wake_up.vm_id, Eq(0xbeefd00d));
 	EXPECT_THAT(res.wake_up.vcpu, Eq(0xf00d));
@@ -120,7 +140,7 @@
 	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
 	res.code = HF_VCPU_RUN_MESSAGE;
 	res.message.size = 0xdeadbeef;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0xdeadbeef00000003));
+	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0xdeadbeef00000004));
 }
 
 /**
@@ -129,7 +149,7 @@
 TEST(abi, hf_vcpu_run_return_decode_message)
 {
 	struct hf_vcpu_run_return res =
-		hf_vcpu_run_return_decode(0x1123581314916203);
+		hf_vcpu_run_return_decode(0x1123581314916204);
 	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_MESSAGE));
 	EXPECT_THAT(res.message.size, Eq(0x11235813));
 }
@@ -142,7 +162,7 @@
 	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
 	res.code = HF_VCPU_RUN_SLEEP;
 	res.sleep.ns = 0xcafed00dfeeded;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0xcafed00dfeeded04));
+	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0xcafed00dfeeded05));
 }
 
 /**
@@ -154,7 +174,7 @@
 	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
 	res.code = HF_VCPU_RUN_SLEEP;
 	res.sleep.ns = 0xcc88888888888888;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0x8888888888888804));
+	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0x8888888888888805));
 }
 
 /**
@@ -163,7 +183,7 @@
 TEST(abi, hf_vcpu_run_return_decode_sleep)
 {
 	struct hf_vcpu_run_return res =
-		hf_vcpu_run_return_decode(0x1a2b3c4d5e6f7704);
+		hf_vcpu_run_return_decode(0x1a2b3c4d5e6f7705);
 	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_SLEEP));
 	EXPECT_THAT(res.sleep.ns, Eq(0x1a2b3c4d5e6f77));
 }
diff --git a/src/api.c b/src/api.c
index 004122f..8ff3b60 100644
--- a/src/api.c
+++ b/src/api.c
@@ -76,8 +76,21 @@
 }
 
 /**
- * Returns to the primary vm leaving the current vcpu ready to be scheduled
- * again.
+ * Returns to the primary vm and signals that the vcpu still has work to do.
+ */
+struct vcpu *api_preempt(struct vcpu *current)
+{
+	struct hf_vcpu_run_return ret = {
+		.code = HF_VCPU_RUN_PREEMPTED,
+	};
+
+	return api_switch_to_primary(current, ret, vcpu_state_ready);
+}
+
+/**
+ * Returns to the primary vm to allow this cpu to be used for other tasks as the
+ * vcpu does not have work to do at this moment. The current vcpu is marked as
+ * ready to be scheduled again.
  */
 struct vcpu *api_yield(struct vcpu *current)
 {
@@ -240,10 +253,16 @@
 		goto out;
 	}
 
+	/* Switch to the vcpu. */
 	*next = vcpu;
-	ret.code = HF_VCPU_RUN_YIELD;
 
-	/* Update return value if one was injected. */
+	/*
+	 * Set a placeholder return code for the scheduler. This will be
+	 * overwritten when the switch back to the primary occurs.
+	 */
+	ret.code = HF_VCPU_RUN_PREEMPTED;
+
+	/* Update return value for the next vcpu if one was injected. */
 	if (vcpu_retval.force) {
 		arch_regs_set_retval(&vcpu->regs, vcpu_retval.value);
 	}
diff --git a/src/arch/aarch64/handler.c b/src/arch/aarch64/handler.c
index a875d00..9fbaa3c 100644
--- a/src/arch/aarch64/handler.c
+++ b/src/arch/aarch64/handler.c
@@ -330,10 +330,9 @@
 
 struct vcpu *irq_lower(void)
 {
-	/* TODO: Only switch if we know the interrupt was not for the secondary
-	 * VM. */
+	/* TODO: Only switch when the interrupt isn't for the current VM. */
 	/* Switch back to primary VM, interrupts will be handled there. */
-	return api_yield(current());
+	return api_preempt(current());
 }
 
 struct vcpu *fiq_lower(void)