Encode run return values in an SPCI-compatible way.
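
The encoder now returns a struct spci_value and takes the id and vCPU
index of the vCPU whose run is being reported, so the result fits the
standard SPCI register ABI; the decoder likewise takes a struct
spci_value. A sketch of the resulting interface, as implied by the
updated tests (the exact parameter types are assumptions):

  struct spci_value hf_vcpu_run_return_encode(
          struct hf_vcpu_run_return res, spci_vm_id_t vm_id,
          spci_vcpu_index_t vcpu_index);
  struct hf_vcpu_run_return hf_vcpu_run_return_decode(
          struct spci_value res);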

Bug: 141469322
Change-Id: I269a7797b72e1e7d36c153cd41050584db8415b3
diff --git a/src/abi_test.cc b/src/abi_test.cc
index 0668ce0..fb30f57 100644
--- a/src/abi_test.cc
+++ b/src/abi_test.cc
@@ -16,6 +16,8 @@
 
 extern "C" {
 #include "vmapi/hf/abi.h"
+
+#include "vmapi/hf/spci.h"
 }
 
 #include <gmock/gmock.h>
@@ -37,13 +39,39 @@
 }
 
 /**
+ * Simulate an uninitialized spci_value so that it can be detected if any
+ * uninitialized fields make their way into the encoded form, which would
+ * indicate a data leak.
+ */
+struct spci_value dirty_spci_value()
+{
+	struct spci_value res;
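+	/* Fill with an arbitrary non-zero pattern so leaks are easy to spot. */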
+	memset(&res, 0xc5, sizeof(res));
+	return res;
+}
+
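+/** Field-by-field equality for spci_value, as used by the SpciEq matcher. */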
+bool operator==(const spci_value a, const spci_value b)
+{
+	return a.func == b.func && a.arg1 == b.arg1 && a.arg2 == b.arg2 &&
+	       a.arg3 == b.arg3 && a.arg4 == b.arg4 && a.arg5 == b.arg5 &&
+	       a.arg6 == b.arg6 && a.arg7 == b.arg7;
+}
+
+MATCHER_P(SpciEq, expected, "")
+{
+	return arg == expected;
+}
+
+/**
  * Encode a preempted response without leaking.
  */
 TEST(abi, hf_vcpu_run_return_encode_preempted)
 {
 	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
 	res.code = HF_VCPU_RUN_PREEMPTED;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0));
+	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+		    SpciEq((struct spci_value){.func = SPCI_INTERRUPT_32,
+					       .arg1 = 0x11112222}));
 }
 
 /**
@@ -51,8 +79,9 @@
  */
 TEST(abi, hf_vcpu_run_return_decode_preempted)
 {
-	struct hf_vcpu_run_return res =
-		hf_vcpu_run_return_decode(0x1a1a1a1a2b2b2b00);
+	struct spci_value v = dirty_spci_value();
+	v.func = SPCI_INTERRUPT_32;
+	struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
 	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_PREEMPTED));
 }
 
@@ -63,7 +92,9 @@
 {
 	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
 	res.code = HF_VCPU_RUN_YIELD;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(1));
+	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+		    SpciEq((struct spci_value){.func = SPCI_YIELD_32,
+					       .arg1 = 0x22221111}));
 }
 
 /**
@@ -71,8 +102,9 @@
  */
 TEST(abi, hf_vcpu_run_return_decode_yield)
 {
-	struct hf_vcpu_run_return res =
-		hf_vcpu_run_return_decode(0x1a1a1a1a2b2b2b01);
+	struct spci_value v = dirty_spci_value();
+	v.func = SPCI_YIELD_32;
+	struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
 	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_YIELD));
 }
 
@@ -84,19 +116,43 @@
 	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
 	res.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
 	res.sleep.ns = HF_SLEEP_INDEFINITE;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0xffffffffffffff02));
+	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+		    SpciEq((struct spci_value){
+			    .func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
+			    .arg1 = 0x22221111,
+			    .arg2 = SPCI_SLEEP_INDEFINITE}));
 }
 
 /**
- * Encoding wait-for-interrupt response with too large sleep duration will drop
- * the top octet.
+ * Encoding a wait-for-interrupt response with a large sleep duration won't
+ * drop the top octet.
  */
-TEST(abi, hf_vcpu_run_return_encode_wait_for_interrupt_sleep_too_long)
+TEST(abi, hf_vcpu_run_return_encode_wait_for_interrupt_sleep_long)
 {
 	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
 	res.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
 	res.sleep.ns = 0xcc22888888888888;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0x2288888888888802));
+	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+		    SpciEq((struct spci_value){
+			    .func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
+			    .arg1 = 0x22221111,
+			    .arg2 = 0xcc22888888888888}));
+}
+
+/**
+ * Encoding a wait-for-interrupt response with a zero sleep duration will make
+ * the duration non-zero for SPCI compatibility.
+ */
+TEST(abi, hf_vcpu_run_return_encode_wait_for_interrupt_sleep_zero)
+{
+	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
+	res.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
+	res.sleep.ns = 0;
+	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+		    SpciEq((struct spci_value){
+			    .func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
+			    .arg1 = 0x22221111,
+			    .arg2 = 1}));
 }
 
 /**
@@ -104,13 +160,28 @@
  */
 TEST(abi, hf_vcpu_run_return_decode_wait_for_interrupt)
 {
-	struct hf_vcpu_run_return res =
-		hf_vcpu_run_return_decode(0x1234abcdbadb0102);
+	struct spci_value v = dirty_spci_value();
+	v.func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT;
+	v.arg2 = 0x1234abcdbadb01;
+	struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
 	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAIT_FOR_INTERRUPT));
 	EXPECT_THAT(res.sleep.ns, Eq(0x1234abcdbadb01));
 }
 
 /**
+ * Decode a wait-for-interrupt response waiting indefinitely.
+ */
+TEST(abi, hf_vcpu_run_return_decode_wait_for_interrupt_indefinite)
+{
+	struct spci_value v = dirty_spci_value();
+	v.func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT;
+	v.arg2 = SPCI_SLEEP_INDEFINITE;
+	struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
+	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAIT_FOR_INTERRUPT));
+	EXPECT_THAT(res.sleep.ns, Eq(HF_SLEEP_INDEFINITE));
+}
+
+/**
  * Encode wait-for-message response without leaking.
  */
 TEST(abi, hf_vcpu_run_return_encode_wait_for_message)
@@ -118,19 +189,40 @@
 	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
 	res.code = HF_VCPU_RUN_WAIT_FOR_MESSAGE;
 	res.sleep.ns = HF_SLEEP_INDEFINITE;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0xffffffffffffff03));
+	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+		    SpciEq((struct spci_value){.func = SPCI_MSG_WAIT_32,
+					       .arg1 = 0x22221111,
+					       .arg2 = SPCI_SLEEP_INDEFINITE}));
 }
 
 /**
- * Encoding wait-for-message response with too large sleep duration will drop
+ * Encoding a wait-for-message response with a large sleep duration won't drop
  * the top octet.
  */
-TEST(abi, hf_vcpu_run_return_encode_wait_for_message_sleep_too_long)
+TEST(abi, hf_vcpu_run_return_encode_wait_for_message_sleep_long)
 {
 	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
 	res.code = HF_VCPU_RUN_WAIT_FOR_MESSAGE;
 	res.sleep.ns = 0xaa99777777777777;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0x9977777777777703));
+	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+		    SpciEq((struct spci_value){.func = SPCI_MSG_WAIT_32,
+					       .arg1 = 0x22221111,
+					       .arg2 = 0xaa99777777777777}));
+}
+
+/**
+ * Encoding a wait-for-message response with a zero sleep duration will make
+ * the duration non-zero for SPCI compatibility.
+ */
+TEST(abi, hf_vcpu_run_return_encode_wait_for_message_sleep_zero)
+{
+	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
+	res.code = HF_VCPU_RUN_WAIT_FOR_MESSAGE;
+	res.sleep.ns = 0;
+	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+		    SpciEq((struct spci_value){.func = SPCI_MSG_WAIT_32,
+					       .arg1 = 0x22221111,
+					       .arg2 = 1}));
 }
 
 /**
@@ -138,13 +230,28 @@
  */
 TEST(abi, hf_vcpu_run_return_decode_wait_for_message)
 {
-	struct hf_vcpu_run_return res =
-		hf_vcpu_run_return_decode(0x12347654badb0103);
+	struct spci_value v = dirty_spci_value();
+	v.func = SPCI_MSG_WAIT_32;
+	v.arg2 = 0x12347654badb01;
+	struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
 	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAIT_FOR_MESSAGE));
 	EXPECT_THAT(res.sleep.ns, Eq(0x12347654badb01));
 }
 
 /**
+ * Decode a wait-for-message response waiting indefinitely.
+ */
+TEST(abi, hf_vcpu_run_return_decode_wait_for_message_indefinite)
+{
+	struct spci_value v = dirty_spci_value();
+	v.func = SPCI_MSG_WAIT_32;
+	v.arg2 = SPCI_SLEEP_INDEFINITE;
+	struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
+	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAIT_FOR_MESSAGE));
+	EXPECT_THAT(res.sleep.ns, Eq(HF_SLEEP_INDEFINITE));
+}
+
+/**
  * Encode wake up response without leaking.
  */
 TEST(abi, hf_vcpu_run_return_encode_wake_up)
@@ -153,7 +260,9 @@
 	res.code = HF_VCPU_RUN_WAKE_UP;
 	res.wake_up.vm_id = 0x1234;
 	res.wake_up.vcpu = 0xabcd;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0x1234abcd0004));
+	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+		    SpciEq((struct spci_value){.func = HF_SPCI_RUN_WAKE_UP,
+					       .arg1 = 0xabcd1234}));
 }
 
 /**
@@ -161,8 +270,10 @@
  */
 TEST(abi, hf_vcpu_run_return_decode_wake_up)
 {
-	struct hf_vcpu_run_return res =
-		hf_vcpu_run_return_decode(0xbeeff00daf04);
+	struct spci_value v = dirty_spci_value();
+	v.func = HF_SPCI_RUN_WAKE_UP;
+	v.arg1 = 0x88888888f00dbeef;
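+	/* Bits of arg1 above the vcpu/vm_id pair are ignored when decoding. */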
+	struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
 	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAKE_UP));
 	EXPECT_THAT(res.wake_up.vm_id, Eq(0xbeef));
 	EXPECT_THAT(res.wake_up.vcpu, Eq(0xf00d));
@@ -177,7 +288,10 @@
 	res.code = HF_VCPU_RUN_MESSAGE;
 	res.message.vm_id = 0xf007;
 	res.message.size = 0xcafe1971;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0xcafe197100f00705));
+	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+		    SpciEq((struct spci_value){.func = SPCI_MSG_SEND_32,
+					       .arg1 = 0x1111f007,
+					       .arg3 = 0xcafe1971}));
 }
 
 /**
@@ -185,8 +299,11 @@
  */
 TEST(abi, hf_vcpu_run_return_decode_message)
 {
-	struct hf_vcpu_run_return res =
-		hf_vcpu_run_return_decode(0x1123581314916205);
+	struct spci_value v = dirty_spci_value();
+	v.func = SPCI_MSG_SEND_32;
+	v.arg1 = 0x1111222233339162;
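+	/* Only the low 16 bits of arg1 carry the message vm_id. */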
+	v.arg3 = 0x11235813;
+	struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
 	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_MESSAGE));
 	EXPECT_THAT(res.message.vm_id, Eq(0x9162));
 	EXPECT_THAT(res.message.size, Eq(0x11235813));
@@ -199,7 +316,8 @@
 {
 	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
 	res.code = HF_VCPU_RUN_NOTIFY_WAITERS;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(6));
+	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+		    SpciEq((struct spci_value){.func = SPCI_RX_RELEASE_32}));
 }
 
 /**
@@ -207,8 +325,9 @@
  */
 TEST(abi, hf_vcpu_run_return_decode_notify_waiters)
 {
-	struct hf_vcpu_run_return res =
-		hf_vcpu_run_return_decode(0x1a1a1a1a2b2b2b06);
+	struct spci_value v = dirty_spci_value();
+	v.func = SPCI_RX_RELEASE_32;
+	struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
 	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_NOTIFY_WAITERS));
 }
 
@@ -219,7 +338,10 @@
 {
 	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
 	res.code = HF_VCPU_RUN_ABORTED;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(7));
+	EXPECT_THAT(
+		hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+		SpciEq((struct spci_value){.func = SPCI_ERROR_32,
+					   .arg2 = (uint64_t)SPCI_ABORTED}));
 }
 
 /**
@@ -227,8 +349,10 @@
  */
 TEST(abi, hf_vcpu_run_return_decode_aborted)
 {
-	struct hf_vcpu_run_return res =
-		hf_vcpu_run_return_decode(0x31dbac4810fbc507);
+	struct spci_value v = dirty_spci_value();
+	v.func = SPCI_ERROR_32;
+	v.arg2 = SPCI_ABORTED;
+	struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
 	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_ABORTED));
 }
 
diff --git a/src/api.c b/src/api.c
index d009538..f20dc19 100644
--- a/src/api.c
+++ b/src/api.c
@@ -62,8 +62,7 @@
  * Switches the physical CPU back to the corresponding vcpu of the primary VM.
  *
  * This triggers the scheduling logic to run. Run in the context of secondary VM
- * to cause HF_VCPU_RUN to return and the primary VM to regain control of the
- * cpu.
+ * to cause SPCI_RUN to return and the primary VM to regain control of the CPU.
  */
 static struct vcpu *api_switch_to_primary(struct vcpu *current,
 					  struct hf_vcpu_run_return primary_ret,
@@ -91,10 +90,9 @@
 	}
 
-	/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
+	/* Set the return value for the primary VM's call to SPCI_RUN. */
-	arch_regs_set_retval(
-		&next->regs,
-		(struct spci_value){
-			.func = hf_vcpu_run_return_encode(primary_ret)});
+	arch_regs_set_retval(&next->regs, hf_vcpu_run_return_encode(
+						  primary_ret, current->vm->id,
+						  vcpu_index(current)));
 
 	/* Mark the current vcpu as waiting. */
 	sl_lock(&current->lock);
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 77b10e3..885a383 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -344,6 +344,14 @@
 	case SPCI_MSG_POLL_32:
 		*args = api_spci_msg_recv(false, current(), next);
 		return true;
+	case SPCI_RUN_32: {
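+		/* arg1 packs the target as (vm_id << 16) | vcpu_index. */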
+		struct vcpu *vcpu = current();
+		*args = hf_vcpu_run_return_encode(
+			api_vcpu_run((args->arg1 >> 16) & 0xffff,
+				     args->arg1 & 0xffff, vcpu, next),
+			vcpu->vm->id, vcpu_index(vcpu));
+		return true;
+	}
 	}
 
 	return false;
@@ -441,11 +449,6 @@
 		vcpu->regs.r[0] = api_vcpu_get_count(args.arg1, vcpu);
 		break;
 
-	case HF_VCPU_RUN:
-		vcpu->regs.r[0] = hf_vcpu_run_return_encode(
-			api_vcpu_run(args.arg1, args.arg2, vcpu, &next));
-		break;
-
 	case HF_VM_CONFIGURE:
 		vcpu->regs.r[0] = api_vm_configure(
 			ipa_init(args.arg1), ipa_init(args.arg2), vcpu, &next);