Begin to abstract the ABI.

Different architectures or standards will require different ABIs but
this should be hidden from the Hafnium API. Further changes are required
to support returning multiple registers but that will be hidden from the
API.

Change-Id: I35bc674c35bd4bb4c8c30e02f1075024a3bc44db
diff --git a/src/BUILD.gn b/src/BUILD.gn
index eb0f76f..cad0a06 100644
--- a/src/BUILD.gn
+++ b/src/BUILD.gn
@@ -106,11 +106,15 @@
 executable("unit_tests") {
   testonly = true
   sources = [
+    "abi_test.cc",
     "fdt_handler_test.cc",
     "fdt_test.cc",
     "mm_test.cc",
   ]
-  cflags_cc = [ "-Wno-c99-extensions" ]
+  cflags_cc = [
+    "-Wno-c99-extensions",
+    "-Wno-nested-anon-types",
+  ]
   ldflags = [
     "-Xlinker",
     "-defsym=text_begin=0",
diff --git a/src/abi_test.cc b/src/abi_test.cc
new file mode 100644
index 0000000..d6bff21
--- /dev/null
+++ b/src/abi_test.cc
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+extern "C" {
+#include "vmapi/hf/abi.h"
+}
+
+#include <gmock/gmock.h>
+
+namespace
+{
+using ::testing::Eq;
+
+/**
+ * Simulate an uninitialized hf_vcpu_run_return so it can be detected if any
+ * uninitialized fields make their way into the encoded form which would
+ * indicate a data leak.
+ */
+struct hf_vcpu_run_return dirty_vcpu_run_return()
+{
+	struct hf_vcpu_run_return res;
+	memset(&res, 0xc5, sizeof(res));
+	return res;
+}
+
+/**
+ * Simulate an uninitialized hf_mailbox_receive_return so it can be detected if
+ * any uninitialized fields make their way into the encoded form which would
+ * indicate a data leak.
+ */
+struct hf_mailbox_receive_return dirty_mailbox_receive_return()
+{
+	struct hf_mailbox_receive_return res;
+	memset(&res, 0xc5, sizeof(res));
+	return res;
+}
+
+/**
+ * Encode a yield response without leaking.
+ */
+TEST(abi, hf_vcpu_run_return_encode_yield)
+{
+	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
+	res.code = HF_VCPU_RUN_YIELD;
+	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0));
+}
+
+/**
+ * Decode a yield response ignoring the irrelevant bits.
+ */
+TEST(abi, hf_vcpu_run_return_decode_yield)
+{
+	struct hf_vcpu_run_return res =
+		hf_vcpu_run_return_decode(0x1a1a1a1a2b2b2b00);
+	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_YIELD));
+}
+
+/**
+ * Encode wait-for-interrupt response without leaking.
+ */
+TEST(abi, hf_vcpu_run_return_encode_wait_for_interrupt)
+{
+	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
+	res.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
+	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(1));
+}
+
+/**
+ * Decode a wait-for-interrupt response ignoring the irrelevant bits.
+ */
+TEST(abi, hf_vcpu_run_return_decode_wait_for_interrupt)
+{
+	struct hf_vcpu_run_return res =
+		hf_vcpu_run_return_decode(0x1234abcdbadb0101);
+	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAIT_FOR_INTERRUPT));
+}
+
+/**
+ * Encode wake up response without leaking.
+ */
+TEST(abi, hf_vcpu_run_return_encode_wake_up)
+{
+	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
+	res.code = HF_VCPU_RUN_WAKE_UP;
+	res.wake_up.vm_id = 0x12345678;
+	res.wake_up.vcpu = 0xabcd;
+	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0x12345678abcd0002));
+}
+
+/**
+ * Decode a wake up response ignoring the irrelevant bits.
+ */
+TEST(abi, hf_vcpu_run_return_decode_wake_up)
+{
+	struct hf_vcpu_run_return res =
+		hf_vcpu_run_return_decode(0xbeefd00df00daf02);
+	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAKE_UP));
+	EXPECT_THAT(res.wake_up.vm_id, Eq(0xbeefd00d));
+	EXPECT_THAT(res.wake_up.vcpu, Eq(0xf00d));
+}
+
+/**
+ * Encode message response without leaking.
+ */
+TEST(abi, hf_vcpu_run_return_encode_message)
+{
+	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
+	res.code = HF_VCPU_RUN_MESSAGE;
+	res.message.size = 0xdeadbeef;
+	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0xdeadbeef00000003));
+}
+
+/**
+ * Decode a message response ignoring the irrelevant bits.
+ */
+TEST(abi, hf_vcpu_run_return_decode_message)
+{
+	struct hf_vcpu_run_return res =
+		hf_vcpu_run_return_decode(0x1123581314916203);
+	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_MESSAGE));
+	EXPECT_THAT(res.message.size, Eq(0x11235813));
+}
+
+/**
+ * Encode sleep response without leaking.
+ */
+TEST(abi, hf_vcpu_run_return_encode_sleep)
+{
+	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
+	res.code = HF_VCPU_RUN_SLEEP;
+	res.sleep.ns = 0xcafed00dfeeded;
+	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0xcafed00dfeeded04));
+}
+
+/**
+ * Encoding a sleep response with too large a sleep duration will drop the top
+ * octet.
+ */
+TEST(abi, hf_vcpu_run_return_encode_sleep_too_long)
+{
+	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
+	res.code = HF_VCPU_RUN_SLEEP;
+	res.sleep.ns = 0xcc88888888888888;
+	EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0x8888888888888804));
+}
+
+/**
+ * Decode a sleep response.
+ */
+TEST(abi, hf_vcpu_run_return_decode_sleep)
+{
+	struct hf_vcpu_run_return res =
+		hf_vcpu_run_return_decode(0x1a2b3c4d5e6f7704);
+	EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_SLEEP));
+	EXPECT_THAT(res.sleep.ns, Eq(0x1a2b3c4d5e6f77));
+}
+
+/**
+ * Encode a mailbox receive response without leaking.
+ */
+TEST(abi, hf_mailbox_receive_return_encode)
+{
+	struct hf_mailbox_receive_return res = dirty_mailbox_receive_return();
+	res.vm_id = 0x12345678;
+	res.size = 0xaabbccdd;
+	EXPECT_THAT(hf_mailbox_receive_return_encode(res),
+		    Eq(0xaabbccdd12345678));
+}
+
+/**
+ * Decode a mailbox receive response.
+ */
+TEST(abi, hf_mailbox_receive_return_decode)
+{
+	struct hf_mailbox_receive_return res =
+		hf_mailbox_receive_return_decode(0x8badf00d00ddba11);
+	EXPECT_THAT(res.vm_id, Eq(0x00ddba11));
+	EXPECT_THAT(res.size, Eq(0x8badf00d));
+}
+
+} /* namespace */
diff --git a/src/api.c b/src/api.c
index b2e2c36..3b8d921 100644
--- a/src/api.c
+++ b/src/api.c
@@ -34,7 +34,7 @@
  * to cause HF_VCPU_RUN to return and the primary VM to regain control of the
  * cpu.
  */
-static struct vcpu *api_switch_to_primary(size_t primary_retval,
+static struct vcpu *api_switch_to_primary(struct hf_vcpu_run_return primary_ret,
 					  enum vcpu_state secondary_state)
 {
 	struct vcpu *vcpu = cpu()->current;
@@ -45,9 +45,10 @@
 	vm_set_current(primary);
 
 	/*
-	 * Set the return valuefor the primary VM's call to HF_VCPU_RUN.
+	 * Set the return value for the primary VM's call to HF_VCPU_RUN.
 	 */
-	arch_regs_set_retval(&next->regs, primary_retval);
+	arch_regs_set_retval(&next->regs,
+			     hf_vcpu_run_return_encode(primary_ret));
 
 	/* Mark the vcpu as waiting. */
 	sl_lock(&vcpu->lock);
@@ -63,9 +64,10 @@
  */
 struct vcpu *api_yield(void)
 {
-	return api_switch_to_primary(
-		HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_YIELD, 0, 0),
-		vcpu_state_ready);
+	struct hf_vcpu_run_return ret = {
+		.code = HF_VCPU_RUN_YIELD,
+	};
+	return api_switch_to_primary(ret, vcpu_state_ready);
 }
 
 /**
@@ -74,9 +76,10 @@
  */
 struct vcpu *api_wait_for_interrupt(void)
 {
-	return api_switch_to_primary(
-		HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0, 0),
-		vcpu_state_blocked_interrupt);
+	struct hf_vcpu_run_return ret = {
+		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
+	};
+	return api_switch_to_primary(ret, vcpu_state_blocked_interrupt);
 }
 
 /**
@@ -110,51 +113,51 @@
 /**
  * Runs the given vcpu of the given vm.
  */
-int64_t api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx, struct vcpu **next)
+struct hf_vcpu_run_return api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx,
+				       struct vcpu **next)
 {
 	struct vm *vm;
 	struct vcpu *vcpu;
-	int64_t ret;
+	struct hf_vcpu_run_return ret = {
+		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
+	};
 
 	/* Only the primary VM can switch vcpus. */
 	if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
-		goto fail;
+		goto out;
 	}
 
 	/* Only secondary VM vcpus can be run. */
 	if (vm_id == HF_PRIMARY_VM_ID) {
-		goto fail;
+		goto out;
 	}
 
 	/* The requested VM must exist. */
 	vm = vm_get(vm_id);
 	if (vm == NULL) {
-		goto fail;
+		goto out;
 	}
 
 	/* The requested vcpu must exist. */
 	if (vcpu_idx >= vm->vcpu_count) {
-		goto fail;
+		goto out;
 	}
 
 	vcpu = &vm->vcpus[vcpu_idx];
 
 	sl_lock(&vcpu->lock);
 	if (vcpu->state != vcpu_state_ready) {
-		ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0,
-					   0);
+		ret.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
 	} else {
 		vcpu->state = vcpu_state_running;
 		vm_set_current(vm);
 		*next = vcpu;
-		ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_YIELD, 0, 0);
+		ret.code = HF_VCPU_RUN_YIELD;
 	}
 	sl_unlock(&vcpu->lock);
 
+out:
 	return ret;
-
-fail:
-	return HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0, 0);
 }
 
 /**
@@ -250,7 +253,9 @@
 	const void *from_buf;
 	uint16_t vcpu;
 	int64_t ret;
-	int64_t primary_ret;
+	struct hf_vcpu_run_return primary_ret = {
+		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
+	};
 
 	/* Limit the size of transfer. */
 	if (size > HF_MAILBOX_SIZE) {
@@ -297,8 +302,8 @@
 
 	/* Messages for the primary VM are delivered directly. */
 	if (to->id == HF_PRIMARY_VM_ID) {
-		primary_ret =
-			HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_MESSAGE, 0, size);
+		primary_ret.code = HF_VCPU_RUN_MESSAGE;
+		primary_ret.message.size = size;
 		ret = 0;
 		/*
 		 * clang-tidy isn't able to prove that
@@ -338,8 +343,11 @@
 
 		/* Return from HF_MAILBOX_RECEIVE. */
 		arch_regs_set_retval(&to_vcpu->regs,
-				     HF_MAILBOX_RECEIVE_RESPONSE(
-					     to->mailbox.recv_from_id, size));
+				     hf_mailbox_receive_return_encode((
+					     struct hf_mailbox_receive_return){
+					     .vm_id = to->mailbox.recv_from_id,
+					     .size = size,
+				     }));
 
 		sl_unlock(&to_vcpu->lock);
 
@@ -347,7 +355,9 @@
 	}
 
 	/* Return to the primary VM directly or with a switch. */
-	primary_ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAKE_UP, to->id, vcpu);
+	primary_ret.code = HF_VCPU_RUN_WAKE_UP;
+	primary_ret.wake_up.vm_id = to->id;
+	primary_ret.wake_up.vcpu = vcpu;
 	ret = 0;
 
 out:
@@ -364,7 +374,7 @@
 
 	/* If the sender is the primary, return the vcpu to schedule. */
 	if (from->id == HF_PRIMARY_VM_ID) {
-		return vcpu;
+		return primary_ret.wake_up.vcpu;
 	}
 
 	/* Switch to primary for scheduling and return success to the sender. */
@@ -378,18 +388,21 @@
  *
  * No new messages can be received until the mailbox has been cleared.
  */
-int64_t api_mailbox_receive(bool block, struct vcpu **next)
+struct hf_mailbox_receive_return api_mailbox_receive(bool block,
+						     struct vcpu **next)
 {
 	struct vcpu *vcpu = cpu()->current;
 	struct vm *vm = vcpu->vm;
-	int64_t ret = 0;
+	struct hf_mailbox_receive_return ret = {
+		.vm_id = HF_INVALID_VM_ID,
+	};
 
 	/*
 	 * The primary VM will receive messages as a status code from running
 	 * vcpus and must not call this function.
 	 */
 	if (vm->id == HF_PRIMARY_VM_ID) {
-		return -1;
+		return ret;
 	}
 
 	sl_lock(&vm->lock);
@@ -397,14 +410,13 @@
 	/* Return pending messages without blocking. */
 	if (vm->mailbox.state == mailbox_state_received) {
 		vm->mailbox.state = mailbox_state_read;
-		ret = HF_MAILBOX_RECEIVE_RESPONSE(vm->mailbox.recv_from_id,
-						  vm->mailbox.recv_bytes);
+		ret.vm_id = vm->mailbox.recv_from_id;
+		ret.size = vm->mailbox.recv_bytes;
 		goto out;
 	}
 
 	/* No pending message so fail if not allowed to block. */
 	if (!block) {
-		ret = -1;
 		goto out;
 	}
 
diff --git a/src/arch/aarch64/handler.c b/src/arch/aarch64/handler.c
index 53f8c88..8fa5733 100644
--- a/src/arch/aarch64/handler.c
+++ b/src/arch/aarch64/handler.c
@@ -198,7 +198,8 @@
 		break;
 
 	case HF_VCPU_RUN:
-		ret.user_ret = api_vcpu_run(arg1, arg2, &ret.new);
+		ret.user_ret = hf_vcpu_run_return_encode(
+			api_vcpu_run(arg1, arg2, &ret.new));
 		break;
 
 	case HF_VM_CONFIGURE:
@@ -210,7 +211,8 @@
 		break;
 
 	case HF_MAILBOX_RECEIVE:
-		ret.user_ret = api_mailbox_receive(arg1, &ret.new);
+		ret.user_ret = hf_mailbox_receive_return_encode(
+			api_mailbox_receive(arg1, &ret.new));
 		break;
 
 	case HF_MAILBOX_CLEAR: