Basic memory sharing.

This introduces the basic functionality for memory sharing. The API used
to invoke memory sharing is a placeholder while we decide what the proper
interface should look like.

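As a rough illustration of the placeholder ABI, a VM could lend one of its
page-aligned pages to another VM along these lines (a sketch only;
target_vm_id stands in for the recipient's VM ID):

    alignas(PAGE_SIZE) static uint8_t page[PAGE_SIZE];

    /* Lend the page: the caller keeps ownership but gives up access. */
    if (hf_share_memory(target_vm_id, (hf_ipaddr_t)page, PAGE_SIZE,
                        HF_MEMORY_LEND) != 0) {
            /* The share was not allowed or failed. */
    }
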
Change-Id: Ia5c2c224119d896b3fc2294b0828626ec325e1e7
diff --git a/inc/hf/api.h b/inc/hf/api.h
index 8357a89..e9ffe88 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -39,6 +39,8 @@
 int64_t api_mailbox_clear(struct vcpu *current, struct vcpu **next);
 int64_t api_mailbox_writable_get(const struct vcpu *current);
 int64_t api_mailbox_waiter_get(uint32_t vm_id, const struct vcpu *current);
+int64_t api_share_memory(uint32_t vm_id, ipaddr_t addr, size_t size,
+			 enum hf_share share, struct vcpu *current);
 
 struct vcpu *api_preempt(struct vcpu *current);
 struct vcpu *api_yield(struct vcpu *current);
diff --git a/inc/hf/spinlock.h b/inc/hf/spinlock.h
index 7a307f6..57ce3a8 100644
--- a/inc/hf/spinlock.h
+++ b/inc/hf/spinlock.h
@@ -39,6 +39,21 @@
 	}
 }
 
+/**
+ * Locks both locks, acquiring the lock at the lowest address first to
+ * enforce a consistent ordering for locks of the same kind.
+ */
+static inline void sl_lock_both(struct spinlock *a, struct spinlock *b)
+{
+	if (a < b) {
+		sl_lock(a);
+		sl_lock(b);
+	} else {
+		sl_lock(b);
+		sl_lock(a);
+	}
+}
+
 static inline void sl_unlock(struct spinlock *l)
 {
 	atomic_flag_clear_explicit(&l->v, memory_order_release);
diff --git a/inc/hf/std.h b/inc/hf/std.h
index c872fd4..6e5e82a 100644
--- a/inc/hf/std.h
+++ b/inc/hf/std.h
@@ -19,6 +19,8 @@
 #include <stddef.h>
 #include <stdint.h>
 
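+/** Evaluates to the number of elements in a statically-sized array. */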
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
 void *memset(void *s, int c, size_t n);
 void *memcpy(void *dst, const void *src, size_t n);
 void *memmove(void *dst, const void *src, size_t n);
diff --git a/inc/vmapi/hf/abi.h b/inc/vmapi/hf/abi.h
index 451de41..4716f01 100644
--- a/inc/vmapi/hf/abi.h
+++ b/inc/vmapi/hf/abi.h
@@ -94,6 +94,26 @@
 	uint32_t size;
 };
 
+enum hf_share {
+	/**
+	 * Relinquish ownership and access to the memory and pass them to the
+	 * recipient.
+	 */
+	HF_MEMORY_GIVE,
+
+	/**
+	 * Retain ownership of the memory but relinquish access to the
+	 * recipient.
+	 */
+	HF_MEMORY_LEND,
+
+	/**
+	 * Retain ownership and access but additionally allow access to the
+	 * recipient.
+	 */
+	HF_MEMORY_SHARE,
+};
+
 /**
  * Encode an hf_vcpu_run_return struct in the 64-bit packing ABI.
  */
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index 8014f8c..b0345e2 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -37,6 +37,7 @@
 #define HF_INTERRUPT_ENABLE     0xff0b
 #define HF_INTERRUPT_GET        0xff0c
 #define HF_INTERRUPT_INJECT     0xff0d
+#define HF_SHARE_MEMORY         0xff0e
 
 /** The amount of data that can be sent to a mailbox. */
 #define HF_MAILBOX_SIZE 4096
@@ -223,3 +224,18 @@
 	return hf_call(HF_INTERRUPT_INJECT, target_vm_id, target_vcpu_idx,
 		       intid);
 }
+
+/**
+ * Shares a region of memory with another VM.
+ *
+ * Returns 0 on success or -1 if the sharing was not allowed or failed.
+ *
+ * TODO: replace this with a better API once we have decided what that should
+ *       look like.
+ */
+static inline int64_t hf_share_memory(uint32_t vm_id, hf_ipaddr_t addr,
+				      size_t size, enum hf_share share)
+{
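+	/*
+	 * Pack the VM id into the upper 32 bits and the share mode into the
+	 * lower 32 bits of the first argument.
+	 */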
+	return hf_call(HF_SHARE_MEMORY, (((uint64_t)vm_id) << 32) | share, addr,
+		       size);
+}
diff --git a/src/api.c b/src/api.c
index 08f1e05..b17d101 100644
--- a/src/api.c
+++ b/src/api.c
@@ -21,6 +21,8 @@
 #include "hf/arch/cpu.h"
 
 #include "hf/dlog.h"
+#include "hf/mm.h"
+#include "hf/spinlock.h"
 #include "hf/std.h"
 #include "hf/vm.h"
 
@@ -32,6 +34,9 @@
  * ordering requirements are as follows:
  *
  * vm::lock -> vcpu::lock
+ *
+ * Locks of the same kind require the lock with the lowest address to be
+ * locked first; see `sl_lock_both()`.
  */
 
 static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
@@ -978,8 +983,8 @@
 	target_vcpu->interrupts.enabled_and_pending_count++;
 
 	/*
-	 * Only need to update state if there was not already an
-	 * interrupt enabled and pending.
+	 * Only need to update state if there was not already an interrupt
+	 * enabled and pending.
 	 */
 	if (target_vcpu->interrupts.enabled_and_pending_count != 1) {
 		goto out;
@@ -996,9 +1001,8 @@
 
 		/* Take target vCPU out of mailbox recv_waiter list. */
 		/*
-		 * TODO: Consider using a doubly-linked list for
-		 * the receive waiter list to avoid the linear
-		 * search here.
+		 * TODO: Consider using a doubly-linked list for the receive
+		 *       waiter list to avoid the linear search here.
 		 */
 		struct vcpu **previous_next_pointer =
 			&target_vm->mailbox.recv_waiter;
@@ -1053,3 +1057,188 @@
 
 	return ret;
 }
+
+/**
+ * Clears a region of physical memory by overwriting it with zeros. The data is
+ * flushed from the cache so the memory has been cleared across the system.
+ */
+static bool api_clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool)
+{
+	/*
+	 * TODO: change this to a cpu local single page window rather than a
+	 *       global mapping of the whole range. Such an approach will limit
+	 *       the changes to stage-1 tables and will allow only local
+	 *       invalidation.
+	 */
+	void *ptr = mm_identity_map(begin, end, MM_MODE_W, ppool);
+	size_t size = pa_addr(end) - pa_addr(begin);
+
+	if (!ptr) {
+		/* TODO: partial defrag of failed range. */
+		/* Recover any memory consumed in failed mapping. */
+		mm_defrag(ppool);
+		return false;
+	}
+
+	memset(ptr, 0, size);
+	arch_mm_write_back_dcache(ptr, size);
+	mm_unmap(begin, end, ppool);
+
+	return true;
+}
+
+/**
+ * Shares memory from the calling VM with another. The memory can be shared in
+ * different modes.
+ *
+ * TODO: the interface for sharing memory will need to be enhanced to allow
+ *       sharing with different modes e.g. read-only, informing the recipient
+ *       of the memory they have been given, opting to not wipe the memory and
+ *       possibly allowing multiple blocks to be transferred. What this will
+ *       look like is TBD.
+ */
+int64_t api_share_memory(uint32_t vm_id, ipaddr_t addr, size_t size,
+			 enum hf_share share, struct vcpu *current)
+{
+	struct vm *from = current->vm;
+	struct vm *to;
+	int orig_from_mode;
+	int from_mode;
+	int to_mode;
+	ipaddr_t begin;
+	ipaddr_t end;
+	paddr_t pa_begin;
+	paddr_t pa_end;
+	struct mpool local_page_pool;
+	int64_t ret;
+
+	/* Disallow reflexive shares as this suggests an error in the VM. */
+	if (vm_id == from->id) {
+		return -1;
+	}
+
+	/* Ensure the target VM exists. */
+	to = vm_get(vm_id);
+	if (to == NULL) {
+		return -1;
+	}
+
+	begin = addr;
+	end = ipa_add(addr, size);
+
+	/* Fail if addresses are not page-aligned. */
+	if ((ipa_addr(begin) & (PAGE_SIZE - 1)) ||
+	    (ipa_addr(end) & (PAGE_SIZE - 1))) {
+		return -1;
+	}
+
+	/* Convert the sharing request to memory management modes. */
+	switch (share) {
+	case HF_MEMORY_GIVE:
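+		/*
+		 * The sender gives up ownership and access; the recipient
+		 * gains both.
+		 */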
+		from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED;
+		to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
+		break;
+
+	case HF_MEMORY_LEND:
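+		/*
+		 * The sender keeps ownership but loses access; the recipient
+		 * gains access but not ownership.
+		 */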
+		from_mode = MM_MODE_INVALID;
+		to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_UNOWNED;
+		break;
+
+	case HF_MEMORY_SHARE:
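+		/*
+		 * The sender keeps ownership and access; the recipient gains
+		 * access but not ownership, and both mappings are marked
+		 * shared.
+		 */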
+		from_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_SHARED;
+		to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_UNOWNED |
+			  MM_MODE_SHARED;
+		break;
+
+	default:
+		/* The input is untrusted so might not be a valid value. */
+		return -1;
+	}
+
+	/*
+	 * Create a local pool so any freed memory can't be used by another
+	 * thread. This is to ensure the original mapping can be restored if any
+	 * stage of the process fails.
+	 */
+	mpool_init_with_fallback(&local_page_pool, &api_page_pool);
+
+	sl_lock_both(&from->lock, &to->lock);
+
+	/*
+	 * Ensure that the memory range is mapped with the same mode so that
+	 * changes can be reverted if the process fails.
+	 */
+	if (!mm_vm_get_mode(&from->ptable, begin, end, &orig_from_mode)) {
+		goto fail;
+	}
+
+	/*
+	 * Ensure the memory range is valid for the sender. If it isn't, the
+	 * sender has either shared it with another VM already or has no claim
+	 * to the memory.
+	 */
+	if (orig_from_mode & MM_MODE_INVALID) {
+		goto fail;
+	}
+
+	/*
+	 * The sender must own the memory and have exclusive access to it in
+	 * order to share it. Alternatively, it is giving memory back to the
+	 * owning VM.
+	 */
+	if (orig_from_mode & MM_MODE_UNOWNED) {
+		int orig_to_mode;
+
+		if (share != HF_MEMORY_GIVE ||
+		    !mm_vm_get_mode(&to->ptable, begin, end, &orig_to_mode) ||
+		    orig_to_mode & MM_MODE_UNOWNED) {
+			goto fail;
+		}
+	} else if (orig_from_mode & MM_MODE_SHARED) {
+		goto fail;
+	}
+
+	pa_begin = pa_from_ipa(begin);
+	pa_end = pa_from_ipa(end);
+
+	/*
+	 * First update the mapping for the sender so there is no overlap with
+	 * the recipient.
+	 */
+	if (!mm_vm_identity_map(&from->ptable, pa_begin, pa_end, from_mode,
+				NULL, &local_page_pool)) {
+		goto fail;
+	}
+
+	/* Clear the memory so no VM or device can see the previous contents. */
+	if (!api_clear_memory(pa_begin, pa_end, &local_page_pool)) {
+		goto fail_return_to_sender;
+	}
+
+	/* Complete the transfer by mapping the memory into the recipient. */
+	if (!mm_vm_identity_map(&to->ptable, pa_begin, pa_end, to_mode, NULL,
+				&local_page_pool)) {
+		/* TODO: partial defrag of failed range. */
+		/* Recover any memory consumed in failed mapping. */
+		mm_vm_defrag(&from->ptable, &local_page_pool);
+		goto fail_return_to_sender;
+	}
+
+	ret = 0;
+	goto out;
+
+fail_return_to_sender:
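+	/*
+	 * The sender's mapping was already updated, so restore its original
+	 * mapping before reporting the failure.
+	 */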
+	mm_vm_identity_map(&from->ptable, pa_begin, pa_end, orig_from_mode,
+			   NULL, &local_page_pool);
+
+fail:
+	ret = -1;
+
+out:
+	sl_unlock(&from->lock);
+	sl_unlock(&to->lock);
+
+	mpool_fini(&local_page_pool);
+
+	return ret;
+}
diff --git a/src/arch/aarch64/handler.c b/src/arch/aarch64/handler.c
index 1faef38..832bc8a 100644
--- a/src/arch/aarch64/handler.c
+++ b/src/arch/aarch64/handler.c
@@ -311,6 +311,12 @@
 						    &ret.new);
 		break;
 
+	case HF_SHARE_MEMORY:
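+		/*
+		 * The upper 32 bits of the first argument hold the target VM
+		 * id and the lower 32 bits hold the share mode; see
+		 * hf_share_memory().
+		 */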
+		ret.user_ret =
+			api_share_memory(arg1 >> 32, ipa_init(arg2), arg3,
+					 arg1 & 0xffffffff, current());
+		break;
+
 	default:
 		ret.user_ret = -1;
 	}
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
index 1e45a1c..9488571 100644
--- a/src/arch/fake/mm.c
+++ b/src/arch/fake/mm.c
@@ -121,6 +121,11 @@
 	/* There's no modelling of the stage-2 TLB. */
 }
 
+void arch_mm_write_back_dcache(void *base, size_t size)
+{
+	/* There's no modelling of the cache. */
+}
+
 uint8_t arch_mm_stage1_max_level(void)
 {
 	return 2;
diff --git a/test/vmapi/primary_with_secondaries/BUILD.gn b/test/vmapi/primary_with_secondaries/BUILD.gn
index 87e3b1b..df38e18 100644
--- a/test/vmapi/primary_with_secondaries/BUILD.gn
+++ b/test/vmapi/primary_with_secondaries/BUILD.gn
@@ -24,6 +24,7 @@
   public_configs = [ ":config" ]
 
   sources = [
+    "memory_sharing.c",
     "no_services.c",
     "run_race.c",
     "with_services.c",
diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c
new file mode 100644
index 0000000..14d34f6
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/memory_sharing.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include "hf/mm.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "hftest.h"
+#include "primary_with_secondary.h"
+
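+/* The page of memory that is shared with the secondary VMs in these tests. */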
+alignas(PAGE_SIZE) static uint8_t page[PAGE_SIZE];
+
+/**
+ * Tries sharing memory in different modes with different VMs and asserts that
+ * it will fail.
+ */
+void check_cannot_share_memory(void *ptr, size_t size)
+{
+	uint32_t vms[] = {SERVICE_VM0, SERVICE_VM1};
+	enum hf_share modes[] = {HF_MEMORY_GIVE, HF_MEMORY_LEND,
+				 HF_MEMORY_SHARE};
+	int i;
+	int j;
+
+	for (i = 0; i < ARRAY_SIZE(vms); ++i) {
+		for (j = 0; j < ARRAY_SIZE(modes); ++j) {
+			ASSERT_EQ(hf_share_memory(vms[i], (hf_ipaddr_t)ptr,
+						  size, modes[j]),
+				  -1);
+		}
+	}
+}
+
+/**
+ * After memory has been shared concurrently, it can't be shared again.
+ */
+TEST(memory_sharing, cannot_share_concurrent_memory_twice)
+{
+	ASSERT_EQ(hf_share_memory(SERVICE_VM0, (hf_ipaddr_t)&page, PAGE_SIZE,
+				  HF_MEMORY_SHARE),
+		  0);
+	check_cannot_share_memory(page, PAGE_SIZE);
+}
+
+/**
+ * After memory has been given away, it can't be shared again.
+ */
+TEST(memory_sharing, cannot_share_given_memory_twice)
+{
+	ASSERT_EQ(hf_share_memory(SERVICE_VM0, (hf_ipaddr_t)&page, PAGE_SIZE,
+				  HF_MEMORY_GIVE),
+		  0);
+	check_cannot_share_memory(page, PAGE_SIZE);
+}
+
+/**
+ * After memory has been lent, it can't be shared again.
+ */
+TEST(memory_sharing, cannot_share_lent_memory_twice)
+{
+	ASSERT_EQ(hf_share_memory(SERVICE_VM0, (hf_ipaddr_t)&page, PAGE_SIZE,
+				  HF_MEMORY_LEND),
+		  0);
+	check_cannot_share_memory(page, PAGE_SIZE);
+}
+
+/**
+ * Sharing memory concurrently gives both VMs access to the memory so it can be
+ * used for communication.
+ */
+TEST(memory_sharing, concurrent)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "memory_increment", mb.send);
+
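+	/* Dirty the memory before sharing it. */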
+	memset(ptr, 'a', PAGE_SIZE);
+	ASSERT_EQ(hf_share_memory(SERVICE_VM0, (hf_ipaddr_t)&page, PAGE_SIZE,
+				  HF_MEMORY_SHARE),
+		  0);
+
+	/*
+	 * TODO: the address of the memory will be part of the proper API. That
+	 *       API is still to be agreed on so the address is passed
+	 *       explicitly to test the mechanism.
+	 */
+	memcpy(mb.send, &ptr, sizeof(ptr));
+	EXPECT_EQ(hf_mailbox_send(SERVICE_VM0, sizeof(ptr), false),
+		  HF_INVALID_VCPU);
+
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		page[i] = i;
+	}
+
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+	EXPECT_EQ(hf_mailbox_clear(), 0);
+
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		uint8_t value = i + 1;
+
+		EXPECT_EQ(page[i], value);
+	}
+}
+
+/**
+ * Memory shared concurrently can be returned to the owner.
+ */
+TEST(memory_sharing, share_concurrently_and_get_back)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "memory_return", mb.send);
+
+	/* Dirty the memory before sharing it. */
+	memset(ptr, 'b', PAGE_SIZE);
+	ASSERT_EQ(hf_share_memory(SERVICE_VM0, (hf_ipaddr_t)&page, PAGE_SIZE,
+				  HF_MEMORY_SHARE),
+		  0);
+
+	/*
+	 * TODO: the address of the memory will be part of the proper API. That
+	 *       API is still to be agreed on so the address is passed
+	 *       explicitly to test the mechanism.
+	 */
+	memcpy(mb.send, &ptr, sizeof(ptr));
+	EXPECT_EQ(hf_mailbox_send(SERVICE_VM0, sizeof(ptr), false),
+		  HF_INVALID_VCPU);
+
+	/* Let the memory be returned. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+	EXPECT_EQ(hf_mailbox_clear(), 0);
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		ASSERT_EQ(ptr[i], 0);
+	}
+}
+
+/**
+ * Memory given away can be given back.
+ */
+TEST(memory_sharing, give_and_get_back)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "memory_return", mb.send);
+
+	/* Dirty the memory before giving it. */
+	memset(ptr, 'b', PAGE_SIZE);
+	ASSERT_EQ(hf_share_memory(SERVICE_VM0, (hf_ipaddr_t)&page, PAGE_SIZE,
+				  HF_MEMORY_GIVE),
+		  0);
+
+	/*
+	 * TODO: the address of the memory will be part of the proper API. That
+	 *       API is still to be agreed on so the address is passed
+	 *       explicitly to test the mechanism.
+	 */
+	memcpy(mb.send, &ptr, sizeof(ptr));
+	EXPECT_EQ(hf_mailbox_send(SERVICE_VM0, sizeof(ptr), false),
+		  HF_INVALID_VCPU);
+
+	/* Let the memory be returned. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+	EXPECT_EQ(hf_mailbox_clear(), 0);
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		ASSERT_EQ(ptr[i], 0);
+	}
+}
+
+/**
+ * Memory that has been lent can be returned to the owner.
+ */
+TEST(memory_sharing, lend_and_get_back)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "memory_return", mb.send);
+
+	/* Dirty the memory before lending it. */
+	memset(ptr, 'c', PAGE_SIZE);
+	ASSERT_EQ(hf_share_memory(SERVICE_VM0, (hf_ipaddr_t)&page, PAGE_SIZE,
+				  HF_MEMORY_LEND),
+		  0);
+
+	/*
+	 * TODO: the address of the memory will be part of the proper API. That
+	 *       API is still to be agreed on so the address is passed
+	 *       explicitly to test the mechanism.
+	 */
+	memcpy(mb.send, &ptr, sizeof(ptr));
+	EXPECT_EQ(hf_mailbox_send(SERVICE_VM0, sizeof(ptr), false),
+		  HF_INVALID_VCPU);
+
+	/* Let the memory be returned. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+	EXPECT_EQ(hf_mailbox_clear(), 0);
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		ASSERT_EQ(ptr[i], 0);
+	}
+}
+
+/**
+ * After memory has been returned, it is free to be shared again.
+ */
+TEST(memory_sharing, reshare_after_return)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "memory_return", mb.send);
+
+	/* Share the memory initially. */
+	ASSERT_EQ(hf_share_memory(SERVICE_VM0, (hf_ipaddr_t)&page, PAGE_SIZE,
+				  HF_MEMORY_LEND),
+		  0);
+
+	/*
+	 * TODO: the address of the memory will be part of the proper API. That
+	 *       API is still to be agreed on so the address is passed
+	 *       explicitly to test the mechanism.
+	 */
+	memcpy(mb.send, &ptr, sizeof(ptr));
+	EXPECT_EQ(hf_mailbox_send(SERVICE_VM0, sizeof(ptr), false),
+		  HF_INVALID_VCPU);
+
+	/* Let the memory be returned. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+	EXPECT_EQ(hf_mailbox_clear(), 0);
+
+	/* Share the memory again after it has been returned. */
+	ASSERT_EQ(hf_share_memory(SERVICE_VM0, (hf_ipaddr_t)&page, PAGE_SIZE,
+				  HF_MEMORY_LEND),
+		  0);
+}
+
+/**
+ * After memory has been returned, it is free to be shared with another VM.
+ */
+TEST(memory_sharing, share_elsewhere_after_return)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "memory_return", mb.send);
+
+	/* Share the memory initially. */
+	ASSERT_EQ(hf_share_memory(SERVICE_VM0, (hf_ipaddr_t)&page, PAGE_SIZE,
+				  HF_MEMORY_LEND),
+		  0);
+
+	/*
+	 * TODO: the address of the memory will be part of the proper API. That
+	 *       API is still to be agreed on so the address is passed
+	 *       explicitly to test the mechanism.
+	 */
+	memcpy(mb.send, &ptr, sizeof(ptr));
+	EXPECT_EQ(hf_mailbox_send(SERVICE_VM0, sizeof(ptr), false),
+		  HF_INVALID_VCPU);
+
+	/* Let the memory be returned. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+	EXPECT_EQ(hf_mailbox_clear(), 0);
+
+	/* Share the memory with a different VM after it has been returned. */
+	ASSERT_EQ(hf_share_memory(SERVICE_VM1, (hf_ipaddr_t)&page, PAGE_SIZE,
+				  HF_MEMORY_LEND),
+		  0);
+}
diff --git a/test/vmapi/primary_with_secondaries/services/BUILD.gn b/test/vmapi/primary_with_secondaries/services/BUILD.gn
index caaaf8a..2389751 100644
--- a/test/vmapi/primary_with_secondaries/services/BUILD.gn
+++ b/test/vmapi/primary_with_secondaries/services/BUILD.gn
@@ -41,10 +41,7 @@
 # Echo service that waits for recipient to become writable.
 source_set("echo_with_notification") {
   testonly = true
-  public_configs = [
-    "..:config",
-    "//test/hftest:hftest_config",
-  ]
+  public_configs = [ "//test/hftest:hftest_config" ]
 
   sources = [
     "echo_with_notification.c",
@@ -55,6 +52,16 @@
   ]
 }
 
+# Services related to memory sharing.
+source_set("memory") {
+  testonly = true
+  public_configs = [ "//test/hftest:hftest_config" ]
+
+  sources = [
+    "memory.c",
+  ]
+}
+
 # Service that can be interrupted.
 source_set("interruptible") {
   testonly = true
@@ -92,6 +99,7 @@
     ":echo",
     ":echo_with_notification",
     ":interruptible",
+    ":memory",
     ":relay",
     "//test/hftest:hftest_secondary_vm",
   ]
diff --git a/test/vmapi/primary_with_secondaries/services/echo_with_notification.c b/test/vmapi/primary_with_secondaries/services/echo_with_notification.c
index d8815be..e481ecc 100644
--- a/test/vmapi/primary_with_secondaries/services/echo_with_notification.c
+++ b/test/vmapi/primary_with_secondaries/services/echo_with_notification.c
@@ -22,7 +22,6 @@
 
 #include "../msr.h"
 #include "hftest.h"
-#include "primary_with_secondary.h"
 
 static void irq(void)
 {
diff --git a/test/vmapi/primary_with_secondaries/services/memory.c b/test/vmapi/primary_with_secondaries/services/memory.c
new file mode 100644
index 0000000..bd2d958
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/memory.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/mm.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "hftest.h"
+
+TEST_SERVICE(memory_increment)
+{
+	/* Loop, incrementing each byte of the memory shared with this VM. */
+	for (;;) {
+		struct hf_mailbox_receive_return res = hf_mailbox_receive(true);
+		uint8_t *ptr;
+		size_t i;
+
+		/* Check the memory was cleared. */
+		memcpy(&ptr, SERVICE_RECV_BUFFER(), sizeof(ptr));
+		for (i = 0; i < PAGE_SIZE; ++i) {
+			ASSERT_EQ(ptr[i], 0);
+		}
+
+		/* Allow the memory to be populated. */
+		hf_vcpu_yield();
+
+		/* Increment each byte of memory. */
+		for (i = 0; i < PAGE_SIZE; ++i) {
+			++ptr[i];
+		}
+
+		/* Signal completion and reset. */
+		hf_mailbox_clear();
+		hf_mailbox_send(res.vm_id, 0, false);
+	}
+}
+
+TEST_SERVICE(memory_return)
+{
+	/* Loop, giving memory back to the sender. */
+	for (;;) {
+		struct hf_mailbox_receive_return res = hf_mailbox_receive(true);
+		uint8_t *ptr;
+
+		/* Check the memory was cleared. */
+		memcpy(&ptr, SERVICE_RECV_BUFFER(), sizeof(ptr));
+		for (int i = 0; i < PAGE_SIZE; ++i) {
+			ASSERT_EQ(ptr[i], 0);
+		}
+
+		/* Give the memory back and notify the sender. */
+		ASSERT_EQ(hf_share_memory(res.vm_id, (hf_ipaddr_t)ptr,
+					  PAGE_SIZE, HF_MEMORY_GIVE),
+			  0);
+		hf_mailbox_clear();
+		hf_mailbox_send(res.vm_id, 0, false);
+	}
+}