Update spci_msg_send to the new SPCI beta API.

This removes the header from the message buffers, as the header data is now
passed as parameters to SPCI_MSG_SEND.
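
For illustration, a minimal sketch of the call-site change (sender_id,
receiver_id and size stand in for the values each call site already uses):

  Before:
    /* Header is built into the send buffer, then sent with no parameters. */
    memcpy_s(send_buffer->payload, SPCI_MSG_PAYLOAD_MAX, message, size);
    spci_message_init(send_buffer, size, receiver_id, sender_id);
    EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);

  After:
    /* Payload is copied directly; header data is passed to SPCI_MSG_SEND. */
    memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, size);
    EXPECT_EQ(spci_msg_send(sender_id, receiver_id, size, 0).func,
              SPCI_SUCCESS_32);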

Bug: 141469322
Change-Id: I3a61f5470fd95ba2d47df33f5c96466ba286af85
diff --git a/test/hftest/inc/hftest_impl.h b/test/hftest/inc/hftest_impl.h
index f3eb407..9598b12 100644
--- a/test/hftest/inc/hftest_impl.h
+++ b/test/hftest/inc/hftest_impl.h
@@ -140,8 +140,8 @@
 	const struct fdt_header *fdt;
 
 	/* These are used in services. */
-	struct spci_message *send;
-	struct spci_message *recv;
+	void *send;
+	void *recv;
 	size_t memory_size;
 };
 
@@ -291,12 +291,12 @@
 		ASSERT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);             \
                                                                               \
 		/* Send the selected service to run and let it be handled. */ \
-		memcpy_s(send_buffer->payload, SPCI_MSG_PAYLOAD_MAX, service, \
+		memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, service,          \
 			 msg_length);                                         \
-		spci_message_init(send_buffer, msg_length, vm_id,             \
-				  hf_vm_get_id());                            \
                                                                               \
-		ASSERT_EQ(spci_msg_send(0), 0);                               \
+		ASSERT_EQ(spci_msg_send(hf_vm_get_id(), vm_id, msg_length, 0) \
+				  .func,                                      \
+			  SPCI_SUCCESS_32);                                   \
 		run_res = hf_vcpu_run(vm_id, 0);                              \
 		ASSERT_EQ(run_res.code, HF_VCPU_RUN_YIELD);                   \
 	} while (0)
diff --git a/test/hftest/service.c b/test/hftest/service.c
index c04c253..ed35e74 100644
--- a/test/hftest/service.c
+++ b/test/hftest/service.c
@@ -95,8 +95,6 @@
 		}
 	}
 
-	struct spci_message *recv_msg = (struct spci_message *)recv;
-
 	/* Prepare the context. */
 
 	/* Set up the mailbox. */
@@ -104,7 +102,8 @@
 
 	/* Receive the name of the service to run. */
 	ret = spci_msg_wait();
-	memiter_init(&args, recv_msg->payload, spci_msg_send_size(ret));
+	ASSERT_EQ(ret.func, SPCI_MSG_SEND_32);
+	memiter_init(&args, recv, spci_msg_send_size(ret));
 	service = find_service(&args);
 	hf_mailbox_clear();
 
@@ -122,8 +121,8 @@
 	ctx = hftest_get_context();
 	memset_s(ctx, sizeof(*ctx), 0, sizeof(*ctx));
 	ctx->abort = abort;
-	ctx->send = (struct spci_message *)send;
-	ctx->recv = (struct spci_message *)recv;
+	ctx->send = send;
+	ctx->recv = recv;
 	ctx->memory_size = memory_size;
 
 	/* Pause so the next time cycles are given the service will be run. */
diff --git a/test/linux/hftest_socket.c b/test/linux/hftest_socket.c
index ae69abb..460d1f1 100644
--- a/test/linux/hftest_socket.c
+++ b/test/linux/hftest_socket.c
@@ -74,14 +74,12 @@
 	ctx = hftest_get_context();
 	memset_s(ctx, sizeof(*ctx), 0, sizeof(*ctx));
 	ctx->abort = abort;
-	ctx->send = (struct spci_message *)send;
-	ctx->recv = (struct spci_message *)recv;
+	ctx->send = send;
+	ctx->recv = recv;
 	ctx->memory_size = memory_size;
 
 	for (;;) {
 		struct spci_value ret;
-		struct spci_message *send_buf = (struct spci_message *)send;
-		struct spci_message *recv_buf = (struct spci_message *)recv;
 
 		/* Receive the packet. */
 		ret = spci_msg_wait();
@@ -89,21 +87,21 @@
 		EXPECT_LE(spci_msg_send_size(ret), SPCI_MSG_PAYLOAD_MAX);
 
 		/* Echo the message back to the sender. */
-		memcpy_s(send_buf->payload, SPCI_MSG_PAYLOAD_MAX,
-			 recv_buf->payload, spci_msg_send_size(ret));
+		memcpy_s(send, SPCI_MSG_PAYLOAD_MAX, recv,
+			 spci_msg_send_size(ret));
 
 		/* Swap the socket's source and destination ports */
-		struct hf_msg_hdr *hdr = (struct hf_msg_hdr *)send_buf->payload;
+		struct hf_msg_hdr *hdr = (struct hf_msg_hdr *)send;
 		swap(&(hdr->src_port), &(hdr->dst_port));
 
 		/* Swap the destination and source ids. */
 		spci_vm_id_t dst_id = spci_msg_send_sender(ret);
 		spci_vm_id_t src_id = spci_msg_send_receiver(ret);
 
-		spci_message_init(send_buf, spci_msg_send_size(ret), dst_id,
-				  src_id);
-
 		hf_mailbox_clear();
-		EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+		EXPECT_EQ(spci_msg_send(src_id, dst_id, spci_msg_send_size(ret),
+					0)
+				  .func,
+			  SPCI_SUCCESS_32);
 	}
 }
diff --git a/test/vmapi/arch/aarch64/gicv3/busy_secondary.c b/test/vmapi/arch/aarch64/gicv3/busy_secondary.c
index 2f0766a..1ed1031 100644
--- a/test/vmapi/arch/aarch64/gicv3/busy_secondary.c
+++ b/test/vmapi/arch/aarch64/gicv3/busy_secondary.c
@@ -79,10 +79,11 @@
 
 	/* Let secondary start looping. */
 	dlog("Telling secondary to loop.\n");
-	memcpy_s(send_buffer->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
-	spci_message_init(send_buffer, 0, SERVICE_VM0, HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), 0);
+	memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_PREEMPTED);
 
@@ -136,10 +137,11 @@
 
 	/* Let secondary start looping. */
 	dlog("Telling secondary to loop.\n");
-	memcpy_s(send_buffer->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
-	spci_message_init(send_buffer, 0, SERVICE_VM0, HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), 0);
+	memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_PREEMPTED);
 
diff --git a/test/vmapi/arch/aarch64/gicv3/gicv3.c b/test/vmapi/arch/aarch64/gicv3/gicv3.c
index e6e7435..9c144b0 100644
--- a/test/vmapi/arch/aarch64/gicv3/gicv3.c
+++ b/test/vmapi/arch/aarch64/gicv3/gicv3.c
@@ -34,8 +34,8 @@
 hf_ipaddr_t send_page_addr = (hf_ipaddr_t)send_page;
 hf_ipaddr_t recv_page_addr = (hf_ipaddr_t)recv_page;
 
-struct spci_message *send_buffer = (struct spci_message *)send_page;
-struct spci_message *recv_buffer = (struct spci_message *)recv_page;
+void *send_buffer = send_page;
+void *recv_buffer = recv_page;
 
 volatile uint32_t last_interrupt_id = 0;
 
diff --git a/test/vmapi/arch/aarch64/gicv3/inc/gicv3.h b/test/vmapi/arch/aarch64/gicv3/inc/gicv3.h
index d51398f..7308233 100644
--- a/test/vmapi/arch/aarch64/gicv3/inc/gicv3.h
+++ b/test/vmapi/arch/aarch64/gicv3/inc/gicv3.h
@@ -33,8 +33,8 @@
 extern hf_ipaddr_t send_page_addr;
 extern hf_ipaddr_t recv_page_addr;
 
-extern struct spci_message *send_buffer;
-extern struct spci_message *recv_buffer;
+extern void *send_buffer;
+extern void *recv_buffer;
 
 extern volatile uint32_t last_interrupt_id;
 
diff --git a/test/vmapi/arch/aarch64/gicv3/services/timer.c b/test/vmapi/arch/aarch64/gicv3/services/timer.c
index 5d3dd2d..fc52ab1 100644
--- a/test/vmapi/arch/aarch64/gicv3/services/timer.c
+++ b/test/vmapi/arch/aarch64/gicv3/services/timer.c
@@ -46,11 +46,8 @@
 	}
 	buffer[8] = '0' + interrupt_id / 10;
 	buffer[9] = '0' + interrupt_id % 10;
-	memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, buffer,
-		 size);
-	spci_message_init(SERVICE_SEND_BUFFER(), size, HF_PRIMARY_VM_ID,
-			  hf_vm_get_id());
-	spci_msg_send(0);
+	memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, buffer, size);
+	spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, size, 0);
 	dlog("secondary IRQ %d ended\n", interrupt_id);
 	event_send_local();
 }
@@ -63,8 +60,7 @@
 
 	for (;;) {
 		const char timer_wfi_message[] = "**** xxxxxxx";
-		struct spci_message *message_header = SERVICE_RECV_BUFFER();
-		uint8_t *message;
+		uint8_t *message = (uint8_t *)SERVICE_RECV_BUFFER();
 		bool wfi, wfe, receive;
 		bool disable_interrupts;
 		uint32_t ticks;
@@ -77,8 +73,6 @@
 			     spci_msg_send_size(ret));
 		}
 
-		message = message_header->payload;
-
 		/*
 		 * Start a timer to send the message back: enable it and
 		 * set it for the requested number of ticks.
diff --git a/test/vmapi/arch/aarch64/gicv3/timer_secondary.c b/test/vmapi/arch/aarch64/gicv3/timer_secondary.c
index b21bd83..e38a2fc 100644
--- a/test/vmapi/arch/aarch64/gicv3/timer_secondary.c
+++ b/test/vmapi/arch/aarch64/gicv3/timer_secondary.c
@@ -49,11 +49,11 @@
 	EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
 
 	/* Send the message for the secondary to set a timer. */
-	memcpy_s(send_buffer->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
-	spci_message_init(send_buffer, sizeof(message), SERVICE_VM0,
-			  HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), 0);
+	memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
 
 	/*
 	 * Let the secondary handle the message and set the timer. It will loop
@@ -75,7 +75,7 @@
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response));
-	EXPECT_EQ(memcmp(recv_buffer->payload, expected_response,
+	EXPECT_EQ(memcmp(recv_buffer, expected_response,
 			 sizeof(expected_response)),
 		  0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
@@ -108,11 +108,11 @@
 	EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
 
 	/* Send the message for the secondary to set a timer. */
-	memcpy_s(send_buffer->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 message_length);
-	spci_message_init(send_buffer, message_length, SERVICE_VM0,
-			  HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), 0);
+	memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, message_length);
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, message_length, 0)
+			.func,
+		SPCI_SUCCESS_32);
 
 	/* Let the secondary handle the message and set the timer. */
 	last_interrupt_id = 0;
@@ -173,7 +173,7 @@
 	/* Once we wake it up it should get the timer interrupt and respond. */
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response));
-	EXPECT_EQ(memcmp(recv_buffer->payload, expected_response,
+	EXPECT_EQ(memcmp(recv_buffer, expected_response,
 			 sizeof(expected_response)),
 		  0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
@@ -261,11 +261,11 @@
 	EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
 
 	/* Send the message for the secondary to set a timer. */
-	memcpy_s(send_buffer->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 message_length);
-	spci_message_init(send_buffer, message_length, SERVICE_VM0,
-			  HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), 0);
+	memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, message_length);
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, message_length, 0)
+			.func,
+		SPCI_SUCCESS_32);
 
 	/*
 	 * Let the secondary handle the message and set the timer.
diff --git a/test/vmapi/primary_with_secondaries/BUILD.gn b/test/vmapi/primary_with_secondaries/BUILD.gn
index 7302db0..86092cc 100644
--- a/test/vmapi/primary_with_secondaries/BUILD.gn
+++ b/test/vmapi/primary_with_secondaries/BUILD.gn
@@ -38,14 +38,24 @@
     "spci.c",
   ]
 
-  sources += [ "util.c" ]
-
   deps = [
+    ":util",
     "//src/arch/aarch64/hftest:registers",
     "//test/hftest:hftest_primary_vm",
   ]
 }
 
+source_set("util") {
+  testonly = true
+  public_configs = [
+    ":config",
+    "//test/hftest:hftest_config",
+  ]
+  sources = [
+    "util.c",
+  ]
+}
+
 initrd("primary_with_secondaries_test") {
   testonly = true
 
diff --git a/test/vmapi/primary_with_secondaries/inc/util.h b/test/vmapi/primary_with_secondaries/inc/util.h
index eca641f..845a4b4 100644
--- a/test/vmapi/primary_with_secondaries/inc/util.h
+++ b/test/vmapi/primary_with_secondaries/inc/util.h
@@ -18,9 +18,15 @@
 
 #include "vmapi/hf/spci.h"
 
+#define EXPECT_SPCI_ERROR(value, spci_error)          \
+	do {                                          \
+		EXPECT_EQ(value.func, SPCI_ERROR_32); \
+		EXPECT_EQ(value.arg1, spci_error);    \
+	} while (0)
+
 struct mailbox_buffers {
-	struct spci_message *send;
-	struct spci_message *recv;
+	void *send;
+	void *recv;
 };
 
 struct mailbox_buffers set_up_mailbox(void);
diff --git a/test/vmapi/primary_with_secondaries/interrupts.c b/test/vmapi/primary_with_secondaries/interrupts.c
index ae081d4..31db0b0 100644
--- a/test/vmapi/primary_with_secondaries/interrupts.c
+++ b/test/vmapi/primary_with_secondaries/interrupts.c
@@ -42,16 +42,15 @@
 	EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
 
 	/* Set the message, echo it and wait for a response. */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
-	spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
-			  HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), 0);
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response));
-	EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
-			 sizeof(expected_response)),
+	EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
 		  0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
 }
@@ -78,8 +77,7 @@
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response));
-	EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
-			 sizeof(expected_response)),
+	EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
 		  0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
 
@@ -88,8 +86,7 @@
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response));
-	EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
-			 sizeof(expected_response)),
+	EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
 		  0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
 }
@@ -116,8 +113,7 @@
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response));
-	EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
-			 sizeof(expected_response)),
+	EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
 		  0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
 
@@ -126,7 +122,7 @@
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response_2));
-	EXPECT_EQ(memcmp(mb.recv->payload, expected_response_2,
+	EXPECT_EQ(memcmp(mb.recv, expected_response_2,
 			 sizeof(expected_response_2)),
 		  0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
@@ -156,8 +152,7 @@
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response));
-	EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
-			 sizeof(expected_response)),
+	EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
 		  0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
 
@@ -166,15 +161,15 @@
 	EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
 
 	/* Now send a message to the secondary. */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
-	spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
-			  HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), 0);
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response_2));
-	EXPECT_EQ(memcmp(mb.recv->payload, expected_response_2,
+	EXPECT_EQ(memcmp(mb.recv, expected_response_2,
 			 sizeof(expected_response_2)),
 		  0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
@@ -204,16 +199,15 @@
 	 * Now send a message to the secondary to enable the interrupt ID, and
 	 * expect the response from the interrupt we sent before.
 	 */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
-	spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
-			  HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), 0);
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response));
-	EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
-			 sizeof(expected_response)),
+	EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
 		  0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
 }
@@ -240,8 +234,7 @@
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response));
-	EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
-			 sizeof(expected_response)),
+	EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
 		  0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
 }
@@ -268,8 +261,7 @@
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response));
-	EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
-			 sizeof(expected_response)),
+	EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
 		  0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
 }
@@ -290,15 +282,15 @@
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_WAIT_FOR_MESSAGE);
 	EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
 
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
-	spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
-			  HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
 	hf_interrupt_inject(SERVICE_VM0, 0, EXTERNAL_INTERRUPT_ID_A);
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(message));
-	EXPECT_EQ(memcmp(mb.recv->payload, message, sizeof(message)), 0);
+	EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
 }
diff --git a/test/vmapi/primary_with_secondaries/mailbox.c b/test/vmapi/primary_with_secondaries/mailbox.c
index ca06bff..d83beaf 100644
--- a/test/vmapi/primary_with_secondaries/mailbox.c
+++ b/test/vmapi/primary_with_secondaries/mailbox.c
@@ -88,15 +88,15 @@
 	EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
 
 	/* Set the message, echo it and check it didn't change. */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
-	spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
-			  HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), 0);
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(message));
-	EXPECT_EQ(memcmp(mb.send->payload, message, sizeof(message)), 0);
+	EXPECT_EQ(memcmp(mb.send, message, sizeof(message)), 0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
 }
 
@@ -120,16 +120,16 @@
 
 		/* Set the message, echo it and check it didn't change. */
 		next_permutation(message, sizeof(message) - 1);
-		memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
+		memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message,
 			 sizeof(message));
-		spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
-				  HF_PRIMARY_VM_ID);
-		EXPECT_EQ(spci_msg_send(0), 0);
+		EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0,
+					sizeof(message), 0)
+				  .func,
+			  SPCI_SUCCESS_32);
 		run_res = hf_vcpu_run(SERVICE_VM0, 0);
 		EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 		EXPECT_EQ(run_res.message.size, sizeof(message));
-		EXPECT_EQ(memcmp(mb.recv->payload, message, sizeof(message)),
-			  0);
+		EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
 		EXPECT_EQ(hf_mailbox_clear(), 0);
 	}
 }
@@ -159,17 +159,19 @@
 	 * SERVICE_VM0, then to SERVICE_VM1 and finally back to here.
 	 */
 	{
-		spci_vm_id_t *chain = (spci_vm_id_t *)mb.send->payload;
+		spci_vm_id_t *chain = (spci_vm_id_t *)mb.send;
 		*chain++ = htole32(SERVICE_VM1);
 		*chain++ = htole32(HF_PRIMARY_VM_ID);
 		memcpy_s(chain,
 			 SPCI_MSG_PAYLOAD_MAX - (2 * sizeof(spci_vm_id_t)),
 			 message, sizeof(message));
 
-		spci_message_init(mb.send,
-				  sizeof(message) + (2 * sizeof(spci_vm_id_t)),
-				  SERVICE_VM0, HF_PRIMARY_VM_ID);
-		EXPECT_EQ(spci_msg_send(0), 0);
+		EXPECT_EQ(
+			spci_msg_send(
+				HF_PRIMARY_VM_ID, SERVICE_VM0,
+				sizeof(message) + (2 * sizeof(spci_vm_id_t)), 0)
+				.func,
+			SPCI_SUCCESS_32);
 	}
 
 	/* Let SERVICE_VM0 forward the message. */
@@ -185,7 +187,7 @@
 	/* Ensure the message is intact. */
 	EXPECT_EQ(run_res.message.vm_id, HF_PRIMARY_VM_ID);
 	EXPECT_EQ(run_res.message.size, sizeof(message));
-	EXPECT_EQ(memcmp(mb.recv->payload, message, sizeof(message)), 0);
+	EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
 }
 
@@ -196,17 +198,17 @@
 TEST(mailbox, no_primary_to_secondary_notification_on_configure)
 {
 	struct hf_vcpu_run_return run_res;
+	set_up_mailbox();
 
-	struct mailbox_buffers mb = set_up_mailbox();
-	spci_message_init(mb.send, 0, SERVICE_VM0, HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), SPCI_BUSY);
+	EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, 0, 0),
+			  SPCI_BUSY);
 
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
-	spci_message_init(mb.send, 0, SERVICE_VM0, HF_PRIMARY_VM_ID);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_WAIT_FOR_MESSAGE);
 	EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
 
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, 0, 0).func,
+		  SPCI_SUCCESS_32);
 }
 
 /**
@@ -216,11 +218,11 @@
 TEST(mailbox, secondary_to_primary_notification_on_configure)
 {
 	struct hf_vcpu_run_return run_res;
+	set_up_mailbox();
 
-	struct mailbox_buffers mb = set_up_mailbox();
-
-	spci_message_init(mb.send, 0, SERVICE_VM0, HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(SPCI_MSG_SEND_NOTIFY), SPCI_BUSY);
+	EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, 0,
+					SPCI_MSG_SEND_NOTIFY),
+			  SPCI_BUSY);
 
 	/*
 	 * Run first VM for it to configure itself. It should result in
@@ -234,7 +236,8 @@
 	EXPECT_EQ(hf_mailbox_waiter_get(SERVICE_VM0), -1);
 
 	/* Send should now succeed. */
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, 0, 0).func,
+		  SPCI_SUCCESS_32);
 }
 
 /**
@@ -255,15 +258,15 @@
 	EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
 
 	/* Send a message to echo service, and get response back. */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
-	spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
-			  HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), 0);
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(message));
-	EXPECT_EQ(memcmp(mb.recv->payload, message, sizeof(message)), 0);
+	EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
 
 	/* Let secondary VM continue running so that it will wait again. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -272,13 +275,13 @@
 
 	/* Without clearing our mailbox, send message again. */
 	reverse(message, strnlen_s(message, sizeof(message)));
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
-	spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
-			  HF_PRIMARY_VM_ID);
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
 
 	/* Message should be dropped since the mailbox was not cleared. */
-	EXPECT_EQ(spci_msg_send(0), 0);
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_WAIT_FOR_INTERRUPT);
 	EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
@@ -300,7 +303,7 @@
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(message));
-	EXPECT_EQ(memcmp(mb.recv->payload, message, sizeof(message)), 0);
+	EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
 }
 
 /**
@@ -321,18 +324,21 @@
 	EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
 
 	/* Send a message to echo service twice. The second should fail. */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
-	spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
-			  HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
-	EXPECT_EQ(spci_msg_send(SPCI_MSG_SEND_NOTIFY), SPCI_BUSY);
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message),
+				SPCI_MSG_SEND_NOTIFY)
+			  .arg1,
+		  SPCI_BUSY);
 
 	/* Receive a reply for the first message. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(message));
-	EXPECT_EQ(memcmp(mb.recv->payload, message, sizeof(message)), 0);
+	EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
 
 	/* Run VM again so that it clears its mailbox. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -343,5 +349,8 @@
 	EXPECT_EQ(hf_mailbox_waiter_get(SERVICE_VM0), -1);
 
 	/* Send should now succeed. */
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
 }
diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c
index 5de2649..b97dfab 100644
--- a/test/vmapi/primary_with_secondaries/memory_sharing.c
+++ b/test/vmapi/primary_with_secondaries/memory_sharing.c
@@ -84,16 +84,23 @@
 					for (m = 0;
 					     m < ARRAY_SIZE(lend_shareability);
 					     ++m) {
-						spci_memory_lend(
-							mb.send, vms[i],
-							HF_PRIMARY_VM_ID,
-							constituents, 1, 0,
-							lend_access[j],
-							lend_type[k],
-							lend_cacheability[l],
-							lend_shareability[m]);
-						EXPECT_EQ(
-							spci_msg_send(0),
+						uint32_t msg_size =
+							spci_memory_lend_init(
+								mb.send,
+								constituents, 1,
+								0,
+								lend_access[j],
+								lend_type[k],
+								lend_cacheability
+									[l],
+								lend_shareability
+									[m]);
+						EXPECT_SPCI_ERROR(
+							spci_msg_send(
+								HF_PRIMARY_VM_ID,
+								vms[i],
+								msg_size,
+								SPCI_MSG_SEND_LEGACY_MEMORY),
 							SPCI_INVALID_PARAMETERS);
 					}
 				}
@@ -116,13 +123,17 @@
 
 	int i;
 	for (i = 0; i < ARRAY_SIZE(vms); ++i) {
+		uint32_t msg_size;
 		/* Optionally skip one VM as the donate would succeed. */
 		if (vms[i] == avoid_vm) {
 			continue;
 		}
-		spci_memory_donate(mb.send, vms[i], HF_PRIMARY_VM_ID,
-				   constituents, num_elements, 0);
-		EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+		msg_size = spci_memory_donate_init(mb.send, constituents,
+						   num_elements, 0);
+		EXPECT_SPCI_ERROR(
+			spci_msg_send(HF_PRIMARY_VM_ID, vms[i], msg_size,
+				      SPCI_MSG_SEND_LEGACY_MEMORY),
+			SPCI_INVALID_PARAMETERS);
 	}
 }
 
@@ -140,9 +151,12 @@
 	int j;
 	for (i = 0; i < ARRAY_SIZE(vms); ++i) {
 		for (j = 0; j < ARRAY_SIZE(vms); ++j) {
-			spci_memory_relinquish(mb.send, vms[i], vms[j],
-					       constituents, num_elements, 0);
-			EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+			uint32_t msg_size = spci_memory_relinquish_init(
+				mb.send, constituents, num_elements, 0);
+			EXPECT_SPCI_ERROR(
+				spci_msg_send(vms[j], vms[i], msg_size,
+					      SPCI_MSG_SEND_LEGACY_MEMORY),
+				SPCI_INVALID_PARAMETERS);
 		}
 	}
 }
@@ -210,9 +224,10 @@
 	 *       API is still to be agreed on so the address is passed
 	 *       explicitly to test the mechanism.
 	 */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
-	spci_message_init(mb.send, sizeof(ptr), SERVICE_VM0, HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(ptr), 0)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
@@ -253,9 +268,10 @@
 	 *       API is still to be agreed on so the address is passed
 	 *       explicitly to test the mechanism.
 	 */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
-	spci_message_init(mb.send, sizeof(ptr), SERVICE_VM0, HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(ptr), 0)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Let the memory be returned. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -295,6 +311,7 @@
 	struct hf_vcpu_run_return run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_memory_return", mb.send);
 
@@ -306,10 +323,12 @@
 		{.address = (uint64_t)page, .page_count = 1},
 	};
 
-	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			   1, 0);
+	msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
 
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 
 	/* Let the memory be returned. */
@@ -333,6 +352,7 @@
 	struct hf_vcpu_run_return run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish", mb.send);
 
@@ -343,11 +363,15 @@
 		{.address = (uint64_t)page, .page_count = 1},
 	};
 
-	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
-			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+	msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+					 SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+					 SPCI_LEND_CACHE_WRITE_BACK,
+					 SPCI_LEND_OUTER_SHAREABLE);
 
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 
 	/* Let the memory be returned. */
@@ -385,9 +409,10 @@
 	 *       API is still to be agreed on so the address is passed
 	 *       explicitly to test the mechanism.
 	 */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
-	spci_message_init(mb.send, sizeof(ptr), SERVICE_VM0, HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(ptr), 0)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Let the memory be returned. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -423,9 +448,10 @@
 	 *       API is still to be agreed on so the address is passed
 	 *       explicitly to test the mechanism.
 	 */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
-	spci_message_init(mb.send, sizeof(ptr), SERVICE_VM0, HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(ptr), 0)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Let the memory be returned. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -460,9 +486,10 @@
 	 *       API is still to be agreed on so the address is passed
 	 *       explicitly to test the mechanism.
 	 */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
-	spci_message_init(mb.send, sizeof(ptr), SERVICE_VM0, HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(ptr), 0)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Let the memory be returned. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -500,9 +527,10 @@
 	 *       API is still to be agreed on so the address is passed
 	 *       explicitly to test the mechanism.
 	 */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
-	spci_message_init(mb.send, sizeof(ptr), SERVICE_VM0, HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(ptr), 0)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Let the memory be returned. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -534,7 +562,7 @@
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 
 	/* Check the memory was cleared. */
-	ptr = *(uint8_t **)mb.recv->payload;
+	ptr = *(uint8_t **)mb.recv;
 	for (int i = 0; i < PAGE_SIZE; ++i) {
 		ASSERT_EQ(ptr[i], 0);
 	}
@@ -560,7 +588,7 @@
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 
 	/* Check the memory was cleared. */
-	ptr = *(uint8_t **)mb.recv->payload;
+	ptr = *(uint8_t **)mb.recv;
 	for (int i = 0; i < PAGE_SIZE; ++i) {
 		ASSERT_EQ(ptr[i], 0);
 	}
@@ -578,6 +606,7 @@
 	struct hf_vcpu_run_return run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_donate_check_upper_bound", mb.send);
 
@@ -588,9 +617,11 @@
 		{.address = (uint64_t)page, .page_count = 1},
 	};
 
-	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			   1, 0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Observe the service faulting when accessing the memory. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -605,6 +636,7 @@
 	struct hf_vcpu_run_return run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_donate_check_lower_bound", mb.send);
 
@@ -615,9 +647,11 @@
 		{.address = (uint64_t)page, .page_count = 1},
 	};
 
-	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			   1, 0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Observe the service faulting when accessing the memory. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -633,6 +667,7 @@
 	struct hf_vcpu_run_return run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_memory_return", mb.send);
 	SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
@@ -644,19 +679,23 @@
 		{.address = (uint64_t)page, .page_count = 1},
 	};
 
-	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			   1, 0);
+	msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
 
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 
 	/* Let the memory be returned. */
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 
 	/* Share the memory with another VM. */
-	spci_memory_donate(mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID, constituents,
-			   1, 0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Observe the original service faulting when accessing the memory. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -672,6 +711,7 @@
 	struct hf_vcpu_run_return run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_donate_secondary_and_fault", mb.send);
 	SERVICE_SELECT(SERVICE_VM1, "spci_memory_receive", mb.send);
@@ -688,9 +728,11 @@
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_WAIT_FOR_MESSAGE);
 
 	/* Donate memory. */
-	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			   1, 0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Let the memory be sent from VM0 to VM1. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -717,6 +759,7 @@
 	struct hf_vcpu_run_return run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_donate_twice", mb.send);
 	SERVICE_SELECT(SERVICE_VM1, "spci_memory_receive", mb.send);
@@ -729,9 +772,11 @@
 	};
 
 	/* Donate memory to VM0. */
-	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			   1, 0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Let the memory be received. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -761,6 +806,7 @@
 {
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	/* Initialise the memory before giving it. */
 	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
@@ -768,10 +814,11 @@
 		{.address = (uint64_t)page, .page_count = 1},
 	};
 
-	spci_memory_donate(mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID,
-			   constituents, 1, 0);
+	msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
 
-	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID,
+					msg_size, SPCI_MSG_SEND_LEGACY_MEMORY),
+			  SPCI_INVALID_PARAMETERS);
 }
 
 /**
@@ -781,6 +828,7 @@
 {
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	/* Initialise the memory before giving it. */
 	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
@@ -788,11 +836,13 @@
 		{.address = (uint64_t)page, .page_count = 1},
 	};
 
-	spci_memory_lend(mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID,
-			 constituents, 1, 0, SPCI_LEND_RW_X,
-			 SPCI_LEND_NORMAL_MEM, SPCI_LEND_CACHE_WRITE_BACK,
-			 SPCI_LEND_OUTER_SHAREABLE);
-	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+					 SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+					 SPCI_LEND_CACHE_WRITE_BACK,
+					 SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID,
+					msg_size, SPCI_MSG_SEND_LEGACY_MEMORY),
+			  SPCI_INVALID_PARAMETERS);
 }
 
 /**
@@ -803,6 +853,7 @@
 	struct hf_vcpu_run_return run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_donate_invalid_source", mb.send);
 	SERVICE_SELECT(SERVICE_VM1, "spci_memory_receive", mb.send);
@@ -814,22 +865,27 @@
 	};
 
 	/* Try invalid configurations. */
-	spci_memory_donate(mb.send, HF_PRIMARY_VM_ID, SERVICE_VM0, constituents,
-			   1, 0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+	EXPECT_SPCI_ERROR(spci_msg_send(SERVICE_VM0, HF_PRIMARY_VM_ID, msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY),
+			  SPCI_INVALID_PARAMETERS);
 
-	spci_memory_donate(mb.send, SERVICE_VM0, SERVICE_VM0, constituents, 1,
-			   0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+	EXPECT_SPCI_ERROR(spci_msg_send(SERVICE_VM0, SERVICE_VM0, msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY),
+			  SPCI_INVALID_PARAMETERS);
 
-	spci_memory_donate(mb.send, SERVICE_VM0, SERVICE_VM1, constituents, 1,
-			   0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+	EXPECT_SPCI_ERROR(spci_msg_send(SERVICE_VM1, SERVICE_VM0, msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY),
+			  SPCI_INVALID_PARAMETERS);
 
 	/* Successfully donate to VM0. */
-	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			   1, 0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Receive and return memory from VM0. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -853,14 +909,20 @@
 		struct spci_memory_region_constituent constituents[] = {
 			{.address = (uint64_t)page + i, .page_count = 1},
 		};
-		spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID,
-				   constituents, 1, 0);
-		EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
-		spci_memory_lend(
-			mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, 1,
-			0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
-			SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
-		EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+		uint32_t msg_size =
+			spci_memory_donate_init(mb.send, constituents, 1, 0);
+		EXPECT_SPCI_ERROR(
+			spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				      SPCI_MSG_SEND_LEGACY_MEMORY),
+			SPCI_INVALID_PARAMETERS);
+		msg_size = spci_memory_lend_init(
+			mb.send, constituents, 1, 0, SPCI_LEND_RW_X,
+			SPCI_LEND_NORMAL_MEM, SPCI_LEND_CACHE_WRITE_BACK,
+			SPCI_LEND_OUTER_SHAREABLE);
+		EXPECT_SPCI_ERROR(
+			spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				      SPCI_MSG_SEND_LEGACY_MEMORY),
+			SPCI_INVALID_PARAMETERS);
 	}
 }
 
@@ -872,6 +934,7 @@
 	struct hf_vcpu_run_return run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_lend_invalid_source", mb.send);
 
@@ -882,16 +945,23 @@
 	};
 
 	/* Check cannot swap VM IDs. */
-	spci_memory_lend(mb.send, HF_PRIMARY_VM_ID, SERVICE_VM0, constituents,
-			 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
-			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
-	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+					 SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+					 SPCI_LEND_CACHE_WRITE_BACK,
+					 SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_SPCI_ERROR(spci_msg_send(SERVICE_VM0, HF_PRIMARY_VM_ID, msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY),
+			  SPCI_INVALID_PARAMETERS);
 
 	/* Lend memory to VM0. */
-	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
-			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+					 SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+					 SPCI_LEND_CACHE_WRITE_BACK,
+					 SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Receive and return memory from VM0. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -911,6 +981,7 @@
 	struct hf_vcpu_run_return run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_RW", mb.send);
 
@@ -921,11 +992,15 @@
 		{.address = (uint64_t)page, .page_count = 1},
 	};
 
-	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
-			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+	msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+					 SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+					 SPCI_LEND_CACHE_WRITE_BACK,
+					 SPCI_LEND_OUTER_SHAREABLE);
 
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Let the memory be accessed. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -944,11 +1019,15 @@
 	/* Re-initialise the memory before giving it. */
 	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
 
-	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
-			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+	msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+					 SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
+					 SPCI_LEND_CACHE_WRITE_BACK,
+					 SPCI_LEND_OUTER_SHAREABLE);
 
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Let the memory be accessed. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -974,6 +1053,7 @@
 	struct hf_vcpu_run_return run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_RW", mb.send);
 
@@ -984,11 +1064,15 @@
 		{.address = (uint64_t)page, .page_count = 1},
 	};
 
-	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			 1, 0, SPCI_LEND_RW_NX, SPCI_LEND_NORMAL_MEM,
-			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+	msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+					 SPCI_LEND_RW_NX, SPCI_LEND_NORMAL_MEM,
+					 SPCI_LEND_CACHE_WRITE_BACK,
+					 SPCI_LEND_OUTER_SHAREABLE);
 
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Let the memory be accessed. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -1006,11 +1090,15 @@
 	/* Re-initialise the memory before giving it. */
 	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
 
-	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			 1, 0, SPCI_LEND_RO_NX, SPCI_LEND_NORMAL_MEM,
-			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+	msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+					 SPCI_LEND_RO_NX, SPCI_LEND_NORMAL_MEM,
+					 SPCI_LEND_CACHE_WRITE_BACK,
+					 SPCI_LEND_OUTER_SHAREABLE);
 
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Let the memory be accessed. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -1035,6 +1123,7 @@
 	struct hf_vcpu_run_return run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_X", mb.send);
 
@@ -1049,19 +1138,27 @@
 		{.address = (uint64_t)page, .page_count = 1},
 	};
 
-	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
-			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+					 SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+					 SPCI_LEND_CACHE_WRITE_BACK,
+					 SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Attempt to execute from memory. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 
-	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			 1, 0, SPCI_LEND_RW_NX, SPCI_LEND_NORMAL_MEM,
-			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+					 SPCI_LEND_RW_NX, SPCI_LEND_NORMAL_MEM,
+					 SPCI_LEND_CACHE_WRITE_BACK,
+					 SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Try and fail to execute from the memory region. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -1076,6 +1173,7 @@
 	struct hf_vcpu_run_return run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_X", mb.send);
 
@@ -1090,19 +1188,27 @@
 		{.address = (uint64_t)page, .page_count = 1},
 	};
 
-	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
-			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+					 SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
+					 SPCI_LEND_CACHE_WRITE_BACK,
+					 SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Attempt to execute from memory. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 
-	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			 1, 0, SPCI_LEND_RO_NX, SPCI_LEND_NORMAL_MEM,
-			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+					 SPCI_LEND_RO_NX, SPCI_LEND_NORMAL_MEM,
+					 SPCI_LEND_CACHE_WRITE_BACK,
+					 SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Try and fail to execute from the memory region. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -1117,6 +1223,7 @@
 	struct hf_vcpu_run_return run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_RW", mb.send);
 	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish_RW", mb.send);
@@ -1128,11 +1235,15 @@
 		{.address = (uint64_t)page, .page_count = 2},
 	};
 
-	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
-			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+	msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+					 SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
+					 SPCI_LEND_CACHE_WRITE_BACK,
+					 SPCI_LEND_OUTER_SHAREABLE);
 
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Let the memory be accessed. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -1142,15 +1253,19 @@
 	constituents[0].page_count = 1;
 	for (int i = 1; i < PAGE_SIZE * 2; i++) {
 		constituents[0].address = (uint64_t)page + PAGE_SIZE;
-		spci_memory_donate(mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID,
-				   constituents, 1, 0);
-		EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+		msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+		EXPECT_SPCI_ERROR(
+			spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				      SPCI_MSG_SEND_LEGACY_MEMORY),
+			SPCI_INVALID_PARAMETERS);
 	}
 
 	/* Ensure we can donate to the only borrower. */
-	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			   1, 0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 }
 
 /**
@@ -1161,6 +1276,7 @@
 	struct hf_vcpu_run_return run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_twice", mb.send);
 	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_twice", mb.send);
@@ -1172,11 +1288,15 @@
 		{.address = (uint64_t)page, .page_count = 2},
 	};
 
-	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
-			 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
-			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+	msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+					 SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
+					 SPCI_LEND_CACHE_WRITE_BACK,
+					 SPCI_LEND_OUTER_SHAREABLE);
 
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Let the memory be accessed. */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -1193,10 +1313,13 @@
 	constituents[0].page_count = 1;
 	for (int i = 1; i < PAGE_SIZE * 2; i++) {
 		constituents[0].address = (uint64_t)page + PAGE_SIZE;
-		spci_memory_lend(
-			mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID, constituents, 1,
-			0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
-			SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
-		EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+		msg_size = spci_memory_lend_init(
+			mb.send, constituents, 1, 0, SPCI_LEND_RO_X,
+			SPCI_LEND_NORMAL_MEM, SPCI_LEND_CACHE_WRITE_BACK,
+			SPCI_LEND_OUTER_SHAREABLE);
+		EXPECT_SPCI_ERROR(
+			spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				      SPCI_MSG_SEND_LEGACY_MEMORY),
+			SPCI_INVALID_PARAMETERS);
 	}
 }
diff --git a/test/vmapi/primary_with_secondaries/run_race.c b/test/vmapi/primary_with_secondaries/run_race.c
index 7ba4d74..3689825 100644
--- a/test/vmapi/primary_with_secondaries/run_race.c
+++ b/test/vmapi/primary_with_secondaries/run_race.c
@@ -56,7 +56,7 @@
 
 	/* Copies the contents of the received boolean to the return value. */
 	if (run_res.message.size == sizeof(ok)) {
-		ok = *(bool *)mb->recv->payload;
+		ok = *(bool *)mb->recv;
 	}
 
 	hf_mailbox_clear();
diff --git a/test/vmapi/primary_with_secondaries/services/BUILD.gn b/test/vmapi/primary_with_secondaries/services/BUILD.gn
index 6694a93..4dbe074 100644
--- a/test/vmapi/primary_with_secondaries/services/BUILD.gn
+++ b/test/vmapi/primary_with_secondaries/services/BUILD.gn
@@ -93,6 +93,9 @@
     "..:config",
     "//test/hftest:hftest_config",
   ]
+  deps = [
+    "//test/vmapi/primary_with_secondaries:util",
+  ]
 
   sources = [
     "memory.c",
@@ -150,6 +153,7 @@
   ]
   deps = [
     "//src/arch/aarch64:arch",
+    "//test/vmapi/primary_with_secondaries:util",
   ]
 }
 
@@ -194,6 +198,9 @@
     "..:config",
     "//test/hftest:hftest_config",
   ]
+  deps = [
+    "//test/vmapi/primary_with_secondaries:util",
+  ]
 
   sources = [
     "spci_check.c",
diff --git a/test/vmapi/primary_with_secondaries/services/check_state.c b/test/vmapi/primary_with_secondaries/services/check_state.c
index a1805f9..49939dd 100644
--- a/test/vmapi/primary_with_secondaries/services/check_state.c
+++ b/test/vmapi/primary_with_secondaries/services/check_state.c
@@ -22,13 +22,14 @@
 
 #include "hftest.h"
 
-void send_with_retry()
+void send_with_retry(spci_vm_id_t sender_vm_id, spci_vm_id_t target_vm_id,
+		     uint32_t size)
 {
-	int64_t res;
+	struct spci_value res;
 
 	do {
-		res = spci_msg_send(0);
-	} while (res != SPCI_SUCCESS);
+		res = spci_msg_send(sender_vm_id, target_vm_id, size, 0);
+	} while (res.func != SPCI_SUCCESS_32);
 }
 
 /**
@@ -49,9 +50,6 @@
 	static volatile uintptr_t expected;
 	static volatile uintptr_t actual;
 
-	spci_message_init(SERVICE_SEND_BUFFER(), 0, HF_PRIMARY_VM_ID,
-			  hf_vm_get_id());
-
 	for (i = 0; i < 100000; i++) {
 		/*
 		 * We store the expected/actual values in volatile static
@@ -60,16 +58,13 @@
 		 */
 		expected = i;
 		per_cpu_ptr_set(expected);
-		send_with_retry();
+		send_with_retry(hf_vm_get_id(), HF_PRIMARY_VM_ID, 0);
 		actual = per_cpu_ptr_get();
 		ok &= expected == actual;
 	}
 
 	/* Send two replies, one for each physical CPU. */
-	memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, &ok,
-		 sizeof(ok));
-	spci_message_init(SERVICE_SEND_BUFFER(), sizeof(ok), HF_PRIMARY_VM_ID,
-			  hf_vm_get_id());
-	send_with_retry();
-	send_with_retry();
+	memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, &ok, sizeof(ok));
+	send_with_retry(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(ok));
+	send_with_retry(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(ok));
 }
diff --git a/test/vmapi/primary_with_secondaries/services/echo.c b/test/vmapi/primary_with_secondaries/services/echo.c
index 87695a8..0578710 100644
--- a/test/vmapi/primary_with_secondaries/services/echo.c
+++ b/test/vmapi/primary_with_secondaries/services/echo.c
@@ -28,17 +28,15 @@
 		struct spci_value ret = spci_msg_wait();
 		spci_vm_id_t target_vm_id = spci_msg_send_receiver(ret);
 		spci_vm_id_t source_vm_id = spci_msg_send_sender(ret);
-		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
-		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+		void *send_buf = SERVICE_SEND_BUFFER();
+		void *recv_buf = SERVICE_RECV_BUFFER();
 
 		ASSERT_EQ(ret.func, SPCI_MSG_SEND_32);
-		memcpy_s(send_buf->payload, SPCI_MSG_PAYLOAD_MAX,
-			 recv_buf->payload, spci_msg_send_size(ret));
-		spci_message_init(SERVICE_SEND_BUFFER(),
-				  spci_msg_send_size(ret), source_vm_id,
-				  target_vm_id);
+		memcpy_s(send_buf, SPCI_MSG_PAYLOAD_MAX, recv_buf,
+			 spci_msg_send_size(ret));
 
 		hf_mailbox_clear();
-		spci_msg_send(0);
+		spci_msg_send(target_vm_id, source_vm_id,
+			      spci_msg_send_size(ret), 0);
 	}
 }
diff --git a/test/vmapi/primary_with_secondaries/services/echo_with_notification.c b/test/vmapi/primary_with_secondaries/services/echo_with_notification.c
index ffd1e12..a4954d9 100644
--- a/test/vmapi/primary_with_secondaries/services/echo_with_notification.c
+++ b/test/vmapi/primary_with_secondaries/services/echo_with_notification.c
@@ -53,18 +53,19 @@
 
 	/* Loop, echo messages back to the sender. */
 	for (;;) {
-		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
-		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+		void *send_buf = SERVICE_SEND_BUFFER();
+		void *recv_buf = SERVICE_RECV_BUFFER();
 		struct spci_value ret = spci_msg_wait();
 		spci_vm_id_t target_vm_id = spci_msg_send_receiver(ret);
 		spci_vm_id_t source_vm_id = spci_msg_send_sender(ret);
 
-		memcpy_s(send_buf->payload, SPCI_MSG_PAYLOAD_MAX,
-			 recv_buf->payload, spci_msg_send_size(ret));
-		spci_message_init(send_buf, spci_msg_send_size(ret),
-				  source_vm_id, target_vm_id);
+		memcpy_s(send_buf, SPCI_MSG_PAYLOAD_MAX, recv_buf,
+			 spci_msg_send_size(ret));
 
-		while (spci_msg_send(SPCI_MSG_SEND_NOTIFY) != SPCI_SUCCESS) {
+		while (spci_msg_send(target_vm_id, source_vm_id,
+				     spci_msg_send_size(ret),
+				     SPCI_MSG_SEND_NOTIFY)
+			       .func != SPCI_SUCCESS_32) {
 			wait_for_vm(source_vm_id);
 		}
 
diff --git a/test/vmapi/primary_with_secondaries/services/interruptible.c b/test/vmapi/primary_with_secondaries/services/interruptible.c
index 94ac007..04ea783 100644
--- a/test/vmapi/primary_with_secondaries/services/interruptible.c
+++ b/test/vmapi/primary_with_secondaries/services/interruptible.c
@@ -39,11 +39,8 @@
 	dlog("secondary IRQ %d from current\n", interrupt_id);
 	buffer[8] = '0' + interrupt_id / 10;
 	buffer[9] = '0' + interrupt_id % 10;
-	memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, buffer,
-		 size);
-	spci_message_init(SERVICE_SEND_BUFFER(), size, HF_PRIMARY_VM_ID,
-			  hf_vm_get_id());
-	spci_msg_send(0);
+	memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, buffer, size);
+	spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, size, 0);
 	dlog("secondary IRQ %d ended\n", interrupt_id);
 }
 
@@ -66,7 +63,7 @@
 TEST_SERVICE(interruptible)
 {
 	spci_vm_id_t this_vm_id = hf_vm_get_id();
-	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	void *recv_buf = SERVICE_RECV_BUFFER();
 
 	exception_setup(irq);
 	hf_interrupt_enable(SELF_INTERRUPT_ID, true);
@@ -83,13 +80,12 @@
 		ASSERT_EQ(ret.func, SPCI_MSG_SEND_32);
 		if (spci_msg_send_sender(ret) == HF_PRIMARY_VM_ID &&
 		    spci_msg_send_size(ret) == sizeof(ping_message) &&
-		    memcmp(recv_buf->payload, ping_message,
-			   sizeof(ping_message)) == 0) {
+		    memcmp(recv_buf, ping_message, sizeof(ping_message)) == 0) {
 			/* Interrupt ourselves */
 			hf_interrupt_inject(this_vm_id, 0, SELF_INTERRUPT_ID);
 		} else if (spci_msg_send_sender(ret) == HF_PRIMARY_VM_ID &&
 			   spci_msg_send_size(ret) == sizeof(enable_message) &&
-			   memcmp(recv_buf->payload, enable_message,
+			   memcmp(recv_buf, enable_message,
 				  sizeof(enable_message)) == 0) {
 			/* Enable interrupt ID C. */
 			hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_C, true);
diff --git a/test/vmapi/primary_with_secondaries/services/interruptible_echo.c b/test/vmapi/primary_with_secondaries/services/interruptible_echo.c
index 560c47e..814cce1 100644
--- a/test/vmapi/primary_with_secondaries/services/interruptible_echo.c
+++ b/test/vmapi/primary_with_secondaries/services/interruptible_echo.c
@@ -39,8 +39,8 @@
 
 	for (;;) {
 		struct spci_value res = spci_msg_wait();
-		struct spci_message *message = SERVICE_SEND_BUFFER();
-		struct spci_message *recv_message = SERVICE_RECV_BUFFER();
+		void *message = SERVICE_SEND_BUFFER();
+		void *recv_message = SERVICE_RECV_BUFFER();
 
 		/* Retry if interrupted but made visible with the yield. */
 		while (res.func == SPCI_ERROR_32 &&
@@ -50,12 +50,11 @@
 		}
 
 		ASSERT_EQ(res.func, SPCI_MSG_SEND_32);
-		memcpy_s(message->payload, SPCI_MSG_PAYLOAD_MAX,
-			 recv_message->payload, spci_msg_send_size(res));
-		spci_message_init(message, spci_msg_send_size(res),
-				  HF_PRIMARY_VM_ID, SERVICE_VM0);
+		memcpy_s(message, SPCI_MSG_PAYLOAD_MAX, recv_message,
+			 spci_msg_send_size(res));
 
 		hf_mailbox_clear();
-		spci_msg_send(0);
+		spci_msg_send(SERVICE_VM0, HF_PRIMARY_VM_ID,
+			      spci_msg_send_size(res), 0);
 	}
 }
diff --git a/test/vmapi/primary_with_secondaries/services/memory.c b/test/vmapi/primary_with_secondaries/services/memory.c
index bd12409..cf15554 100644
--- a/test/vmapi/primary_with_secondaries/services/memory.c
+++ b/test/vmapi/primary_with_secondaries/services/memory.c
@@ -21,6 +21,7 @@
 
 #include "hftest.h"
 #include "primary_with_secondary.h"
+#include "util.h"
 
 alignas(PAGE_SIZE) static uint8_t page[PAGE_SIZE];
 
@@ -35,10 +36,8 @@
 		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
 
 		/* Check the memory was cleared. */
-		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
-		ptr = *(uint8_t **)recv_buf->payload;
-		spci_message_init(SERVICE_SEND_BUFFER(), sizeof(ptr),
-				  spci_msg_send_sender(ret), hf_vm_get_id());
+		void *recv_buf = SERVICE_RECV_BUFFER();
+		ptr = *(uint8_t **)recv_buf;
 
 		for (int i = 0; i < PAGE_SIZE; ++i) {
 			ASSERT_EQ(ptr[i], 0);
@@ -54,7 +53,8 @@
 
 		/* Signal completion and reset. */
 		hf_mailbox_clear();
-		spci_msg_send(0);
+		spci_msg_send(hf_vm_get_id(), spci_msg_send_sender(ret),
+			      sizeof(ptr), 0);
 	}
 }
 
@@ -64,11 +64,14 @@
 	for (;;) {
 		struct spci_value ret = spci_msg_wait();
 		uint8_t *ptr;
+		uint32_t msg_size;
 
 		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+		EXPECT_EQ(spci_msg_send_attributes(ret),
+			  SPCI_MSG_SEND_LEGACY_MEMORY);
 
-		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
-		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+		void *recv_buf = SERVICE_RECV_BUFFER();
+		void *send_buf = SERVICE_SEND_BUFFER();
 		struct spci_memory_region *memory_region =
 			(struct spci_memory_region *)(spci_get_lend_descriptor(
 							      recv_buf)
@@ -85,10 +88,11 @@
 
 		hf_mailbox_clear();
 		/* Give the memory back and notify the sender. */
-		spci_memory_relinquish(
-			send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
-			memory_region->constituents, memory_region->count, 0);
-		spci_msg_send(0);
+		msg_size = spci_memory_relinquish_init(
+			send_buf, memory_region->constituents,
+			memory_region->count, 0);
+		spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
+			      msg_size, SPCI_MSG_SEND_LEGACY_MEMORY);
 
 		/*
 		 * Try and access the memory which will cause a fault unless the
@@ -108,10 +112,8 @@
 		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
 
 		/* Check the memory was cleared. */
-		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
-		ptr = *(uint8_t **)recv_buf->payload;
-		spci_message_init(SERVICE_SEND_BUFFER(), sizeof(ptr),
-				  spci_msg_send_sender(ret), hf_vm_get_id());
+		void *recv_buf = SERVICE_RECV_BUFFER();
+		ptr = *(uint8_t **)recv_buf;
 
 		for (int i = 0; i < PAGE_SIZE; ++i) {
 			ASSERT_EQ(ptr[i], 0);
@@ -123,7 +125,8 @@
 					  HF_MEMORY_GIVE),
 			  0);
 		hf_mailbox_clear();
-		spci_msg_send(0);
+		spci_msg_send(hf_vm_get_id(), spci_msg_send_sender(ret),
+			      sizeof(ptr), 0);
 
 		/*
 		 * Try and access the memory which will cause a fault unless the
@@ -147,11 +150,12 @@
 	 *       API is still to be agreed on so the address is passed
 	 *       explicitly to test the mechanism.
 	 */
-	memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, &ptr,
+	memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, &ptr,
 		 sizeof(ptr));
-	spci_message_init(SERVICE_SEND_BUFFER(), sizeof(ptr), HF_PRIMARY_VM_ID,
-			  hf_vm_get_id());
-	EXPECT_EQ(spci_msg_send(0), 0);
+	EXPECT_EQ(
+		spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(ptr), 0)
+			.func,
+		SPCI_SUCCESS_32);
 
 	/* Try using the memory that isn't valid unless it's been returned. */
 	page[16] = 123;
@@ -171,11 +175,12 @@
 	 *       API is still to be agreed on so the address is passed
 	 *       explicitly to test the mechanism.
 	 */
-	memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, &ptr,
+	memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, &ptr,
 		 sizeof(ptr));
-	spci_message_init(SERVICE_SEND_BUFFER(), sizeof(ptr), HF_PRIMARY_VM_ID,
-			  hf_vm_get_id());
-	EXPECT_EQ(spci_msg_send(0), 0);
+	EXPECT_EQ(
+		spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(ptr), 0)
+			.func,
+		SPCI_SUCCESS_32);
 
 	/* Try using the memory that isn't valid unless it's been returned. */
 	page[633] = 180;
@@ -187,12 +192,15 @@
 	for (;;) {
 		struct spci_value ret = spci_msg_wait();
 		uint8_t *ptr;
-		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
-		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+		uint32_t msg_size;
+		void *recv_buf = SERVICE_RECV_BUFFER();
+		void *send_buf = SERVICE_SEND_BUFFER();
 		struct spci_memory_region *memory_region =
 			spci_get_donated_memory_region(recv_buf);
 
 		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+		EXPECT_EQ(spci_msg_send_attributes(ret),
+			  SPCI_MSG_SEND_LEGACY_MEMORY);
 		hf_mailbox_clear();
 
 		ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -203,10 +211,11 @@
 		}
 
 		/* Give the memory back and notify the sender. */
-		spci_memory_donate(
-			send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
-			memory_region->constituents, memory_region->count, 0);
-		spci_msg_send(0);
+		msg_size = spci_memory_donate_init(send_buf,
+						   memory_region->constituents,
+						   memory_region->count, 0);
+		spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
+			      msg_size, SPCI_MSG_SEND_LEGACY_MEMORY);
 
 		/*
 		 * Try and access the memory which will cause a fault unless the
@@ -220,11 +229,12 @@
 {
 	struct spci_value ret = spci_msg_wait();
 	uint8_t *ptr;
-	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	void *recv_buf = SERVICE_RECV_BUFFER();
 	struct spci_memory_region *memory_region =
 		spci_get_donated_memory_region(recv_buf);
 
 	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
 	hf_mailbox_clear();
 
 	ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -237,11 +247,12 @@
 {
 	struct spci_value ret = spci_msg_wait();
 	uint8_t *ptr;
-	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	void *recv_buf = SERVICE_RECV_BUFFER();
 	struct spci_memory_region *memory_region =
 		spci_get_donated_memory_region(recv_buf);
 
 	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
 	hf_mailbox_clear();
 
 	ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -257,21 +268,25 @@
 {
 	struct spci_value ret = spci_msg_wait();
 	uint8_t *ptr;
-	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
-	struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+	uint32_t msg_size;
+	void *recv_buf = SERVICE_RECV_BUFFER();
+	void *send_buf = SERVICE_SEND_BUFFER();
 	struct spci_memory_region *memory_region =
 		spci_get_donated_memory_region(recv_buf);
 
 	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
 	hf_mailbox_clear();
 
 	ptr = (uint8_t *)memory_region->constituents[0].address;
 
 	/* Donate memory to next VM. */
-	spci_memory_donate(send_buf, SERVICE_VM1, spci_msg_send_receiver(ret),
-			   memory_region->constituents, memory_region->count,
-			   0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_donate_init(
+		send_buf, memory_region->constituents, memory_region->count, 0);
+	EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret), SERVICE_VM1,
+				msg_size, SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Ensure that we are unable to modify memory any more. */
 	ptr[0] = 'c';
@@ -284,29 +299,37 @@
  */
 TEST_SERVICE(spci_donate_twice)
 {
+	uint32_t msg_size;
 	struct spci_value ret = spci_msg_wait();
-	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
-	struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+	void *recv_buf = SERVICE_RECV_BUFFER();
+	void *send_buf = SERVICE_SEND_BUFFER();
 	struct spci_memory_region *memory_region =
 		spci_get_donated_memory_region(recv_buf);
 	struct spci_memory_region_constituent constituent =
 		memory_region->constituents[0];
 
 	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
 	hf_mailbox_clear();
 
-	/* Yield to allow attempt to re donate from primary. */
+	/* Yield to allow the primary to attempt to re-donate. */
 	spci_yield();
 
 	/* Give the memory back and notify the sender. */
-	spci_memory_donate(send_buf, HF_PRIMARY_VM_ID, SERVICE_VM0,
-			   &constituent, memory_region->count, 0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_donate_init(send_buf, &constituent,
+					   memory_region->count, 0);
+	EXPECT_EQ(spci_msg_send(SERVICE_VM0, HF_PRIMARY_VM_ID, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Attempt to donate the memory to another VM. */
-	spci_memory_donate(send_buf, SERVICE_VM1, spci_msg_send_receiver(ret),
-			   &constituent, memory_region->count, 0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	msg_size = spci_memory_donate_init(send_buf, &constituent,
+					   memory_region->count, 0);
+	EXPECT_SPCI_ERROR(
+		spci_msg_send(spci_msg_send_receiver(ret), SERVICE_VM1,
+			      msg_size, SPCI_MSG_SEND_LEGACY_MEMORY),
+		SPCI_INVALID_PARAMETERS);
 
 	spci_yield();
 }
@@ -320,11 +343,13 @@
 	for (;;) {
 		struct spci_value ret = spci_msg_wait();
 		uint8_t *ptr;
-		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+		void *recv_buf = SERVICE_RECV_BUFFER();
 		struct spci_memory_region *memory_region =
 			spci_get_donated_memory_region(recv_buf);
 
 		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+		EXPECT_EQ(spci_msg_send_attributes(ret),
+			  SPCI_MSG_SEND_LEGACY_MEMORY);
 		hf_mailbox_clear();
 
 		ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -342,26 +367,31 @@
  */
 TEST_SERVICE(spci_donate_invalid_source)
 {
+	uint32_t msg_size;
 	struct spci_value ret = spci_msg_wait();
-	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
-	struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+	void *recv_buf = SERVICE_RECV_BUFFER();
+	void *send_buf = SERVICE_SEND_BUFFER();
 	struct spci_memory_region *memory_region =
 		spci_get_donated_memory_region(recv_buf);
 
 	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
 	hf_mailbox_clear();
 
 	/* Give the memory back and notify the sender. */
-	spci_memory_donate(
-		send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
-		memory_region->constituents, memory_region->count, 0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_donate_init(
+		send_buf, memory_region->constituents, memory_region->count, 0);
+	EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
+				msg_size, SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Fail to donate the memory from the primary to VM1. */
-	spci_memory_donate(send_buf, SERVICE_VM1, HF_PRIMARY_VM_ID,
-			   memory_region->constituents, memory_region->count,
-			   0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	msg_size = spci_memory_donate_init(
+		send_buf, memory_region->constituents, memory_region->count, 0);
+	EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY),
+			  SPCI_INVALID_PARAMETERS);
 	spci_yield();
 }
 
@@ -371,15 +401,18 @@
 	for (;;) {
 		struct spci_value ret = spci_msg_wait();
 		uint8_t *ptr;
+		uint32_t msg_size;
 
-		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
-		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+		void *recv_buf = SERVICE_RECV_BUFFER();
+		void *send_buf = SERVICE_SEND_BUFFER();
 		struct spci_memory_region *memory_region =
 			(struct spci_memory_region *)(spci_get_lend_descriptor(
 							      recv_buf)
 							      ->payload);
 
 		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+		EXPECT_EQ(spci_msg_send_attributes(ret),
+			  SPCI_MSG_SEND_LEGACY_MEMORY);
 		ptr = (uint8_t *)memory_region->constituents[0].address;
 		/* Relevant information read, mailbox can be cleared. */
 		hf_mailbox_clear();
@@ -391,10 +424,11 @@
 
 		hf_mailbox_clear();
 		/* Give the memory back and notify the sender. */
-		spci_memory_relinquish(
-			send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
-			memory_region->constituents, memory_region->count, 0);
-		spci_msg_send(0);
+		msg_size = spci_memory_relinquish_init(
+			send_buf, memory_region->constituents,
+			memory_region->count, 0);
+		spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
+			      msg_size, SPCI_MSG_SEND_LEGACY_MEMORY);
 
 		/*
 		 * Try and access the memory which will cause a fault unless the
@@ -412,13 +446,16 @@
 	for (;;) {
 		struct spci_value ret = spci_msg_wait();
 		uint8_t *ptr;
+		uint32_t msg_size;
 
-		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
-		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+		void *recv_buf = SERVICE_RECV_BUFFER();
+		void *send_buf = SERVICE_SEND_BUFFER();
 		struct spci_memory_region *memory_region =
 			spci_get_donated_memory_region(recv_buf);
 
 		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+		EXPECT_EQ(spci_msg_send_attributes(ret),
+			  SPCI_MSG_SEND_LEGACY_MEMORY);
 		hf_mailbox_clear();
 
 		ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -428,10 +465,13 @@
 			ptr[i]++;
 		}
 		/* Give the memory back and notify the sender. */
-		spci_memory_relinquish(
-			send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
-			memory_region->constituents, memory_region->count, 0);
-		EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+		msg_size = spci_memory_relinquish_init(
+			send_buf, memory_region->constituents,
+			memory_region->count, 0);
+		EXPECT_SPCI_ERROR(spci_msg_send(spci_msg_send_receiver(ret),
+						HF_PRIMARY_VM_ID, msg_size,
+						SPCI_MSG_SEND_LEGACY_MEMORY),
+				  SPCI_INVALID_PARAMETERS);
 
 		/* Ensure we still have access to the memory. */
 		ptr[0] = 123;
@@ -445,35 +485,43 @@
  */
 TEST_SERVICE(spci_lend_invalid_source)
 {
+	uint32_t msg_size;
 	struct spci_value ret = spci_msg_wait();
 
-	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
-	struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+	void *recv_buf = SERVICE_RECV_BUFFER();
+	void *send_buf = SERVICE_SEND_BUFFER();
 	struct spci_memory_region *memory_region =
 		(struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf)
 						      ->payload);
 
 	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
 	hf_mailbox_clear();
 
 	/* Attempt to relinquish from primary VM. */
-	spci_memory_relinquish(send_buf, spci_msg_send_receiver(ret),
-			       HF_PRIMARY_VM_ID, memory_region->constituents,
-			       memory_region->count, 0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	msg_size = spci_memory_relinquish_init(
+		send_buf, memory_region->constituents, memory_region->count, 0);
+	EXPECT_SPCI_ERROR(
+		spci_msg_send(HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
+			      msg_size, SPCI_MSG_SEND_LEGACY_MEMORY),
+		SPCI_INVALID_PARAMETERS);
 
 	/* Give the memory back and notify the sender. */
-	spci_memory_relinquish(
-		send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
-		memory_region->constituents, memory_region->count, 0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_relinquish_init(
+		send_buf, memory_region->constituents, memory_region->count, 0);
+	EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
+				msg_size, SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Ensure we cannot lend from the primary to another secondary. */
-	spci_memory_lend(send_buf, SERVICE_VM1, HF_PRIMARY_VM_ID,
-			 memory_region->constituents, memory_region->count, 0,
-			 SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
-			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
-	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	msg_size = spci_memory_lend_init(
+		send_buf, memory_region->constituents, memory_region->count, 0,
+		SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+		SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY),
+			  SPCI_INVALID_PARAMETERS);
 	spci_yield();
 }
 
@@ -485,15 +533,18 @@
 	for (;;) {
 		struct spci_value ret = spci_msg_wait();
 		uint64_t *ptr;
+		uint32_t msg_size;
 
-		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
-		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+		void *recv_buf = SERVICE_RECV_BUFFER();
+		void *send_buf = SERVICE_SEND_BUFFER();
 		struct spci_memory_region *memory_region =
 			(struct spci_memory_region *)(spci_get_lend_descriptor(
 							      recv_buf)
 							      ->payload);
 
 		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+		EXPECT_EQ(spci_msg_send_attributes(ret),
+			  SPCI_MSG_SEND_LEGACY_MEMORY);
 		hf_mailbox_clear();
 
 		ptr = (uint64_t *)memory_region->constituents[0].address;
@@ -506,10 +557,14 @@
 		__asm__ volatile("blr %0" ::"r"(ptr));
 
 		/* Release the memory again. */
-		spci_memory_relinquish(
-			send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
-			memory_region->constituents, memory_region->count, 0);
-		EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+		msg_size = spci_memory_relinquish_init(
+			send_buf, memory_region->constituents,
+			memory_region->count, 0);
+		EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret),
+					HF_PRIMARY_VM_ID, msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY)
+				  .func,
+			  SPCI_SUCCESS_32);
 	}
 }
 
@@ -521,15 +576,18 @@
 	for (;;) {
 		struct spci_value ret = spci_msg_wait();
 		uint8_t *ptr;
+		uint32_t msg_size;
 
-		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
-		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+		void *recv_buf = SERVICE_RECV_BUFFER();
+		void *send_buf = SERVICE_SEND_BUFFER();
 		struct spci_memory_region *memory_region =
 			(struct spci_memory_region *)(spci_get_lend_descriptor(
 							      recv_buf)
 							      ->payload);
 
 		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+		EXPECT_EQ(spci_msg_send_attributes(ret),
+			  SPCI_MSG_SEND_LEGACY_MEMORY);
 		hf_mailbox_clear();
 
 		ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -547,10 +605,14 @@
 			ptr[i]++;
 		}
 
-		spci_memory_relinquish(
-			send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
-			memory_region->constituents, memory_region->count, 0);
-		EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+		msg_size = spci_memory_relinquish_init(
+			send_buf, memory_region->constituents,
+			memory_region->count, 0);
+		EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret),
+					HF_PRIMARY_VM_ID, msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY)
+				  .func,
+			  SPCI_SUCCESS_32);
 	}
 }
 
@@ -562,12 +624,13 @@
 	struct spci_value ret = spci_msg_wait();
 	uint8_t *ptr;
 
-	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	void *recv_buf = SERVICE_RECV_BUFFER();
 	struct spci_memory_region *memory_region =
 		(struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf)
 						      ->payload);
 
 	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
 	hf_mailbox_clear();
 
 	ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -584,12 +647,13 @@
 	struct spci_value ret = spci_msg_wait();
 	uint8_t *ptr;
 
-	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	void *recv_buf = SERVICE_RECV_BUFFER();
 	struct spci_memory_region *memory_region =
 		(struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf)
 						      ->payload);
 
 	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
 	hf_mailbox_clear();
 
 	ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -602,14 +666,16 @@
 {
 	struct spci_value ret = spci_msg_wait();
 	uint8_t *ptr;
+	uint32_t msg_size;
 
-	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
-	struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+	void *recv_buf = SERVICE_RECV_BUFFER();
+	void *send_buf = SERVICE_SEND_BUFFER();
 	struct spci_memory_region *memory_region =
 		(struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf)
 						      ->payload);
 
 	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
 	hf_mailbox_clear();
 
 	ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -631,16 +697,21 @@
 		memory_region->constituents[0].address = (uint64_t)ptr + i;
 
 		/* Fail to lend the memory back to the primary. */
-		spci_memory_lend(
-			send_buf, SERVICE_VM1, HF_PRIMARY_VM_ID,
-			memory_region->constituents, memory_region->count, 0,
-			SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
-			SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
-		EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+		msg_size = spci_memory_lend_init(
+			send_buf, memory_region->constituents,
+			memory_region->count, 0, SPCI_LEND_RW_X,
+			SPCI_LEND_NORMAL_MEM, SPCI_LEND_CACHE_WRITE_BACK,
+			SPCI_LEND_OUTER_SHAREABLE);
+		EXPECT_SPCI_ERROR(
+			spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				      SPCI_MSG_SEND_LEGACY_MEMORY),
+			SPCI_INVALID_PARAMETERS);
 	}
 
-	spci_memory_relinquish(
-		send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
-		memory_region->constituents, memory_region->count, 0);
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	msg_size = spci_memory_relinquish_init(
+		send_buf, memory_region->constituents, memory_region->count, 0);
+	EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
+				msg_size, SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 }
diff --git a/test/vmapi/primary_with_secondaries/services/receive_block.c b/test/vmapi/primary_with_secondaries/services/receive_block.c
index 666754f..c8c4de2 100644
--- a/test/vmapi/primary_with_secondaries/services/receive_block.c
+++ b/test/vmapi/primary_with_secondaries/services/receive_block.c
@@ -24,6 +24,7 @@
 
 #include "hftest.h"
 #include "primary_with_secondary.h"
+#include "util.h"
 
 /*
  * Secondary VM that enables an interrupt, disables interrupts globally, and
@@ -47,14 +48,11 @@
 
 	for (i = 0; i < 10; ++i) {
 		struct spci_value res = spci_msg_wait();
-		EXPECT_EQ(res.func, SPCI_ERROR_32);
-		EXPECT_EQ(res.arg1, SPCI_INTERRUPTED);
+		EXPECT_SPCI_ERROR(res, SPCI_INTERRUPTED);
 	}
 
-	memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, message,
+	memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, message,
 		 sizeof(message));
-	spci_message_init(SERVICE_SEND_BUFFER(), sizeof(message),
-			  HF_PRIMARY_VM_ID, hf_vm_get_id());
 
-	spci_msg_send(0);
+	spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(message), 0);
 }
diff --git a/test/vmapi/primary_with_secondaries/services/relay.c b/test/vmapi/primary_with_secondaries/services/relay.c
index 1003699..61e26b9 100644
--- a/test/vmapi/primary_with_secondaries/services/relay.c
+++ b/test/vmapi/primary_with_secondaries/services/relay.c
@@ -40,23 +40,21 @@
 		ASSERT_EQ(ret.func, SPCI_MSG_SEND_32);
 
 		/* Prepare to relay the message. */
-		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
-		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+		void *recv_buf = SERVICE_RECV_BUFFER();
+		void *send_buf = SERVICE_SEND_BUFFER();
 		ASSERT_GE(spci_msg_send_size(ret), sizeof(spci_vm_id_t));
 
-		chain = (spci_vm_id_t *)recv_buf->payload;
+		chain = (spci_vm_id_t *)recv_buf;
 		next_vm_id = le16toh(*chain);
 		next_message = chain + 1;
 		next_message_size =
 			spci_msg_send_size(ret) - sizeof(spci_vm_id_t);
 
 		/* Send the message to the next stage. */
-		memcpy_s(send_buf->payload, SPCI_MSG_PAYLOAD_MAX, next_message,
+		memcpy_s(send_buf, SPCI_MSG_PAYLOAD_MAX, next_message,
 			 next_message_size);
-		spci_message_init(send_buf, next_message_size, next_vm_id,
-				  hf_vm_get_id());
 
 		hf_mailbox_clear();
-		spci_msg_send(0);
+		spci_msg_send(hf_vm_get_id(), next_vm_id, next_message_size, 0);
 	}
 }
diff --git a/test/vmapi/primary_with_secondaries/services/smp.c b/test/vmapi/primary_with_secondaries/services/smp.c
index 5ad31d0..cd8fe9e 100644
--- a/test/vmapi/primary_with_secondaries/services/smp.c
+++ b/test/vmapi/primary_with_secondaries/services/smp.c
@@ -41,12 +41,10 @@
 /** Send a message back to the primary. */
 void send_message(const char *message, uint32_t size)
 {
-	memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 size);
-	spci_message_init(SERVICE_SEND_BUFFER(), size, HF_PRIMARY_VM_ID,
-			  hf_vm_get_id());
+	memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, message, size);
 
-	ASSERT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	ASSERT_EQ(spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, size, 0).func,
+		  SPCI_SUCCESS_32);
 }
 
 /**
diff --git a/test/vmapi/primary_with_secondaries/services/spci_check.c b/test/vmapi/primary_with_secondaries/services/spci_check.c
index 3ce9a1f..0cb1cc0 100644
--- a/test/vmapi/primary_with_secondaries/services/spci_check.c
+++ b/test/vmapi/primary_with_secondaries/services/spci_check.c
@@ -21,24 +21,12 @@
 
 #include "hftest.h"
 #include "primary_with_secondary.h"
+#include "util.h"
 
 TEST_SERVICE(spci_check)
 {
-	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	void *recv_buf = SERVICE_RECV_BUFFER();
 	const char message[] = "spci_msg_send";
-	struct spci_message expected_message = {
-		.flags = SPCI_MESSAGE_IMPDEF_MASK,
-		.length = sizeof(message),
-		.target_vm_id = hf_vm_get_id(),
-		.source_vm_id = HF_PRIMARY_VM_ID,
-
-		/*
-		 * TODO: Padding fields may be set to MBZ in the next SPCI spec
-		 * versions.
-		 */
-		.reserved_1 = 0,
-		.reserved_2 = 0,
-	};
 
 	/* Wait for single message to be sent by the primary VM. */
 	struct spci_value ret = spci_msg_wait();
@@ -49,18 +37,16 @@
 	EXPECT_EQ(spci_msg_send_size(ret), sizeof(message));
 	EXPECT_EQ(spci_msg_send_receiver(ret), hf_vm_get_id());
 	EXPECT_EQ(spci_msg_send_sender(ret), HF_PRIMARY_VM_ID);
-	EXPECT_EQ(memcmp(recv_buf, &expected_message, sizeof(expected_message)),
-		  0);
 
 	/* Ensure that the payload was correctly transmitted. */
-	EXPECT_EQ(memcmp(recv_buf->payload, message, sizeof(message)), 0);
+	EXPECT_EQ(memcmp(recv_buf, message, sizeof(message)), 0);
 
 	spci_yield();
 }
 
 TEST_SERVICE(spci_length)
 {
-	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	void *recv_buf = SERVICE_RECV_BUFFER();
 	const char message[] = "this should be truncated";
 
 	/* Wait for single message to be sent by the primary VM. */
@@ -72,9 +58,8 @@
 	EXPECT_EQ(16, spci_msg_send_size(ret));
 
 	/* Check only part of the message is sent correctly. */
-	EXPECT_NE(memcmp(recv_buf->payload, message, sizeof(message)), 0);
-	EXPECT_EQ(memcmp(recv_buf->payload, message, spci_msg_send_size(ret)),
-		  0);
+	EXPECT_NE(memcmp(recv_buf, message, sizeof(message)), 0);
+	EXPECT_EQ(memcmp(recv_buf, message, spci_msg_send_size(ret)), 0);
 
 	spci_yield();
 }
@@ -84,8 +69,7 @@
 	/* Wait for single message to be sent by the primary VM. */
 	struct spci_value ret = spci_msg_poll();
 
-	EXPECT_EQ(ret.func, SPCI_ERROR_32);
-	EXPECT_EQ(ret.arg1, SPCI_RETRY);
+	EXPECT_SPCI_ERROR(ret, SPCI_RETRY);
 
 	spci_yield();
 }
diff --git a/test/vmapi/primary_with_secondaries/services/wfi.c b/test/vmapi/primary_with_secondaries/services/wfi.c
index 6935107..7dbd372 100644
--- a/test/vmapi/primary_with_secondaries/services/wfi.c
+++ b/test/vmapi/primary_with_secondaries/services/wfi.c
@@ -48,10 +48,8 @@
 		interrupt_wait();
 	}
 
-	memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, message,
+	memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, message,
 		 sizeof(message));
-	spci_message_init(SERVICE_SEND_BUFFER(), sizeof(message),
-			  HF_PRIMARY_VM_ID, hf_vm_get_id());
 
-	spci_msg_send(0);
+	spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(message), 0);
 }
diff --git a/test/vmapi/primary_with_secondaries/smp.c b/test/vmapi/primary_with_secondaries/smp.c
index de91d78..af5c637 100644
--- a/test/vmapi/primary_with_secondaries/smp.c
+++ b/test/vmapi/primary_with_secondaries/smp.c
@@ -48,7 +48,7 @@
 	run_res = hf_vcpu_run(SERVICE_VM2, 1);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response_1));
-	EXPECT_EQ(memcmp(mb.recv->payload, expected_response_1,
+	EXPECT_EQ(memcmp(mb.recv, expected_response_1,
 			 sizeof(expected_response_1)),
 		  0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
@@ -58,7 +58,7 @@
 	run_res = hf_vcpu_run(SERVICE_VM2, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response_0));
-	EXPECT_EQ(memcmp(mb.recv->payload, expected_response_0,
+	EXPECT_EQ(memcmp(mb.recv, expected_response_0,
 			 sizeof(expected_response_0)),
 		  0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
diff --git a/test/vmapi/primary_with_secondaries/spci.c b/test/vmapi/primary_with_secondaries/spci.c
index ac54da7..f551830 100644
--- a/test/vmapi/primary_with_secondaries/spci.c
+++ b/test/vmapi/primary_with_secondaries/spci.c
@@ -39,11 +39,11 @@
 	SERVICE_SELECT(SERVICE_VM0, "spci_check", mb.send);
 
-	/* Set the payload, init the message header and send the message. */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
-	spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
-			  HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), 0);
+	/* Set the payload and send the message. */
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
 
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
@@ -60,10 +60,10 @@
 	SERVICE_SELECT(SERVICE_VM0, "spci_check", mb.send);
 
-	/* Set the payload, init the message header and send the message. */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
-	spci_message_init(mb.send, sizeof(message), SERVICE_VM0, SERVICE_VM1);
-	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	/* Set the payload and send the message. */
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_SPCI_ERROR(
+		spci_msg_send(SERVICE_VM1, SERVICE_VM0, sizeof(message), 0),
+		SPCI_INVALID_PARAMETERS);
 }
 
 /**
@@ -76,10 +76,10 @@
 
 	SERVICE_SELECT(SERVICE_VM0, "spci_check", mb.send);
-	/* Set the payload, init the message header and send the message. */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
-	spci_message_init(mb.send, sizeof(message), -1, HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	/* Set the payload and send the message. */
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_SPCI_ERROR(
+		spci_msg_send(HF_PRIMARY_VM_ID, -1, sizeof(message), 0),
+		SPCI_INVALID_PARAMETERS);
 }
 
 /**
@@ -94,12 +94,10 @@
 	SERVICE_SELECT(SERVICE_VM0, "spci_length", mb.send);
 
 	/* Send the message and compare if truncated. */
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
 	/* Hard code incorrect length. */
-	spci_message_init(mb.send, 16, SERVICE_VM0, HF_PRIMARY_VM_ID);
-
-	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, 16, 0).func,
+		  SPCI_SUCCESS_32);
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
 }
@@ -112,11 +110,11 @@
 	const char message[] = "fail to send";
 	struct mailbox_buffers mb = set_up_mailbox();
 
-	memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
-		 sizeof(message));
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
 	/* Send a message that is larger than the mailbox supports (4KB). */
-	spci_message_init(mb.send, 4 * 1024, SERVICE_VM0, HF_PRIMARY_VM_ID);
-	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	EXPECT_SPCI_ERROR(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, 4 * 1024 + 1, 0),
+		SPCI_INVALID_PARAMETERS);
 }
 
 /**
diff --git a/test/vmapi/primary_with_secondaries/util.c b/test/vmapi/primary_with_secondaries/util.c
index 082daf6..419ec6c 100644
--- a/test/vmapi/primary_with_secondaries/util.c
+++ b/test/vmapi/primary_with_secondaries/util.c
@@ -36,7 +36,7 @@
 {
 	ASSERT_EQ(hf_vm_configure(send_page_addr, recv_page_addr), 0);
 	return (struct mailbox_buffers){
-		.send = ((struct spci_message *)send_page),
-		.recv = ((struct spci_message *)recv_page),
+		.send = send_page,
+		.recv = recv_page,
 	};
 }