SPCI is now called PSA FF-A.

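SPCI has been renamed to PSA FF-A (the Arm Firmware Framework), so
rename the types, constants, functions, files and comments to match.
This is a mechanical rename with no intended change in behaviour.
Representative mappings, illustrative rather than exhaustive:

  struct spci_value   ->  struct ffa_value
  spci_vm_id_t        ->  ffa_vm_id_t
  spci_error(...)     ->  ffa_error(...)
  SPCI_SUCCESS_32     ->  FFA_SUCCESS_32
  api_spci_run(...)   ->  api_ffa_run(...)
  src/spci_memory.c   ->  src/ffa_memory.c
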
Change-Id: Iaa10e0449edf5f6493ab21e648219392b17cc5ec
diff --git a/src/BUILD.gn b/src/BUILD.gn
index 66c9211..8ec91a0 100644
--- a/src/BUILD.gn
+++ b/src/BUILD.gn
@@ -55,8 +55,8 @@
   sources = [
     "api.c",
     "cpu.c",
+    "ffa_memory.c",
     "manifest.c",
-    "spci_memory.c",
     "vcpu.c",
   ]
 
diff --git a/src/api.c b/src/api.c
index be47717..11d0fc2 100644
--- a/src/api.c
+++ b/src/api.c
@@ -22,17 +22,17 @@
 
 #include "hf/check.h"
 #include "hf/dlog.h"
+#include "hf/ffa_internal.h"
+#include "hf/ffa_memory.h"
 #include "hf/mm.h"
 #include "hf/plat/console.h"
-#include "hf/spci_internal.h"
-#include "hf/spci_memory.h"
 #include "hf/spinlock.h"
 #include "hf/static_assert.h"
 #include "hf/std.h"
 #include "hf/vm.h"
 
 #include "vmapi/hf/call.h"
-#include "vmapi/hf/spci.h"
+#include "vmapi/hf/ffa.h"
 
 /*
  * To eliminate the risk of deadlocks, we define a partial order for the
@@ -69,10 +69,10 @@
  * Switches the physical CPU back to the corresponding vCPU of the primary VM.
  *
  * This triggers the scheduling logic to run. Run in the context of a secondary VM
- * to cause SPCI_RUN to return and the primary VM to regain control of the CPU.
+ * to cause FFA_RUN to return and the primary VM to regain control of the CPU.
  */
 static struct vcpu *api_switch_to_primary(struct vcpu *current,
-					  struct spci_value primary_ret,
+					  struct ffa_value primary_ret,
 					  enum vcpu_state secondary_state)
 {
 	struct vm *primary = vm_find(HF_PRIMARY_VM_ID);
@@ -83,8 +83,8 @@
 	 * timer fires rather than indefinitely.
 	 */
 	switch (primary_ret.func) {
-	case HF_SPCI_RUN_WAIT_FOR_INTERRUPT:
-	case SPCI_MSG_WAIT_32: {
+	case HF_FFA_RUN_WAIT_FOR_INTERRUPT:
+	case FFA_MSG_WAIT_32: {
 		if (arch_timer_enabled_current()) {
 			uint64_t remaining_ns =
 				arch_timer_remaining_ns_current();
@@ -94,7 +94,7 @@
 				 * Timer is pending, so the current vCPU should
 				 * be run again right away.
 				 */
-				primary_ret.func = SPCI_INTERRUPT_32;
+				primary_ret.func = FFA_INTERRUPT_32;
 				/*
 				 * primary_ret.arg1 should already be set to the
 				 * current VM ID and vCPU ID.
@@ -104,7 +104,7 @@
 				primary_ret.arg2 = remaining_ns;
 			}
 		} else {
-			primary_ret.arg2 = SPCI_SLEEP_INDEFINITE;
+			primary_ret.arg2 = FFA_SLEEP_INDEFINITE;
 		}
 		break;
 	}
@@ -130,9 +130,9 @@
  */
 struct vcpu *api_preempt(struct vcpu *current)
 {
-	struct spci_value ret = {
-		.func = SPCI_INTERRUPT_32,
-		.arg1 = spci_vm_vcpu(current->vm->id, vcpu_index(current)),
+	struct ffa_value ret = {
+		.func = FFA_INTERRUPT_32,
+		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
 	};
 
 	return api_switch_to_primary(current, ret, VCPU_STATE_READY);
@@ -144,9 +144,9 @@
  */
 struct vcpu *api_wait_for_interrupt(struct vcpu *current)
 {
-	struct spci_value ret = {
-		.func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
-		.arg1 = spci_vm_vcpu(current->vm->id, vcpu_index(current)),
+	struct ffa_value ret = {
+		.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
+		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
 	};
 
 	return api_switch_to_primary(current, ret,
@@ -158,9 +158,9 @@
  */
 struct vcpu *api_vcpu_off(struct vcpu *current)
 {
-	struct spci_value ret = {
-		.func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
-		.arg1 = spci_vm_vcpu(current->vm->id, vcpu_index(current)),
+	struct ffa_value ret = {
+		.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
+		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
 	};
 
 	/*
@@ -179,9 +179,9 @@
  */
 void api_yield(struct vcpu *current, struct vcpu **next)
 {
-	struct spci_value primary_ret = {
-		.func = SPCI_YIELD_32,
-		.arg1 = spci_vm_vcpu(current->vm->id, vcpu_index(current)),
+	struct ffa_value primary_ret = {
+		.func = FFA_YIELD_32,
+		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
 	};
 
 	if (current->vm->id == HF_PRIMARY_VM_ID) {
@@ -198,10 +198,10 @@
  */
 struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu)
 {
-	struct spci_value ret = {
-		.func = HF_SPCI_RUN_WAKE_UP,
-		.arg1 = spci_vm_vcpu(target_vcpu->vm->id,
-				     vcpu_index(target_vcpu)),
+	struct ffa_value ret = {
+		.func = HF_FFA_RUN_WAKE_UP,
+		.arg1 = ffa_vm_vcpu(target_vcpu->vm->id,
+				    vcpu_index(target_vcpu)),
 	};
 	return api_switch_to_primary(current, ret, VCPU_STATE_READY);
 }
@@ -211,7 +211,7 @@
  */
 struct vcpu *api_abort(struct vcpu *current)
 {
-	struct spci_value ret = spci_error(SPCI_ABORTED);
+	struct ffa_value ret = ffa_error(FFA_ABORTED);
 
 	dlog_notice("Aborting VM %u vCPU %u\n", current->vm->id,
 		    vcpu_index(current));
@@ -234,16 +234,16 @@
 /**
  * Returns the ID of the VM.
  */
-struct spci_value api_spci_id_get(const struct vcpu *current)
+struct ffa_value api_ffa_id_get(const struct vcpu *current)
 {
-	return (struct spci_value){.func = SPCI_SUCCESS_32,
-				   .arg2 = current->vm->id};
+	return (struct ffa_value){.func = FFA_SUCCESS_32,
+				  .arg2 = current->vm->id};
 }
 
 /**
  * Returns the number of VMs configured to run.
  */
-spci_vm_count_t api_vm_get_count(void)
+ffa_vm_count_t api_vm_get_count(void)
 {
 	return vm_get_count();
 }
@@ -252,8 +252,8 @@
  * Returns the number of vCPUs configured in the given VM, or 0 if there is no
  * such VM or the caller is not the primary VM.
  */
-spci_vcpu_count_t api_vcpu_get_count(spci_vm_id_t vm_id,
-				     const struct vcpu *current)
+ffa_vcpu_count_t api_vcpu_get_count(ffa_vm_id_t vm_id,
+				    const struct vcpu *current)
 {
 	struct vm *vm;
 
@@ -370,29 +370,29 @@
 }
 
 /**
- * Constructs an SPCI_MSG_SEND value to return from a successful SPCI_MSG_POLL
- * or SPCI_MSG_WAIT call.
+ * Constructs an FFA_MSG_SEND value to return from a successful FFA_MSG_POLL
+ * or FFA_MSG_WAIT call.
  */
-static struct spci_value spci_msg_recv_return(const struct vm *receiver)
+static struct ffa_value ffa_msg_recv_return(const struct vm *receiver)
 {
 	switch (receiver->mailbox.recv_func) {
-	case SPCI_MSG_SEND_32:
-		return (struct spci_value){
-			.func = SPCI_MSG_SEND_32,
+	case FFA_MSG_SEND_32:
+		return (struct ffa_value){
+			.func = FFA_MSG_SEND_32,
 			.arg1 = (receiver->mailbox.recv_sender << 16) |
 				receiver->id,
 			.arg3 = receiver->mailbox.recv_size};
-	case SPCI_MEM_DONATE_32:
-	case SPCI_MEM_LEND_32:
-	case SPCI_MEM_SHARE_32:
-		return (struct spci_value){.func = receiver->mailbox.recv_func,
-					   .arg1 = receiver->mailbox.recv_size,
-					   .arg2 = receiver->mailbox.recv_size};
+	case FFA_MEM_DONATE_32:
+	case FFA_MEM_LEND_32:
+	case FFA_MEM_SHARE_32:
+		return (struct ffa_value){.func = receiver->mailbox.recv_func,
+					  .arg1 = receiver->mailbox.recv_size,
+					  .arg2 = receiver->mailbox.recv_size};
 	default:
 		/* This should never be reached, but return an error just in case. */
 		dlog_error("Tried to return an invalid message function %#x\n",
 			   receiver->mailbox.recv_func);
-		return spci_error(SPCI_DENIED);
+		return ffa_error(FFA_DENIED);
 	}
 }
 
@@ -401,7 +401,7 @@
  * value needs to be forced onto the vCPU.
  */
 static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
-				 struct spci_value *run_ret)
+				 struct ffa_value *run_ret)
 {
 	bool need_vm_lock;
 	bool ret;
@@ -443,7 +443,7 @@
 		 * other physical CPU that is currently running this vCPU will
 		 * return the sleep duration if needed.
 		 */
-		*run_ret = spci_error(SPCI_BUSY);
+		*run_ret = ffa_error(FFA_BUSY);
 		ret = false;
 		goto out;
 	}
@@ -472,7 +472,7 @@
 		 */
 		if (vcpu->vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
 			arch_regs_set_retval(&vcpu->regs,
-					     spci_msg_recv_return(vcpu->vm));
+					     ffa_msg_recv_return(vcpu->vm));
 			vcpu->vm->mailbox.state = MAILBOX_STATE_READ;
 			break;
 		}
@@ -501,10 +501,10 @@
 			 */
 			run_ret->func =
 				vcpu->state == VCPU_STATE_BLOCKED_MAILBOX
-					? SPCI_MSG_WAIT_32
-					: HF_SPCI_RUN_WAIT_FOR_INTERRUPT;
+					? FFA_MSG_WAIT_32
+					: HF_FFA_RUN_WAIT_FOR_INTERRUPT;
 			run_ret->arg1 =
-				spci_vm_vcpu(vcpu->vm->id, vcpu_index(vcpu));
+				ffa_vm_vcpu(vcpu->vm->id, vcpu_index(vcpu));
 			run_ret->arg2 = timer_remaining_ns;
 		}
 
@@ -537,16 +537,16 @@
 	return ret;
 }
 
-struct spci_value api_spci_run(spci_vm_id_t vm_id, spci_vcpu_index_t vcpu_idx,
-			       const struct vcpu *current, struct vcpu **next)
+struct ffa_value api_ffa_run(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+			     const struct vcpu *current, struct vcpu **next)
 {
 	struct vm *vm;
 	struct vcpu *vcpu;
-	struct spci_value ret = spci_error(SPCI_INVALID_PARAMETERS);
+	struct ffa_value ret = ffa_error(FFA_INVALID_PARAMETERS);
 
 	/* Only the primary VM can switch vCPUs. */
 	if (current->vm->id != HF_PRIMARY_VM_ID) {
-		ret.arg2 = SPCI_DENIED;
+		ret.arg2 = FFA_DENIED;
 		goto out;
 	}
 
@@ -600,8 +600,8 @@
 	 * Set a placeholder return code for the scheduler. This will be
 	 * overwritten when the switch back to the primary occurs.
 	 */
-	ret.func = SPCI_INTERRUPT_32;
-	ret.arg1 = spci_vm_vcpu(vm_id, vcpu_idx);
+	ret.func = FFA_INTERRUPT_32;
+	ret.arg1 = ffa_vm_vcpu(vm_id, vcpu_idx);
 	ret.arg2 = 0;
 
 out:
@@ -618,24 +618,24 @@
 }
 
 /**
- * Determines the value to be returned by api_vm_configure and spci_rx_release
+ * Determines the value to be returned by api_vm_configure and ffa_rx_release
  * after they've succeeded. If a secondary VM is running and there are waiters,
  * it also switches back to the primary VM for it to wake waiters up.
  */
-static struct spci_value api_waiter_result(struct vm_locked locked_vm,
-					   struct vcpu *current,
-					   struct vcpu **next)
+static struct ffa_value api_waiter_result(struct vm_locked locked_vm,
+					  struct vcpu *current,
+					  struct vcpu **next)
 {
 	struct vm *vm = locked_vm.vm;
 
 	if (list_empty(&vm->mailbox.waiter_list)) {
 		/* No waiters, nothing else to do. */
-		return (struct spci_value){.func = SPCI_SUCCESS_32};
+		return (struct ffa_value){.func = FFA_SUCCESS_32};
 	}
 
 	if (vm->id == HF_PRIMARY_VM_ID) {
 		/* The caller is the primary VM. Tell it to wake up waiters. */
-		return (struct spci_value){.func = SPCI_RX_RELEASE_32};
+		return (struct ffa_value){.func = FFA_RX_RELEASE_32};
 	}
 
 	/*
@@ -643,10 +643,10 @@
 	 * that need to be notified.
 	 */
 	*next = api_switch_to_primary(
-		current, (struct spci_value){.func = SPCI_RX_RELEASE_32},
+		current, (struct ffa_value){.func = FFA_RX_RELEASE_32},
 		VCPU_STATE_READY);
 
-	return (struct spci_value){.func = SPCI_SUCCESS_32};
+	return (struct ffa_value){.func = FFA_SUCCESS_32};
 }
 
 /**
@@ -783,19 +783,19 @@
  * must not be shared.
  *
  * Returns:
- *  - SPCI_ERROR SPCI_INVALID_PARAMETERS if the given addresses are not properly
+ *  - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
  *    aligned or are the same.
- *  - SPCI_ERROR SPCI_NO_MEMORY if the hypervisor was unable to map the buffers
+ *  - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
  *    due to insufficient page table memory.
- *  - SPCI_ERROR SPCI_DENIED if the pages are already mapped or are not owned by
+ *  - FFA_ERROR FFA_DENIED if the pages are already mapped or are not owned by
  *    the caller.
- *  - SPCI_SUCCESS on success if no further action is needed.
- *  - SPCI_RX_RELEASE if it was called by the primary VM and the primary VM now
+ *  - FFA_SUCCESS on success if no further action is needed.
+ *  - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
  *    needs to wake up or kick waiters.
  */
-struct spci_value api_spci_rxtx_map(ipaddr_t send, ipaddr_t recv,
-				    uint32_t page_count, struct vcpu *current,
-				    struct vcpu **next)
+struct ffa_value api_ffa_rxtx_map(ipaddr_t send, ipaddr_t recv,
+				  uint32_t page_count, struct vcpu *current,
+				  struct vcpu **next)
 {
 	struct vm *vm = current->vm;
 	struct vm_locked vm_locked;
@@ -805,17 +805,17 @@
 	paddr_t pa_recv_end;
 	uint32_t orig_send_mode;
 	uint32_t orig_recv_mode;
-	struct spci_value ret;
+	struct ffa_value ret;
 
 	/* Hafnium only supports a fixed size of RX/TX buffers. */
-	if (page_count != HF_MAILBOX_SIZE / SPCI_PAGE_SIZE) {
-		return spci_error(SPCI_INVALID_PARAMETERS);
+	if (page_count != HF_MAILBOX_SIZE / FFA_PAGE_SIZE) {
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/* Fail if addresses are not page-aligned. */
 	if (!is_aligned(ipa_addr(send), PAGE_SIZE) ||
 	    !is_aligned(ipa_addr(recv), PAGE_SIZE)) {
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/* Convert to physical addresses. */
@@ -827,7 +827,7 @@
 
 	/* Fail if the same page is used for the send and receive pages. */
 	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/*
@@ -842,7 +842,7 @@
 
 	/* We only allow these to be set up once. */
 	if (vm->mailbox.send || vm->mailbox.recv) {
-		ret = spci_error(SPCI_DENIED);
+		ret = ffa_error(FFA_DENIED);
 		goto exit;
 	}
 
@@ -855,7 +855,7 @@
 	    !api_mode_valid_owned_and_exclusive(orig_send_mode) ||
 	    (orig_send_mode & MM_MODE_R) == 0 ||
 	    (orig_send_mode & MM_MODE_W) == 0) {
-		ret = spci_error(SPCI_DENIED);
+		ret = ffa_error(FFA_DENIED);
 		goto exit;
 	}
 
@@ -863,14 +863,14 @@
 			    &orig_recv_mode) ||
 	    !api_mode_valid_owned_and_exclusive(orig_recv_mode) ||
 	    (orig_recv_mode & MM_MODE_R) == 0) {
-		ret = spci_error(SPCI_DENIED);
+		ret = ffa_error(FFA_DENIED);
 		goto exit;
 	}
 
 	if (!api_vm_configure_pages(vm_locked, pa_send_begin, pa_send_end,
 				    orig_send_mode, pa_recv_begin, pa_recv_end,
 				    orig_recv_mode)) {
-		ret = spci_error(SPCI_NO_MEMORY);
+		ret = ffa_error(FFA_NO_MEMORY);
 		goto exit;
 	}
 
@@ -916,12 +916,12 @@
  * Notifies the `to` VM about the message currently in its mailbox, possibly
  * with the help of the primary VM.
  */
-static struct spci_value deliver_msg(struct vm_locked to, spci_vm_id_t from_id,
-				     struct vcpu *current, struct vcpu **next)
+static struct ffa_value deliver_msg(struct vm_locked to, ffa_vm_id_t from_id,
+				    struct vcpu *current, struct vcpu **next)
 {
-	struct spci_value ret = (struct spci_value){.func = SPCI_SUCCESS_32};
-	struct spci_value primary_ret = {
-		.func = SPCI_MSG_SEND_32,
+	struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
+	struct ffa_value primary_ret = {
+		.func = FFA_MSG_SEND_32,
 		.arg1 = ((uint32_t)from_id << 16) | to.vm->id,
 	};
 
@@ -932,7 +932,7 @@
 		 * message is for it, to avoid leaking data about messages for
 		 * other VMs.
 		 */
-		primary_ret = spci_msg_recv_return(to.vm);
+		primary_ret = ffa_msg_recv_return(to.vm);
 
 		to.vm->mailbox.state = MAILBOX_STATE_READ;
 		*next = api_switch_to_primary(current, primary_ret,
@@ -944,7 +944,7 @@
 
 	/* Messages for the TEE are passed on via the dispatcher. */
 	if (to.vm->id == HF_TEE_VM_ID) {
-		struct spci_value call = spci_msg_recv_return(to.vm);
+		struct ffa_value call = ffa_msg_recv_return(to.vm);
 
 		ret = arch_tee_call(call);
 		/*
@@ -954,7 +954,7 @@
 		to.vm->mailbox.state = MAILBOX_STATE_EMPTY;
 		/*
 		 * Don't return to the primary VM in this case, as the TEE is
-		 * not (yet) scheduled via SPCI.
+		 * not (yet) scheduled via FF-A.
 		 */
 		return ret;
 	}
@@ -975,38 +975,38 @@
  * If the recipient's receive buffer is busy, it can optionally register the
  * caller to be notified when the recipient's receive buffer becomes available.
  */
-struct spci_value api_spci_msg_send(spci_vm_id_t sender_vm_id,
-				    spci_vm_id_t receiver_vm_id, uint32_t size,
-				    uint32_t attributes, struct vcpu *current,
-				    struct vcpu **next)
+struct ffa_value api_ffa_msg_send(ffa_vm_id_t sender_vm_id,
+				  ffa_vm_id_t receiver_vm_id, uint32_t size,
+				  uint32_t attributes, struct vcpu *current,
+				  struct vcpu **next)
 {
 	struct vm *from = current->vm;
 	struct vm *to;
 	struct vm_locked to_locked;
 	const void *from_msg;
-	struct spci_value ret;
-	bool notify = (attributes & SPCI_MSG_SEND_NOTIFY_MASK) ==
-		      SPCI_MSG_SEND_NOTIFY;
+	struct ffa_value ret;
+	bool notify =
+		(attributes & FFA_MSG_SEND_NOTIFY_MASK) == FFA_MSG_SEND_NOTIFY;
 
 	/* Ensure sender VM ID corresponds to the current VM. */
 	if (sender_vm_id != from->id) {
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/* Disallow reflexive requests as this suggests an error in the VM. */
 	if (receiver_vm_id == from->id) {
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/* Limit the size of the transfer. */
-	if (size > SPCI_MSG_PAYLOAD_MAX) {
-		return spci_error(SPCI_INVALID_PARAMETERS);
+	if (size > FFA_MSG_PAYLOAD_MAX) {
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/* Ensure the receiver VM exists. */
 	to = vm_find(receiver_vm_id);
 	if (to == NULL) {
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/*
@@ -1020,21 +1020,21 @@
 	sl_unlock(&from->lock);
 
 	if (from_msg == NULL) {
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	to_locked = vm_lock(to);
 
 	if (msg_receiver_busy(to_locked, from, notify)) {
-		ret = spci_error(SPCI_BUSY);
+		ret = ffa_error(FFA_BUSY);
 		goto out;
 	}
 
 	/* Copy data. */
-	memcpy_s(to->mailbox.recv, SPCI_MSG_PAYLOAD_MAX, from_msg, size);
+	memcpy_s(to->mailbox.recv, FFA_MSG_PAYLOAD_MAX, from_msg, size);
 	to->mailbox.recv_size = size;
 	to->mailbox.recv_sender = sender_vm_id;
-	to->mailbox.recv_func = SPCI_MSG_SEND_32;
+	to->mailbox.recv_func = FFA_MSG_SEND_32;
 	ret = deliver_msg(to_locked, sender_vm_id, current, next);
 
 out:
@@ -1047,7 +1047,7 @@
  * Checks whether the vCPU's attempt to block for a message has already been
  * interrupted or whether it is allowed to block.
  */
-bool api_spci_msg_recv_block_interrupted(struct vcpu *current)
+bool api_ffa_msg_recv_block_interrupted(struct vcpu *current)
 {
 	bool interrupted;
 
@@ -1070,18 +1070,18 @@
  *
  * No new messages can be received until the mailbox has been cleared.
  */
-struct spci_value api_spci_msg_recv(bool block, struct vcpu *current,
-				    struct vcpu **next)
+struct ffa_value api_ffa_msg_recv(bool block, struct vcpu *current,
+				  struct vcpu **next)
 {
 	struct vm *vm = current->vm;
-	struct spci_value return_code;
+	struct ffa_value return_code;
 
 	/*
 	 * The primary VM will receive messages as a status code from running
 	 * vCPUs and must not call this function.
 	 */
 	if (vm->id == HF_PRIMARY_VM_ID) {
-		return spci_error(SPCI_NOT_SUPPORTED);
+		return ffa_error(FFA_NOT_SUPPORTED);
 	}
 
 	sl_lock(&vm->lock);
@@ -1089,31 +1089,31 @@
 	/* Return pending messages without blocking. */
 	if (vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
 		vm->mailbox.state = MAILBOX_STATE_READ;
-		return_code = spci_msg_recv_return(vm);
+		return_code = ffa_msg_recv_return(vm);
 		goto out;
 	}
 
 	/* No pending message so fail if not allowed to block. */
 	if (!block) {
-		return_code = spci_error(SPCI_RETRY);
+		return_code = ffa_error(FFA_RETRY);
 		goto out;
 	}
 
 	/*
 	 * From this point onward this call can only be interrupted or a message
 	 * received. If a message is received the return value will be set at
-	 * that time to SPCI_SUCCESS.
+	 * that time to FFA_SUCCESS.
 	 */
-	return_code = spci_error(SPCI_INTERRUPTED);
-	if (api_spci_msg_recv_block_interrupted(current)) {
+	return_code = ffa_error(FFA_INTERRUPTED);
+	if (api_ffa_msg_recv_block_interrupted(current)) {
 		goto out;
 	}
 
 	/* Switch back to primary VM to block. */
 	{
-		struct spci_value run_return = {
-			.func = SPCI_MSG_WAIT_32,
-			.arg1 = spci_vm_vcpu(vm->id, vcpu_index(current)),
+		struct ffa_value run_return = {
+			.func = FFA_MSG_WAIT_32,
+			.arg1 = ffa_vm_vcpu(vm->id, vcpu_index(current)),
 		};
 
 		*next = api_switch_to_primary(current, run_return,
@@ -1165,7 +1165,7 @@
  * Returns -1 on failure or if there are no waiters; the VM id of the next
  * waiter otherwise.
  */
-int64_t api_mailbox_waiter_get(spci_vm_id_t vm_id, const struct vcpu *current)
+int64_t api_mailbox_waiter_get(ffa_vm_id_t vm_id, const struct vcpu *current)
 {
 	struct vm *vm;
 	struct vm_locked locked;
@@ -1210,23 +1210,23 @@
  * will overwrite the old and will arrive asynchronously.
  *
  * Returns:
- *  - SPCI_ERROR SPCI_DENIED on failure, if the mailbox hasn't been read.
- *  - SPCI_SUCCESS on success if no further action is needed.
- *  - SPCI_RX_RELEASE if it was called by the primary VM and the primary VM now
+ *  - FFA_ERROR FFA_DENIED on failure, if the mailbox hasn't been read.
+ *  - FFA_SUCCESS on success if no further action is needed.
+ *  - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
  *    needs to wake up or kick waiters. Waiters should be retrieved by calling
  *    hf_mailbox_waiter_get.
  */
-struct spci_value api_spci_rx_release(struct vcpu *current, struct vcpu **next)
+struct ffa_value api_ffa_rx_release(struct vcpu *current, struct vcpu **next)
 {
 	struct vm *vm = current->vm;
 	struct vm_locked locked;
-	struct spci_value ret;
+	struct ffa_value ret;
 
 	locked = vm_lock(vm);
 	switch (vm->mailbox.state) {
 	case MAILBOX_STATE_EMPTY:
 	case MAILBOX_STATE_RECEIVED:
-		ret = spci_error(SPCI_DENIED);
+		ret = ffa_error(FFA_DENIED);
 		break;
 
 	case MAILBOX_STATE_READ:
@@ -1351,8 +1351,8 @@
  *  - 1 if it was called by the primary VM and the primary VM now needs to wake
  *    up or kick the target vCPU.
  */
-int64_t api_interrupt_inject(spci_vm_id_t target_vm_id,
-			     spci_vcpu_index_t target_vcpu_idx, uint32_t intid,
+int64_t api_interrupt_inject(ffa_vm_id_t target_vm_id,
+			     ffa_vcpu_index_t target_vcpu_idx, uint32_t intid,
 			     struct vcpu *current, struct vcpu **next)
 {
 	struct vcpu *target_vcpu;
@@ -1383,25 +1383,25 @@
 	return internal_interrupt_inject(target_vcpu, intid, current, next);
 }
 
-/** Returns the version of the implemented SPCI specification. */
-struct spci_value api_spci_version(uint32_t requested_version)
+/** Returns the version of the implemented FF-A specification. */
+struct ffa_value api_ffa_version(uint32_t requested_version)
 {
 	/*
 	 * Ensure that the major revision representation occupies at most 15
 	 * bits and the minor revision at most 16 bits.
 	 */
-	static_assert(0x8000 > SPCI_VERSION_MAJOR,
+	static_assert(0x8000 > FFA_VERSION_MAJOR,
 		      "Major revision representation takes more than 15 bits.");
-	static_assert(0x10000 > SPCI_VERSION_MINOR,
+	static_assert(0x10000 > FFA_VERSION_MINOR,
 		      "Minor revision representation takes more than 16 bits.");
-	if (requested_version & SPCI_VERSION_RESERVED_BIT) {
+	if (requested_version & FFA_VERSION_RESERVED_BIT) {
 		/* Invalid encoding, return an error. */
-		return (struct spci_value){.func = SPCI_NOT_SUPPORTED};
+		return (struct ffa_value){.func = FFA_NOT_SUPPORTED};
 	}
 
-	return (struct spci_value){
-		.func = (SPCI_VERSION_MAJOR << SPCI_VERSION_MAJOR_OFFSET) |
-			SPCI_VERSION_MINOR};
+	return (struct ffa_value){
+		.func = (FFA_VERSION_MAJOR << FFA_VERSION_MAJOR_OFFSET) |
+			FFA_VERSION_MINOR};
 }
 
 int64_t api_debug_log(char c, struct vcpu *current)
@@ -1430,59 +1430,59 @@
 
 /**
  * Discovery function returning information about the implementation of optional
- * SPCI interfaces.
+ * FF-A interfaces.
  */
-struct spci_value api_spci_features(uint32_t function_id)
+struct ffa_value api_ffa_features(uint32_t function_id)
 {
 	switch (function_id) {
-	case SPCI_ERROR_32:
-	case SPCI_SUCCESS_32:
-	case SPCI_INTERRUPT_32:
-	case SPCI_VERSION_32:
-	case SPCI_FEATURES_32:
-	case SPCI_RX_RELEASE_32:
-	case SPCI_RXTX_MAP_64:
-	case SPCI_ID_GET_32:
-	case SPCI_MSG_POLL_32:
-	case SPCI_MSG_WAIT_32:
-	case SPCI_YIELD_32:
-	case SPCI_RUN_32:
-	case SPCI_MSG_SEND_32:
-	case SPCI_MEM_DONATE_32:
-	case SPCI_MEM_LEND_32:
-	case SPCI_MEM_SHARE_32:
-	case SPCI_MEM_RETRIEVE_REQ_32:
-	case SPCI_MEM_RETRIEVE_RESP_32:
-	case SPCI_MEM_RELINQUISH_32:
-	case SPCI_MEM_RECLAIM_32:
-		return (struct spci_value){.func = SPCI_SUCCESS_32};
+	case FFA_ERROR_32:
+	case FFA_SUCCESS_32:
+	case FFA_INTERRUPT_32:
+	case FFA_VERSION_32:
+	case FFA_FEATURES_32:
+	case FFA_RX_RELEASE_32:
+	case FFA_RXTX_MAP_64:
+	case FFA_ID_GET_32:
+	case FFA_MSG_POLL_32:
+	case FFA_MSG_WAIT_32:
+	case FFA_YIELD_32:
+	case FFA_RUN_32:
+	case FFA_MSG_SEND_32:
+	case FFA_MEM_DONATE_32:
+	case FFA_MEM_LEND_32:
+	case FFA_MEM_SHARE_32:
+	case FFA_MEM_RETRIEVE_REQ_32:
+	case FFA_MEM_RETRIEVE_RESP_32:
+	case FFA_MEM_RELINQUISH_32:
+	case FFA_MEM_RECLAIM_32:
+		return (struct ffa_value){.func = FFA_SUCCESS_32};
 	default:
-		return spci_error(SPCI_NOT_SUPPORTED);
+		return ffa_error(FFA_NOT_SUPPORTED);
 	}
 }
 
-struct spci_value api_spci_mem_send(uint32_t share_func, uint32_t length,
-				    uint32_t fragment_length, ipaddr_t address,
-				    uint32_t page_count, struct vcpu *current,
-				    struct vcpu **next)
+struct ffa_value api_ffa_mem_send(uint32_t share_func, uint32_t length,
+				  uint32_t fragment_length, ipaddr_t address,
+				  uint32_t page_count, struct vcpu *current,
+				  struct vcpu **next)
 {
 	struct vm *from = current->vm;
 	struct vm *to;
 	const void *from_msg;
-	struct spci_memory_region *memory_region;
-	struct spci_value ret;
+	struct ffa_memory_region *memory_region;
+	struct ffa_value ret;
 
 	if (ipa_addr(address) != 0 || page_count != 0) {
 		/*
 		 * Hafnium only supports passing the descriptor in the TX
 		 * mailbox.
 		 */
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	if (fragment_length != length) {
 		dlog_verbose("Fragmentation not yet supported.\n");
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/*
@@ -1496,7 +1496,7 @@
 	sl_unlock(&from->lock);
 
 	if (from_msg == NULL) {
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/*
@@ -1505,20 +1505,19 @@
 	 * also lets us keep it around in the share state table if needed.
 	 */
 	if (length > HF_MAILBOX_SIZE || length > MM_PPOOL_ENTRY_SIZE) {
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
-	memory_region =
-		(struct spci_memory_region *)mpool_alloc(&api_page_pool);
+	memory_region = (struct ffa_memory_region *)mpool_alloc(&api_page_pool);
 	if (memory_region == NULL) {
 		dlog_verbose("Failed to allocate memory region copy.\n");
-		return spci_error(SPCI_NO_MEMORY);
+		return ffa_error(FFA_NO_MEMORY);
 	}
 	memcpy_s(memory_region, MM_PPOOL_ENTRY_SIZE, from_msg, length);
 
 	/* The sender must match the caller. */
 	if (memory_region->sender != from->id) {
 		dlog_verbose("Memory region sender doesn't match caller.\n");
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
@@ -1528,7 +1527,7 @@
 			"Multi-way memory sharing not supported (got %d "
 			"endpoint memory access descriptors, expected 1).\n",
 			memory_region->receiver_count);
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
@@ -1538,7 +1537,7 @@
 	to = vm_find(memory_region->receivers[0].receiver_permissions.receiver);
 	if (to == NULL || to == from) {
 		dlog_verbose("Invalid receiver.\n");
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
@@ -1550,15 +1549,15 @@
 		struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
 
 		if (msg_receiver_busy(vm_to_from_lock.vm1, from, false)) {
-			ret = spci_error(SPCI_BUSY);
+			ret = ffa_error(FFA_BUSY);
 			goto out_unlock;
 		}
 
-		ret = spci_memory_send(to, vm_to_from_lock.vm2, memory_region,
-				       length, share_func, &api_page_pool);
-		if (ret.func == SPCI_SUCCESS_32) {
+		ret = ffa_memory_send(to, vm_to_from_lock.vm2, memory_region,
+				      length, share_func, &api_page_pool);
+		if (ret.func == FFA_SUCCESS_32) {
 			/* Forward memory send message on to TEE. */
-			memcpy_s(to->mailbox.recv, SPCI_MSG_PAYLOAD_MAX,
+			memcpy_s(to->mailbox.recv, FFA_MSG_PAYLOAD_MAX,
 				 memory_region, length);
 			to->mailbox.recv_size = length;
 			to->mailbox.recv_sender = from->id;
@@ -1573,10 +1572,10 @@
 	} else {
 		struct vm_locked from_locked = vm_lock(from);
 
-		ret = spci_memory_send(to, from_locked, memory_region, length,
-				       share_func, &api_page_pool);
+		ret = ffa_memory_send(to, from_locked, memory_region, length,
+				      share_func, &api_page_pool);
 		/*
-		 * spci_memory_send takes ownership of the memory_region, so
+		 * ffa_memory_send takes ownership of the memory_region, so
 		 * make sure we don't free it.
 		 */
 		memory_region = NULL;
@@ -1592,38 +1591,37 @@
 	return ret;
 }
 
-struct spci_value api_spci_mem_retrieve_req(uint32_t length,
-					    uint32_t fragment_length,
-					    ipaddr_t address,
-					    uint32_t page_count,
-					    struct vcpu *current)
+struct ffa_value api_ffa_mem_retrieve_req(uint32_t length,
+					  uint32_t fragment_length,
+					  ipaddr_t address, uint32_t page_count,
+					  struct vcpu *current)
 {
 	struct vm *to = current->vm;
 	struct vm_locked to_locked;
 	const void *to_msg;
-	struct spci_memory_region *retrieve_request;
+	struct ffa_memory_region *retrieve_request;
 	uint32_t message_buffer_size;
-	struct spci_value ret;
+	struct ffa_value ret;
 
 	if (ipa_addr(address) != 0 || page_count != 0) {
 		/*
 		 * Hafnium only supports passing the descriptor in the TX
 		 * mailbox.
 		 */
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	if (fragment_length != length) {
 		dlog_verbose("Fragmentation not yet supported.\n");
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	retrieve_request =
-		(struct spci_memory_region *)cpu_get_buffer(current->cpu);
+		(struct ffa_memory_region *)cpu_get_buffer(current->cpu);
 	message_buffer_size = cpu_get_buffer_size(current->cpu);
 	if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
 		dlog_verbose("Retrieve request too long.\n");
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	to_locked = vm_lock(to);
@@ -1631,7 +1629,7 @@
 
 	if (to_msg == NULL) {
 		dlog_verbose("TX buffer not setup.\n");
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
@@ -1647,26 +1645,26 @@
 		 * available.
 		 */
 		dlog_verbose("RX buffer not ready.\n");
-		ret = spci_error(SPCI_BUSY);
+		ret = ffa_error(FFA_BUSY);
 		goto out;
 	}
 
-	ret = spci_memory_retrieve(to_locked, retrieve_request, length,
-				   &api_page_pool);
+	ret = ffa_memory_retrieve(to_locked, retrieve_request, length,
+				  &api_page_pool);
 
 out:
 	vm_unlock(&to_locked);
 	return ret;
 }
 
-struct spci_value api_spci_mem_relinquish(struct vcpu *current)
+struct ffa_value api_ffa_mem_relinquish(struct vcpu *current)
 {
 	struct vm *from = current->vm;
 	struct vm_locked from_locked;
 	const void *from_msg;
-	struct spci_mem_relinquish *relinquish_request;
+	struct ffa_mem_relinquish *relinquish_request;
 	uint32_t message_buffer_size;
-	struct spci_value ret;
+	struct ffa_value ret;
 	uint32_t length;
 
 	from_locked = vm_lock(from);
@@ -1674,7 +1672,7 @@
 
 	if (from_msg == NULL) {
 		dlog_verbose("TX buffer not setup.\n");
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
@@ -1682,72 +1680,72 @@
 	 * Calculate length from relinquish descriptor before copying. We will
 	 * check again later to make sure it hasn't changed.
 	 */
-	length = sizeof(struct spci_mem_relinquish) +
-		 ((struct spci_mem_relinquish *)from_msg)->endpoint_count *
-			 sizeof(spci_vm_id_t);
+	length = sizeof(struct ffa_mem_relinquish) +
+		 ((struct ffa_mem_relinquish *)from_msg)->endpoint_count *
+			 sizeof(ffa_vm_id_t);
 	/*
 	 * Copy the relinquish descriptor to an internal buffer, so that the
 	 * caller can't change it underneath us.
 	 */
 	relinquish_request =
-		(struct spci_mem_relinquish *)cpu_get_buffer(current->cpu);
+		(struct ffa_mem_relinquish *)cpu_get_buffer(current->cpu);
 	message_buffer_size = cpu_get_buffer_size(current->cpu);
 	if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
 		dlog_verbose("Relinquish message too long.\n");
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 	memcpy_s(relinquish_request, message_buffer_size, from_msg, length);
 
-	if (sizeof(struct spci_mem_relinquish) +
-		    relinquish_request->endpoint_count * sizeof(spci_vm_id_t) !=
+	if (sizeof(struct ffa_mem_relinquish) +
+		    relinquish_request->endpoint_count * sizeof(ffa_vm_id_t) !=
 	    length) {
 		dlog_verbose(
 			"Endpoint count changed while copying to internal "
 			"buffer.\n");
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
-	ret = spci_memory_relinquish(from_locked, relinquish_request,
-				     &api_page_pool);
+	ret = ffa_memory_relinquish(from_locked, relinquish_request,
+				    &api_page_pool);
 
 out:
 	vm_unlock(&from_locked);
 	return ret;
 }
 
-static struct spci_value spci_mem_reclaim_tee(struct vm_locked to_locked,
-					      struct vm_locked from_locked,
-					      spci_memory_handle_t handle,
-					      spci_memory_region_flags_t flags,
-					      struct cpu *cpu)
+static struct ffa_value ffa_mem_reclaim_tee(struct vm_locked to_locked,
+					    struct vm_locked from_locked,
+					    ffa_memory_handle_t handle,
+					    ffa_memory_region_flags_t flags,
+					    struct cpu *cpu)
 {
 	uint32_t fragment_length;
 	uint32_t length;
 	uint32_t request_length;
-	struct spci_memory_region *memory_region =
-		(struct spci_memory_region *)cpu_get_buffer(cpu);
+	struct ffa_memory_region *memory_region =
+		(struct ffa_memory_region *)cpu_get_buffer(cpu);
 	uint32_t message_buffer_size = cpu_get_buffer_size(cpu);
-	struct spci_value tee_ret;
+	struct ffa_value tee_ret;
 
-	request_length = spci_memory_lender_retrieve_request_init(
+	request_length = ffa_memory_lender_retrieve_request_init(
 		from_locked.vm->mailbox.recv, handle, to_locked.vm->id);
 
 	/* Retrieve memory region information from the TEE. */
 	tee_ret = arch_tee_call(
-		(struct spci_value){.func = SPCI_MEM_RETRIEVE_REQ_32,
-				    .arg1 = request_length,
-				    .arg2 = request_length});
-	if (tee_ret.func == SPCI_ERROR_32) {
+		(struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
+				   .arg1 = request_length,
+				   .arg2 = request_length});
+	if (tee_ret.func == FFA_ERROR_32) {
 		dlog_verbose("Got error %d from EL3.\n", tee_ret.arg2);
 		return tee_ret;
 	}
-	if (tee_ret.func != SPCI_MEM_RETRIEVE_RESP_32) {
+	if (tee_ret.func != FFA_MEM_RETRIEVE_RESP_32) {
 		dlog_verbose(
-			"Got %#x from EL3, expected SPCI_MEM_RETRIEVE_RESP.\n",
+			"Got %#x from EL3, expected FFA_MEM_RETRIEVE_RESP.\n",
 			tee_ret.func);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	length = tee_ret.arg1;
@@ -1757,7 +1755,7 @@
 	    fragment_length > message_buffer_size) {
 		dlog_verbose("Invalid fragment length %d (max %d).\n", length,
 			     HF_MAILBOX_SIZE);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/* TODO: Support fragmentation. */
@@ -1766,7 +1764,7 @@
 			"Message fragmentation not yet supported (fragment "
 			"length %d but length %d).\n",
 			fragment_length, length);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/*
@@ -1780,34 +1778,34 @@
 	 * Validate that transition is allowed (e.g. that caller is owner),
 	 * forward the reclaim request to the TEE, and update page tables.
 	 */
-	return spci_memory_tee_reclaim(to_locked, handle, memory_region,
-				       flags & SPCI_MEM_RECLAIM_CLEAR,
-				       &api_page_pool);
+	return ffa_memory_tee_reclaim(to_locked, handle, memory_region,
+				      flags & FFA_MEM_RECLAIM_CLEAR,
+				      &api_page_pool);
 }
 
-struct spci_value api_spci_mem_reclaim(spci_memory_handle_t handle,
-				       spci_memory_region_flags_t flags,
-				       struct vcpu *current)
+struct ffa_value api_ffa_mem_reclaim(ffa_memory_handle_t handle,
+				     ffa_memory_region_flags_t flags,
+				     struct vcpu *current)
 {
 	struct vm *to = current->vm;
-	struct spci_value ret;
+	struct ffa_value ret;
 
-	if ((handle & SPCI_MEMORY_HANDLE_ALLOCATOR_MASK) ==
-	    SPCI_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) {
+	if ((handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK) ==
+	    FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) {
 		struct vm_locked to_locked = vm_lock(to);
 
-		ret = spci_memory_reclaim(to_locked, handle,
-					  flags & SPCI_MEM_RECLAIM_CLEAR,
-					  &api_page_pool);
+		ret = ffa_memory_reclaim(to_locked, handle,
+					 flags & FFA_MEM_RECLAIM_CLEAR,
+					 &api_page_pool);
 
 		vm_unlock(&to_locked);
 	} else {
 		struct vm *from = vm_find(HF_TEE_VM_ID);
 		struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
 
-		ret = spci_mem_reclaim_tee(vm_to_from_lock.vm1,
-					   vm_to_from_lock.vm2, handle, flags,
-					   current->cpu);
+		ret = ffa_mem_reclaim_tee(vm_to_from_lock.vm1,
+					  vm_to_from_lock.vm2, handle, flags,
+					  current->cpu);
 
 		vm_unlock(&vm_to_from_lock.vm1);
 		vm_unlock(&vm_to_from_lock.vm2);
diff --git a/src/arch/aarch64/hftest/power_mgmt.c b/src/arch/aarch64/hftest/power_mgmt.c
index 5412fd1..d53d609 100644
--- a/src/arch/aarch64/hftest/power_mgmt.c
+++ b/src/arch/aarch64/hftest/power_mgmt.c
@@ -36,7 +36,7 @@
 bool arch_cpu_start(uintptr_t id, struct arch_cpu_start_state *state)
 {
 	void vm_cpu_entry(uintptr_t arg);
-	struct spci_value smc_res;
+	struct ffa_value smc_res;
 
 	/* Try to start the CPU. */
 	smc_res = smc64(PSCI_CPU_ON, id, (uintptr_t)&vm_cpu_entry,
@@ -69,7 +69,7 @@
 enum power_status arch_cpu_status(cpu_id_t cpu_id)
 {
 	uint32_t lowest_affinity_level = 0;
-	struct spci_value smc_res;
+	struct ffa_value smc_res;
 
 	/*
 	 * This works because the power_status enum values happen to be the same
diff --git a/src/arch/aarch64/hypervisor/cpu.c b/src/arch/aarch64/hypervisor/cpu.c
index 97457c4..1d32587 100644
--- a/src/arch/aarch64/hypervisor/cpu.c
+++ b/src/arch/aarch64/hypervisor/cpu.c
@@ -23,7 +23,7 @@
 #include "hf/arch/plat/psci.h"
 
 #include "hf/addr.h"
-#include "hf/spci.h"
+#include "hf/ffa.h"
 #include "hf/std.h"
 #include "hf/vm.h"
 
@@ -69,7 +69,7 @@
 
 void arch_regs_reset(struct vcpu *vcpu)
 {
-	spci_vm_id_t vm_id = vcpu->vm->id;
+	ffa_vm_id_t vm_id = vcpu->vm->id;
 	bool is_primary = vm_id == HF_PRIMARY_VM_ID;
 	cpu_id_t vcpu_id = is_primary ? vcpu->cpu->id : vcpu_index(vcpu);
 	paddr_t table = vcpu->vm->ptable.root;
@@ -125,7 +125,7 @@
 	r->r[0] = arg;
 }
 
-void arch_regs_set_retval(struct arch_regs *r, struct spci_value v)
+void arch_regs_set_retval(struct arch_regs *r, struct ffa_value v)
 {
 	r->r[0] = v.func;
 	r->r[1] = v.arg1;
diff --git a/src/arch/aarch64/hypervisor/debug_el1.c b/src/arch/aarch64/hypervisor/debug_el1.c
index f46cbe6..e346085 100644
--- a/src/arch/aarch64/hypervisor/debug_el1.c
+++ b/src/arch/aarch64/hypervisor/debug_el1.c
@@ -141,7 +141,7 @@
  * Processes an access (msr, mrs) to an EL1 debug register.
  * Returns true if the access was allowed and performed, false otherwise.
  */
-bool debug_el1_process_access(struct vcpu *vcpu, spci_vm_id_t vm_id,
+bool debug_el1_process_access(struct vcpu *vcpu, ffa_vm_id_t vm_id,
 			      uintreg_t esr)
 {
 	/*
diff --git a/src/arch/aarch64/hypervisor/debug_el1.h b/src/arch/aarch64/hypervisor/debug_el1.h
index 9dc1ef6..86b7d60 100644
--- a/src/arch/aarch64/hypervisor/debug_el1.h
+++ b/src/arch/aarch64/hypervisor/debug_el1.h
@@ -20,9 +20,9 @@
 
 #include "hf/cpu.h"
 
-#include "vmapi/hf/spci.h"
+#include "vmapi/hf/ffa.h"
 
 bool debug_el1_is_register_access(uintreg_t esr_el2);
 
-bool debug_el1_process_access(struct vcpu *vcpu, spci_vm_id_t vm_id,
+bool debug_el1_process_access(struct vcpu *vcpu, ffa_vm_id_t vm_id,
 			      uintreg_t esr_el2);
diff --git a/src/arch/aarch64/hypervisor/feature_id.h b/src/arch/aarch64/hypervisor/feature_id.h
index 86c7c01..15e8265 100644
--- a/src/arch/aarch64/hypervisor/feature_id.h
+++ b/src/arch/aarch64/hypervisor/feature_id.h
@@ -20,7 +20,7 @@
 
 #include "hf/cpu.h"
 
-#include "vmapi/hf/spci.h"
+#include "vmapi/hf/ffa.h"
 
 #define HF_FEATURE_NONE UINT64_C(0)
 
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index d3a4a75..6ad8616 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -25,8 +25,8 @@
 #include "hf/check.h"
 #include "hf/cpu.h"
 #include "hf/dlog.h"
+#include "hf/ffa.h"
 #include "hf/panic.h"
-#include "hf/spci.h"
 #include "hf/vm.h"
 
 #include "vmapi/hf/call.h"
@@ -154,7 +154,7 @@
 void maybe_invalidate_tlb(struct vcpu *vcpu)
 {
 	size_t current_cpu_index = cpu_index(vcpu->cpu);
-	spci_vcpu_index_t new_vcpu_index = vcpu_index(vcpu);
+	ffa_vcpu_index_t new_vcpu_index = vcpu_index(vcpu);
 
 	if (vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] !=
 	    new_vcpu_index) {
@@ -280,9 +280,9 @@
  * Applies SMC access control according to manifest and forwards the call if
  * access is granted.
  */
-static void smc_forwarder(const struct vm *vm, struct spci_value *args)
+static void smc_forwarder(const struct vm *vm, struct ffa_value *args)
 {
-	struct spci_value ret;
+	struct ffa_value ret;
 	uint32_t client_id = vm->id;
 	uintreg_t arg7 = args->arg7;
 
@@ -313,73 +313,72 @@
 	*args = ret;
 }
 
-static bool spci_handler(struct spci_value *args, struct vcpu **next)
+static bool ffa_handler(struct ffa_value *args, struct vcpu **next)
 {
 	uint32_t func = args->func & ~SMCCC_CONVENTION_MASK;
 
 	/*
 	 * NOTE: When adding new methods to this handler, update
-	 * api_spci_features accordingly.
+	 * api_ffa_features accordingly.
 	 */
 	switch (func) {
-	case SPCI_VERSION_32:
-		*args = api_spci_version(args->arg1);
+	case FFA_VERSION_32:
+		*args = api_ffa_version(args->arg1);
 		return true;
-	case SPCI_ID_GET_32:
-		*args = api_spci_id_get(current());
+	case FFA_ID_GET_32:
+		*args = api_ffa_id_get(current());
 		return true;
-	case SPCI_FEATURES_32:
-		*args = api_spci_features(args->arg1);
+	case FFA_FEATURES_32:
+		*args = api_ffa_features(args->arg1);
 		return true;
-	case SPCI_RX_RELEASE_32:
-		*args = api_spci_rx_release(current(), next);
+	case FFA_RX_RELEASE_32:
+		*args = api_ffa_rx_release(current(), next);
 		return true;
-	case SPCI_RXTX_MAP_32:
-		*args = api_spci_rxtx_map(ipa_init(args->arg1),
-					  ipa_init(args->arg2), args->arg3,
-					  current(), next);
+	case FFA_RXTX_MAP_32:
+		*args = api_ffa_rxtx_map(ipa_init(args->arg1),
+					 ipa_init(args->arg2), args->arg3,
+					 current(), next);
 		return true;
-	case SPCI_YIELD_32:
+	case FFA_YIELD_32:
 		api_yield(current(), next);
 
-		/* SPCI_YIELD always returns SPCI_SUCCESS. */
-		*args = (struct spci_value){.func = SPCI_SUCCESS_32};
+		/* FFA_YIELD always returns FFA_SUCCESS. */
+		*args = (struct ffa_value){.func = FFA_SUCCESS_32};
 
 		return true;
-	case SPCI_MSG_SEND_32:
-		*args = api_spci_msg_send(spci_msg_send_sender(*args),
-					  spci_msg_send_receiver(*args),
-					  spci_msg_send_size(*args),
-					  spci_msg_send_attributes(*args),
-					  current(), next);
+	case FFA_MSG_SEND_32:
+		*args = api_ffa_msg_send(
+			ffa_msg_send_sender(*args),
+			ffa_msg_send_receiver(*args), ffa_msg_send_size(*args),
+			ffa_msg_send_attributes(*args), current(), next);
 		return true;
-	case SPCI_MSG_WAIT_32:
-		*args = api_spci_msg_recv(true, current(), next);
+	case FFA_MSG_WAIT_32:
+		*args = api_ffa_msg_recv(true, current(), next);
 		return true;
-	case SPCI_MSG_POLL_32:
-		*args = api_spci_msg_recv(false, current(), next);
+	case FFA_MSG_POLL_32:
+		*args = api_ffa_msg_recv(false, current(), next);
 		return true;
-	case SPCI_RUN_32:
-		*args = api_spci_run(spci_vm_id(*args), spci_vcpu_index(*args),
-				     current(), next);
+	case FFA_RUN_32:
+		*args = api_ffa_run(ffa_vm_id(*args), ffa_vcpu_index(*args),
+				    current(), next);
 		return true;
-	case SPCI_MEM_DONATE_32:
-	case SPCI_MEM_LEND_32:
-	case SPCI_MEM_SHARE_32:
-		*args = api_spci_mem_send(func, args->arg1, args->arg2,
-					  ipa_init(args->arg3), args->arg4,
-					  current(), next);
+	case FFA_MEM_DONATE_32:
+	case FFA_MEM_LEND_32:
+	case FFA_MEM_SHARE_32:
+		*args = api_ffa_mem_send(func, args->arg1, args->arg2,
+					 ipa_init(args->arg3), args->arg4,
+					 current(), next);
 		return true;
-	case SPCI_MEM_RETRIEVE_REQ_32:
-		*args = api_spci_mem_retrieve_req(args->arg1, args->arg2,
-						  ipa_init(args->arg3),
-						  args->arg4, current());
+	case FFA_MEM_RETRIEVE_REQ_32:
+		*args = api_ffa_mem_retrieve_req(args->arg1, args->arg2,
+						 ipa_init(args->arg3),
+						 args->arg4, current());
 		return true;
-	case SPCI_MEM_RELINQUISH_32:
-		*args = api_spci_mem_relinquish(current());
+	case FFA_MEM_RELINQUISH_32:
+		*args = api_ffa_mem_relinquish(current());
 		return true;
-	case SPCI_MEM_RECLAIM_32:
-		*args = api_spci_mem_reclaim(
+	case FFA_MEM_RECLAIM_32:
+		*args = api_ffa_mem_reclaim(
 			(args->arg1 & 0xffffffff) | args->arg2 << 32,
 			args->arg3, current());
 		return true;
@@ -422,7 +421,7 @@
  */
 static struct vcpu *smc_handler(struct vcpu *vcpu)
 {
-	struct spci_value args = {
+	struct ffa_value args = {
 		.func = vcpu->regs.r[0],
 		.arg1 = vcpu->regs.r[1],
 		.arg2 = vcpu->regs.r[2],
@@ -439,7 +438,7 @@
 		return next;
 	}
 
-	if (spci_handler(&args, &next)) {
+	if (ffa_handler(&args, &next)) {
 		arch_regs_set_retval(&vcpu->regs, args);
 		update_vi(next);
 		return next;
@@ -595,7 +594,7 @@
 
 struct vcpu *hvc_handler(struct vcpu *vcpu)
 {
-	struct spci_value args = {
+	struct ffa_value args = {
 		.func = vcpu->regs.r[0],
 		.arg1 = vcpu->regs.r[1],
 		.arg2 = vcpu->regs.r[2],
@@ -612,7 +611,7 @@
 		return next;
 	}
 
-	if (spci_handler(&args, &next)) {
+	if (ffa_handler(&args, &next)) {
 		arch_regs_set_retval(&vcpu->regs, args);
 		update_vi(next);
 		return next;
@@ -816,7 +815,7 @@
 void handle_system_register_access(uintreg_t esr_el2)
 {
 	struct vcpu *vcpu = current();
-	spci_vm_id_t vm_id = vcpu->vm->id;
+	ffa_vm_id_t vm_id = vcpu->vm->id;
 	uintreg_t ec = GET_ESR_EC(esr_el2);
 
 	CHECK(ec == EC_MSR);
diff --git a/src/arch/aarch64/hypervisor/perfmon.c b/src/arch/aarch64/hypervisor/perfmon.c
index 4532e31..81644a0 100644
--- a/src/arch/aarch64/hypervisor/perfmon.c
+++ b/src/arch/aarch64/hypervisor/perfmon.c
@@ -157,8 +157,7 @@
  * Processes an access (msr, mrs) to a performance monitor register.
  * Returns true if the access was allowed and performed, false otherwise.
  */
-bool perfmon_process_access(struct vcpu *vcpu, spci_vm_id_t vm_id,
-			    uintreg_t esr)
+bool perfmon_process_access(struct vcpu *vcpu, ffa_vm_id_t vm_id, uintreg_t esr)
 {
 	/*
 	 * For now, performance monitor registers are not supported by secondary
@@ -232,7 +231,7 @@
 /**
  * Returns the value register PMCCFILTR_EL0 should have at initialization.
  */
-uintreg_t perfmon_get_pmccfiltr_el0_init_value(spci_vm_id_t vm_id)
+uintreg_t perfmon_get_pmccfiltr_el0_init_value(ffa_vm_id_t vm_id)
 {
 	if (vm_id != HF_PRIMARY_VM_ID) {
 		/* Disable cycle counting for secondary VMs. */
diff --git a/src/arch/aarch64/hypervisor/perfmon.h b/src/arch/aarch64/hypervisor/perfmon.h
index afeabd9..a9d68f7 100644
--- a/src/arch/aarch64/hypervisor/perfmon.h
+++ b/src/arch/aarch64/hypervisor/perfmon.h
@@ -20,7 +20,7 @@
 
 #include "hf/cpu.h"
 
-#include "vmapi/hf/spci.h"
+#include "vmapi/hf/ffa.h"
 
 /**
  * Set to disable cycle counting when event counting is prohibited.
@@ -74,7 +74,7 @@
 
 bool perfmon_is_register_access(uintreg_t esr_el2);
 
-bool perfmon_process_access(struct vcpu *vcpu, spci_vm_id_t vm_id,
+bool perfmon_process_access(struct vcpu *vcpu, ffa_vm_id_t vm_id,
 			    uintreg_t esr_el2);
 
-uintreg_t perfmon_get_pmccfiltr_el0_init_value(spci_vm_id_t vm_id);
+uintreg_t perfmon_get_pmccfiltr_el0_init_value(ffa_vm_id_t vm_id);
diff --git a/src/arch/aarch64/hypervisor/psci_handler.c b/src/arch/aarch64/hypervisor/psci_handler.c
index aabe979..2f1b7b5 100644
--- a/src/arch/aarch64/hypervisor/psci_handler.c
+++ b/src/arch/aarch64/hypervisor/psci_handler.c
@@ -24,8 +24,8 @@
 #include "hf/api.h"
 #include "hf/cpu.h"
 #include "hf/dlog.h"
+#include "hf/ffa.h"
 #include "hf/panic.h"
-#include "hf/spci.h"
 #include "hf/vm.h"
 
 #include "psci.h"
@@ -38,7 +38,7 @@
 /* Performs arch specific boot time initialisation. */
 void arch_one_time_init(void)
 {
-	struct spci_value smc_res =
+	struct ffa_value smc_res =
 		smc32(PSCI_VERSION, 0, 0, 0, 0, 0, 0, SMCCC_CALLER_HYPERVISOR);
 
 	el3_psci_version = smc_res.func;
@@ -73,7 +73,7 @@
 			     uintreg_t arg1, uintreg_t arg2, uintreg_t *ret)
 {
 	struct cpu *c;
-	struct spci_value smc_res;
+	struct ffa_value smc_res;
 
 	/*
 	 * If there's a problem with the EL3 PSCI, block standard secure service
@@ -242,7 +242,7 @@
  * Convert a PSCI CPU / affinity ID for a secondary VM to the corresponding vCPU
  * index.
  */
-spci_vcpu_index_t vcpu_id_to_index(cpu_id_t vcpu_id)
+ffa_vcpu_index_t vcpu_id_to_index(cpu_id_t vcpu_id)
 {
 	/* For now we use indices as IDs for the purposes of PSCI. */
 	return vcpu_id;
@@ -297,7 +297,7 @@
 		uint32_t lowest_affinity_level = arg1;
 		struct vm *vm = vcpu->vm;
 		struct vcpu_locked target_vcpu;
-		spci_vcpu_index_t target_vcpu_index =
+		ffa_vcpu_index_t target_vcpu_index =
 			vcpu_id_to_index(target_affinity);
 
 		if (lowest_affinity_level != 0) {
@@ -343,7 +343,7 @@
 		cpu_id_t target_cpu = arg0;
 		ipaddr_t entry_point_address = ipa_init(arg1);
 		uint64_t context_id = arg2;
-		spci_vcpu_index_t target_vcpu_index =
+		ffa_vcpu_index_t target_vcpu_index =
 			vcpu_id_to_index(target_cpu);
 		struct vm *vm = vcpu->vm;
 		struct vcpu *target_vcpu;
diff --git a/src/arch/aarch64/hypervisor/tee.c b/src/arch/aarch64/hypervisor/tee.c
index c3885ac..90d9e32 100644
--- a/src/arch/aarch64/hypervisor/tee.c
+++ b/src/arch/aarch64/hypervisor/tee.c
@@ -18,8 +18,8 @@
 
 #include "hf/check.h"
 #include "hf/dlog.h"
+#include "hf/ffa.h"
 #include "hf/panic.h"
-#include "hf/spci.h"
 #include "hf/vm.h"
 
 #include "smc.h"
@@ -27,7 +27,7 @@
 void arch_tee_init(void)
 {
 	struct vm *tee_vm = vm_find(HF_TEE_VM_ID);
-	struct spci_value ret;
+	struct ffa_value ret;
 	uint32_t func;
 
 	CHECK(tee_vm != NULL);
@@ -37,11 +37,11 @@
 	 * perspective and vice-versa.
 	 */
 	dlog_verbose("Setting up buffers for TEE.\n");
-	ret = arch_tee_call((struct spci_value){
-		.func = SPCI_RXTX_MAP_64,
+	ret = arch_tee_call((struct ffa_value){
+		.func = FFA_RXTX_MAP_64,
 		.arg1 = pa_addr(pa_from_va(va_from_ptr(tee_vm->mailbox.recv))),
 		.arg2 = pa_addr(pa_from_va(va_from_ptr(tee_vm->mailbox.send))),
-		.arg3 = HF_MAILBOX_SIZE / SPCI_PAGE_SIZE});
+		.arg3 = HF_MAILBOX_SIZE / FFA_PAGE_SIZE});
 	func = ret.func & ~SMCCC_CONVENTION_MASK;
 	if (ret.func == SMCCC_ERROR_UNKNOWN) {
 		dlog_error(
@@ -49,9 +49,9 @@
 			"Memory sharing with TEE will not work.\n");
 		return;
 	}
-	if (func == SPCI_ERROR_32) {
+	if (func == FFA_ERROR_32) {
 		panic("Error %d setting up TEE message buffers.", ret.arg2);
-	} else if (func != SPCI_SUCCESS_32) {
+	} else if (func != FFA_SUCCESS_32) {
 		panic("Unexpected function %#x returned setting up TEE message "
 		      "buffers.",
 		      ret.func);
@@ -59,7 +59,7 @@
 	dlog_verbose("TEE finished setting up buffers.\n");
 }
 
-struct spci_value arch_tee_call(struct spci_value args)
+struct ffa_value arch_tee_call(struct ffa_value args)
 {
 	return smc_forward(args.func, args.arg1, args.arg2, args.arg3,
 			   args.arg4, args.arg5, args.arg6, args.arg7);
diff --git a/src/arch/aarch64/inc/hf/arch/types.h b/src/arch/aarch64/inc/hf/arch/types.h
index 2ecd722..2f530b6 100644
--- a/src/arch/aarch64/inc/hf/arch/types.h
+++ b/src/arch/aarch64/inc/hf/arch/types.h
@@ -19,7 +19,7 @@
 #include <stdalign.h>
 #include <stdint.h>
 
-#include "hf/spci.h"
+#include "hf/ffa.h"
 #include "hf/static_assert.h"
 
 #define PAGE_BITS 12
@@ -67,7 +67,7 @@
 	 * on that CPU, which avoids contention and so no lock is needed to
 	 * access this field.
 	 */
-	spci_vcpu_index_t last_vcpu_on_cpu[MAX_CPUS];
+	ffa_vcpu_index_t last_vcpu_on_cpu[MAX_CPUS];
 	arch_features_t trapped_features;
 
 	/*
diff --git a/src/arch/aarch64/plat/smc/absent.c b/src/arch/aarch64/plat/smc/absent.c
index 6ea9b8d..4545693 100644
--- a/src/arch/aarch64/plat/smc/absent.c
+++ b/src/arch/aarch64/plat/smc/absent.c
@@ -16,7 +16,7 @@
 
 #include "hf/arch/plat/smc.h"
 
-void plat_smc_post_forward(struct spci_value args, struct spci_value *ret)
+void plat_smc_post_forward(struct ffa_value args, struct ffa_value *ret)
 {
 	(void)args;
 	(void)ret;
diff --git a/src/arch/aarch64/smc.c b/src/arch/aarch64/smc.c
index e5de0bd..633c6de 100644
--- a/src/arch/aarch64/smc.c
+++ b/src/arch/aarch64/smc.c
@@ -18,12 +18,12 @@
 
 #include <stdint.h>
 
-#include "vmapi/hf/spci.h"
+#include "vmapi/hf/ffa.h"
 
-static struct spci_value smc_internal(uint32_t func, uint64_t arg0,
-				      uint64_t arg1, uint64_t arg2,
-				      uint64_t arg3, uint64_t arg4,
-				      uint64_t arg5, uint32_t caller_id)
+static struct ffa_value smc_internal(uint32_t func, uint64_t arg0,
+				     uint64_t arg1, uint64_t arg2,
+				     uint64_t arg3, uint64_t arg4,
+				     uint64_t arg5, uint32_t caller_id)
 {
 	register uint64_t r0 __asm__("x0") = func;
 	register uint64_t r1 __asm__("x1") = arg0;
@@ -40,35 +40,35 @@
 		"+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5),
 		"+r"(r6), "+r"(r7));
 
-	return (struct spci_value){.func = r0,
-				   .arg1 = r1,
-				   .arg2 = r2,
-				   .arg3 = r3,
-				   .arg4 = r4,
-				   .arg5 = r5,
-				   .arg6 = r6,
-				   .arg7 = r7};
+	return (struct ffa_value){.func = r0,
+				  .arg1 = r1,
+				  .arg2 = r2,
+				  .arg3 = r3,
+				  .arg4 = r4,
+				  .arg5 = r5,
+				  .arg6 = r6,
+				  .arg7 = r7};
 }
 
-struct spci_value smc32(uint32_t func, uint32_t arg0, uint32_t arg1,
-			uint32_t arg2, uint32_t arg3, uint32_t arg4,
-			uint32_t arg5, uint32_t caller_id)
+struct ffa_value smc32(uint32_t func, uint32_t arg0, uint32_t arg1,
+		       uint32_t arg2, uint32_t arg3, uint32_t arg4,
+		       uint32_t arg5, uint32_t caller_id)
 {
 	return smc_internal(func | SMCCC_32_BIT, arg0, arg1, arg2, arg3, arg4,
 			    arg5, caller_id);
 }
 
-struct spci_value smc64(uint32_t func, uint64_t arg0, uint64_t arg1,
-			uint64_t arg2, uint64_t arg3, uint64_t arg4,
-			uint64_t arg5, uint32_t caller_id)
+struct ffa_value smc64(uint32_t func, uint64_t arg0, uint64_t arg1,
+		       uint64_t arg2, uint64_t arg3, uint64_t arg4,
+		       uint64_t arg5, uint32_t caller_id)
 {
 	return smc_internal(func | SMCCC_64_BIT, arg0, arg1, arg2, arg3, arg4,
 			    arg5, caller_id);
 }
 
-struct spci_value smc_forward(uint32_t func, uint64_t arg0, uint64_t arg1,
-			      uint64_t arg2, uint64_t arg3, uint64_t arg4,
-			      uint64_t arg5, uint32_t caller_id)
+struct ffa_value smc_forward(uint32_t func, uint64_t arg0, uint64_t arg1,
+			     uint64_t arg2, uint64_t arg3, uint64_t arg4,
+			     uint64_t arg5, uint32_t caller_id)
 {
 	return smc_internal(func, arg0, arg1, arg2, arg3, arg4, arg5,
 			    caller_id);
diff --git a/src/arch/aarch64/smc.h b/src/arch/aarch64/smc.h
index ad8ce5b..872929a 100644
--- a/src/arch/aarch64/smc.h
+++ b/src/arch/aarch64/smc.h
@@ -18,7 +18,7 @@
 
 #include <stdint.h>
 
-#include "vmapi/hf/spci.h"
+#include "vmapi/hf/ffa.h"
 
 /* clang-format off */
 
@@ -49,14 +49,14 @@
 
 /* clang-format on */
 
-struct spci_value smc32(uint32_t func, uint32_t arg0, uint32_t arg1,
-			uint32_t arg2, uint32_t arg3, uint32_t arg4,
-			uint32_t arg5, uint32_t caller_id);
+struct ffa_value smc32(uint32_t func, uint32_t arg0, uint32_t arg1,
+		       uint32_t arg2, uint32_t arg3, uint32_t arg4,
+		       uint32_t arg5, uint32_t caller_id);
 
-struct spci_value smc64(uint32_t func, uint64_t arg0, uint64_t arg1,
-			uint64_t arg2, uint64_t arg3, uint64_t arg4,
-			uint64_t arg5, uint32_t caller_id);
+struct ffa_value smc64(uint32_t func, uint64_t arg0, uint64_t arg1,
+		       uint64_t arg2, uint64_t arg3, uint64_t arg4,
+		       uint64_t arg5, uint32_t caller_id);
 
-struct spci_value smc_forward(uint32_t func, uint64_t arg0, uint64_t arg1,
-			      uint64_t arg2, uint64_t arg3, uint64_t arg4,
-			      uint64_t arg5, uint32_t caller_id);
+struct ffa_value smc_forward(uint32_t func, uint64_t arg0, uint64_t arg1,
+			     uint64_t arg2, uint64_t arg3, uint64_t arg4,
+			     uint64_t arg5, uint32_t caller_id);
diff --git a/src/arch/aarch64/sysregs.c b/src/arch/aarch64/sysregs.c
index 8f807a0..9e56265 100644
--- a/src/arch/aarch64/sysregs.c
+++ b/src/arch/aarch64/sysregs.c
@@ -35,7 +35,7 @@
  * Returns the value for HCR_EL2 for the particular VM.
  * For now, the primary VM has one value and all secondary VMs share a value.
  */
-uintreg_t get_hcr_el2_value(spci_vm_id_t vm_id)
+uintreg_t get_hcr_el2_value(ffa_vm_id_t vm_id)
 {
 	uintreg_t hcr_el2_value = 0;
 
diff --git a/src/arch/aarch64/sysregs.h b/src/arch/aarch64/sysregs.h
index 7198576..9dbd165 100644
--- a/src/arch/aarch64/sysregs.h
+++ b/src/arch/aarch64/sysregs.h
@@ -20,7 +20,7 @@
 
 #include "hf/cpu.h"
 
-#include "vmapi/hf/spci.h"
+#include "vmapi/hf/ffa.h"
 
 /**
  * RT value that indicates an access to register XZR (always 0).
@@ -588,7 +588,7 @@
  */
 #define SCTLR_EL2_M (UINT64_C(0x1) << 0)
 
-uintreg_t get_hcr_el2_value(spci_vm_id_t vm_id);
+uintreg_t get_hcr_el2_value(ffa_vm_id_t vm_id);
 
 uintreg_t get_mdcr_el2_value(void);
 
diff --git a/src/arch/fake/hypervisor/cpu.c b/src/arch/fake/hypervisor/cpu.c
index 3fc09f9..4a0bcfe 100644
--- a/src/arch/fake/hypervisor/cpu.c
+++ b/src/arch/fake/hypervisor/cpu.c
@@ -17,7 +17,7 @@
 #include "hf/arch/cpu.h"
 
 #include "hf/cpu.h"
-#include "hf/spci.h"
+#include "hf/ffa.h"
 
 void arch_irq_disable(void)
 {
@@ -41,7 +41,7 @@
 	r->arg[0] = arg;
 }
 
-void arch_regs_set_retval(struct arch_regs *r, struct spci_value v)
+void arch_regs_set_retval(struct arch_regs *r, struct ffa_value v)
 {
 	r->arg[0] = v.func;
 	r->arg[1] = v.arg1;
diff --git a/src/arch/fake/hypervisor/tee.c b/src/arch/fake/hypervisor/tee.c
index 6f1eaca..c78c82f 100644
--- a/src/arch/fake/hypervisor/tee.c
+++ b/src/arch/fake/hypervisor/tee.c
@@ -17,11 +17,11 @@
 #include "hf/arch/tee.h"
 
 #include "hf/dlog.h"
-#include "hf/spci.h"
-#include "hf/spci_internal.h"
+#include "hf/ffa.h"
+#include "hf/ffa_internal.h"
 
-struct spci_value arch_tee_call(struct spci_value args)
+struct ffa_value arch_tee_call(struct ffa_value args)
 {
 	dlog_error("Attempted to call TEE function %#x\n", args.func);
-	return spci_error(SPCI_NOT_SUPPORTED);
+	return ffa_error(FFA_NOT_SUPPORTED);
 }
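/*
 * A minimal sketch of what ffa_error() is assumed to expand to (the real
 * helper lives in "hf/ffa_internal.h"): an FFA_ERROR_32 result carrying the
 * error code in arg2.
 */
static inline struct ffa_value example_ffa_error(int32_t error_code)
{
	return (struct ffa_value){.func = FFA_ERROR_32,
				  .arg2 = (uint32_t)error_code};
}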
diff --git a/src/cpu.c b/src/cpu.c
index e52fe2d..c31ad0f 100644
--- a/src/cpu.c
+++ b/src/cpu.c
@@ -42,13 +42,13 @@
 	      "Page alignment is too weak for the stack.");
 
 /**
- * Internal buffer used to store SPCI messages from a VM Tx. Its usage prevents
+ * Internal buffer used to store FF-A messages from a VM Tx. Its usage prevents
  * TOCTOU issues while Hafnium performs actions on information that would
  * otherwise be re-writable by the VM.
  *
  * Each buffer is owned by a single CPU. The buffer can only be used for
- * spci_msg_send. The information stored in the buffer is only valid during the
- * spci_msg_send request is performed.
+ * ffa_msg_send. The information stored in the buffer is only valid while the
+ * ffa_msg_send request is being performed.
  */
 alignas(PAGE_SIZE) static uint8_t cpu_message_buffer[MAX_CPUS][PAGE_SIZE];
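/*
 * A sketch of the copy-then-validate pattern described above; the function
 * and parameter names are illustrative, not part of this change. Copying the
 * VM-writable Tx page into the per-CPU buffer first means every later check
 * runs on data the VM can no longer rewrite.
 */
static void example_handle_msg_send(size_t cpu_index, const void *vm_tx_page)
{
	void *copy = cpu_message_buffer[cpu_index];

	memcpy_s(copy, PAGE_SIZE, vm_tx_page, PAGE_SIZE);
	/* Validate and act only on `copy` from here on. */
}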
 
diff --git a/src/dlog.c b/src/dlog.c
index 1a4bf67..32600ce 100644
--- a/src/dlog.c
+++ b/src/dlog.c
@@ -19,7 +19,7 @@
 #include <stdbool.h>
 #include <stddef.h>
 
-#include "hf/spci.h"
+#include "hf/ffa.h"
 #include "hf/spinlock.h"
 #include "hf/std.h"
 #include "hf/stdout.h"
@@ -229,7 +229,7 @@
  * Send the contents of the given VM's log buffer to the log, preceded by the VM
  * ID and followed by a newline.
  */
-void dlog_flush_vm_buffer(spci_vm_id_t id, char buffer[], size_t length)
+void dlog_flush_vm_buffer(ffa_vm_id_t id, char buffer[], size_t length)
 {
 	lock();
 
diff --git a/src/spci_memory.c b/src/ffa_memory.c
similarity index 65%
rename from src/spci_memory.c
rename to src/ffa_memory.c
index 95cc3ae..8def572 100644
--- a/src/spci_memory.c
+++ b/src/ffa_memory.c
@@ -14,15 +14,15 @@
  * limitations under the License.
  */
 
-#include "hf/spci_memory.h"
+#include "hf/ffa_memory.h"
 
 #include "hf/arch/tee.h"
 
 #include "hf/api.h"
 #include "hf/check.h"
 #include "hf/dlog.h"
+#include "hf/ffa_internal.h"
 #include "hf/mpool.h"
-#include "hf/spci_internal.h"
 #include "hf/std.h"
 #include "hf/vm.h"
 
@@ -36,32 +36,32 @@
  */
 #define MAX_MEM_SHARES 100
 
-static_assert(sizeof(struct spci_memory_region_constituent) % 16 == 0,
-	      "struct spci_memory_region_constituent must be a multiple of 16 "
+static_assert(sizeof(struct ffa_memory_region_constituent) % 16 == 0,
+	      "struct ffa_memory_region_constituent must be a multiple of 16 "
 	      "bytes long.");
-static_assert(sizeof(struct spci_composite_memory_region) % 16 == 0,
-	      "struct spci_composite_memory_region must be a multiple of 16 "
+static_assert(sizeof(struct ffa_composite_memory_region) % 16 == 0,
+	      "struct ffa_composite_memory_region must be a multiple of 16 "
 	      "bytes long.");
-static_assert(sizeof(struct spci_memory_region_attributes) == 4,
-	      "struct spci_memory_region_attributes must be 4bytes long.");
-static_assert(sizeof(struct spci_memory_access) % 16 == 0,
-	      "struct spci_memory_access must be a multiple of 16 bytes long.");
-static_assert(sizeof(struct spci_memory_region) % 16 == 0,
-	      "struct spci_memory_region must be a multiple of 16 bytes long.");
-static_assert(sizeof(struct spci_mem_relinquish) % 16 == 0,
-	      "struct spci_mem_relinquish must be a multiple of 16 "
+static_assert(sizeof(struct ffa_memory_region_attributes) == 4,
+	      "struct ffa_memory_region_attributes must be 4bytes long.");
+static_assert(sizeof(struct ffa_memory_access) % 16 == 0,
+	      "struct ffa_memory_access must be a multiple of 16 bytes long.");
+static_assert(sizeof(struct ffa_memory_region) % 16 == 0,
+	      "struct ffa_memory_region must be a multiple of 16 bytes long.");
+static_assert(sizeof(struct ffa_mem_relinquish) % 16 == 0,
+	      "struct ffa_mem_relinquish must be a multiple of 16 "
 	      "bytes long.");
 
-struct spci_memory_share_state {
+struct ffa_memory_share_state {
 	/**
 	 * The memory region being shared, or NULL if this share state is
 	 * unallocated.
 	 */
-	struct spci_memory_region *memory_region;
+	struct ffa_memory_region *memory_region;
 
 	/**
-	 * The SPCI function used for sharing the memory. Must be one of
-	 * SPCI_MEM_DONATE_32, SPCI_MEM_LEND_32 or SPCI_MEM_SHARE_32 if the
+	 * The FF-A function used for sharing the memory. Must be one of
+	 * FFA_MEM_DONATE_32, FFA_MEM_LEND_32 or FFA_MEM_SHARE_32 if the
 	 * share state is allocated, or 0.
 	 */
 	uint32_t share_func;
@@ -79,24 +79,24 @@
  * Encapsulates the set of share states while the `share_states_lock` is held.
  */
 struct share_states_locked {
-	struct spci_memory_share_state *share_states;
+	struct ffa_memory_share_state *share_states;
 };
 
 /**
- * All access to members of a `struct spci_memory_share_state` must be guarded
+ * All access to members of a `struct ffa_memory_share_state` must be guarded
  * by this lock.
  */
 static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
-static struct spci_memory_share_state share_states[MAX_MEM_SHARES];
+static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];
 
 /**
- * Initialises the next available `struct spci_memory_share_state` and sets
+ * Initialises the next available `struct ffa_memory_share_state` and sets
  * `handle` to its handle. Returns true on success or false if none are
  * available.
  */
 static bool allocate_share_state(uint32_t share_func,
-				 struct spci_memory_region *memory_region,
-				 spci_memory_handle_t *handle)
+				 struct ffa_memory_region *memory_region,
+				 ffa_memory_handle_t *handle)
 {
 	uint64_t i;
 
@@ -106,14 +106,14 @@
 	for (i = 0; i < MAX_MEM_SHARES; ++i) {
 		if (share_states[i].share_func == 0) {
 			uint32_t j;
-			struct spci_memory_share_state *allocated_state =
+			struct ffa_memory_share_state *allocated_state =
 				&share_states[i];
 			allocated_state->share_func = share_func;
 			allocated_state->memory_region = memory_region;
 			for (j = 0; j < MAX_MEM_SHARE_RECIPIENTS; ++j) {
 				allocated_state->retrieved[j] = false;
 			}
-			*handle = i | SPCI_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
+			*handle = i | FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
 			sl_unlock(&share_states_lock_instance);
 			return true;
 		}
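/*
 * A sketch of the handle round trip implied above, with constants assumed
 * from "hf/ffa.h": allocation tags the share-state index with the
 * hypervisor's allocator bit, and get_share_state() below masks the tag off
 * again to recover the index.
 */
static inline uint64_t example_handle_to_index(ffa_memory_handle_t handle)
{
	return handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
}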
@@ -145,11 +145,11 @@
  * returns true. Otherwise returns false and doesn't take the lock.
  */
 static bool get_share_state(struct share_states_locked share_states,
-			    spci_memory_handle_t handle,
-			    struct spci_memory_share_state **share_state_ret)
+			    ffa_memory_handle_t handle,
+			    struct ffa_memory_share_state **share_state_ret)
 {
-	struct spci_memory_share_state *share_state;
-	uint32_t index = handle & ~SPCI_MEMORY_HANDLE_ALLOCATOR_MASK;
+	struct ffa_memory_share_state *share_state;
+	uint32_t index = handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
 
 	if (index >= MAX_MEM_SHARES) {
 		return false;
@@ -167,7 +167,7 @@
 
 /** Marks a share state as unallocated. */
 static void share_state_free(struct share_states_locked share_states,
-			     struct spci_memory_share_state *share_state,
+			     struct ffa_memory_share_state *share_state,
 			     struct mpool *page_pool)
 {
 	CHECK(share_states.share_states != NULL);
@@ -180,11 +180,11 @@
  * Marks the share state with the given handle as unallocated, or returns false
  * if the handle was invalid.
  */
-static bool share_state_free_handle(spci_memory_handle_t handle,
+static bool share_state_free_handle(ffa_memory_handle_t handle,
 				    struct mpool *page_pool)
 {
 	struct share_states_locked share_states = share_states_lock();
-	struct spci_memory_share_state *share_state;
+	struct ffa_memory_share_state *share_state;
 
 	if (!get_share_state(share_states, handle, &share_state)) {
 		share_states_unlock(&share_states);
@@ -197,7 +197,7 @@
 	return true;
 }
 
-static void dump_memory_region(struct spci_memory_region *memory_region)
+static void dump_memory_region(struct ffa_memory_region *memory_region)
 {
 	uint32_t i;
 
@@ -238,13 +238,13 @@
 		if (share_states[i].share_func != 0) {
 			dlog("%d: ", i);
 			switch (share_states[i].share_func) {
-			case SPCI_MEM_SHARE_32:
+			case FFA_MEM_SHARE_32:
 				dlog("SHARE");
 				break;
-			case SPCI_MEM_LEND_32:
+			case FFA_MEM_LEND_32:
 				dlog("LEND");
 				break;
-			case SPCI_MEM_DONATE_32:
+			case FFA_MEM_DONATE_32:
 				dlog("DONATE");
 				break;
 			default:
@@ -265,32 +265,32 @@
 }
 
 /* TODO: Add device attributes: GRE, cacheability, shareability. */
-static inline uint32_t spci_memory_permissions_to_mode(
-	spci_memory_access_permissions_t permissions)
+static inline uint32_t ffa_memory_permissions_to_mode(
+	ffa_memory_access_permissions_t permissions)
 {
 	uint32_t mode = 0;
 
-	switch (spci_get_data_access_attr(permissions)) {
-	case SPCI_DATA_ACCESS_RO:
+	switch (ffa_get_data_access_attr(permissions)) {
+	case FFA_DATA_ACCESS_RO:
 		mode = MM_MODE_R;
 		break;
-	case SPCI_DATA_ACCESS_RW:
-	case SPCI_DATA_ACCESS_NOT_SPECIFIED:
+	case FFA_DATA_ACCESS_RW:
+	case FFA_DATA_ACCESS_NOT_SPECIFIED:
 		mode = MM_MODE_R | MM_MODE_W;
 		break;
-	case SPCI_DATA_ACCESS_RESERVED:
-		panic("Tried to convert SPCI_DATA_ACCESS_RESERVED.");
+	case FFA_DATA_ACCESS_RESERVED:
+		panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
 	}
 
-	switch (spci_get_instruction_access_attr(permissions)) {
-	case SPCI_INSTRUCTION_ACCESS_NX:
+	switch (ffa_get_instruction_access_attr(permissions)) {
+	case FFA_INSTRUCTION_ACCESS_NX:
 		break;
-	case SPCI_INSTRUCTION_ACCESS_X:
-	case SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED:
+	case FFA_INSTRUCTION_ACCESS_X:
+	case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
 		mode |= MM_MODE_X;
 		break;
-	case SPCI_INSTRUCTION_ACCESS_RESERVED:
-		panic("Tried to convert SPCI_INSTRUCTION_ACCESS_RESVERVED.");
+	case FFA_INSTRUCTION_ACCESS_RESERVED:
+		panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESVERVED.");
 	}
 
 	return mode;
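/*
 * A usage sketch for the conversion above, relying on the attribute setters
 * renamed elsewhere in this patch: read-only, non-executable permissions
 * should map to MM_MODE_R alone.
 */
static void example_permissions_to_mode(void)
{
	ffa_memory_access_permissions_t permissions = 0;

	ffa_set_data_access_attr(&permissions, FFA_DATA_ACCESS_RO);
	ffa_set_instruction_access_attr(&permissions,
					FFA_INSTRUCTION_ACCESS_NX);
	CHECK(ffa_memory_permissions_to_mode(permissions) == MM_MODE_R);
}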
@@ -299,11 +299,11 @@
 /**
  * Get the current mode in the stage-2 page table of the given vm of all the
  * pages in the given constituents, if they all have the same mode, or return
- * an appropriate SPCI error if not.
+ * an appropriate FF-A error if not.
  */
-static struct spci_value constituents_get_mode(
+static struct ffa_value constituents_get_mode(
 	struct vm_locked vm, uint32_t *orig_mode,
-	struct spci_memory_region_constituent *constituents,
+	struct ffa_memory_region_constituent *constituents,
 	uint32_t constituent_count)
 {
 	uint32_t i;
@@ -313,7 +313,7 @@
 		 * Fail if there are no constituents. Otherwise we would get an
 		 * uninitialised *orig_mode.
 		 */
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	for (i = 0; i < constituent_count; ++i) {
@@ -325,7 +325,7 @@
 		/* Fail if addresses are not page-aligned. */
 		if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
 		    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
-			return spci_error(SPCI_INVALID_PARAMETERS);
+			return ffa_error(FFA_INVALID_PARAMETERS);
 		}
 
 		/*
@@ -334,7 +334,7 @@
 		 */
 		if (!mm_vm_get_mode(&vm.vm->ptable, begin, end,
 				    &current_mode)) {
-			return spci_error(SPCI_DENIED);
+			return ffa_error(FFA_DENIED);
 		}
 
 		/*
@@ -343,11 +343,11 @@
 		if (i == 0) {
 			*orig_mode = current_mode;
 		} else if (current_mode != *orig_mode) {
-			return spci_error(SPCI_DENIED);
+			return ffa_error(FFA_DENIED);
 		}
 	}
 
-	return (struct spci_value){.func = SPCI_SUCCESS_32};
+	return (struct ffa_value){.func = FFA_SUCCESS_32};
 }
 
 /**
@@ -356,29 +356,29 @@
  * to the sending VM.
  *
  * Returns:
- *   1) SPCI_DENIED if a state transition was not found;
- *   2) SPCI_DENIED if the pages being shared do not have the same mode within
+ *   1) FFA_DENIED if a state transition was not found;
+ *   2) FFA_DENIED if the pages being shared do not have the same mode within
  *     the <from> VM;
- *   3) SPCI_INVALID_PARAMETERS if the beginning and end IPAs are not page
+ *   3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
  *     aligned;
- *   4) SPCI_INVALID_PARAMETERS if the requested share type was not handled.
- *  Or SPCI_SUCCESS on success.
+ *   4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
+ *  Or FFA_SUCCESS on success.
  */
-static struct spci_value spci_send_check_transition(
+static struct ffa_value ffa_send_check_transition(
 	struct vm_locked from, uint32_t share_func,
-	spci_memory_access_permissions_t permissions, uint32_t *orig_from_mode,
-	struct spci_memory_region_constituent *constituents,
+	ffa_memory_access_permissions_t permissions, uint32_t *orig_from_mode,
+	struct ffa_memory_region_constituent *constituents,
 	uint32_t constituent_count, uint32_t *from_mode)
 {
 	const uint32_t state_mask =
 		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
 	const uint32_t required_from_mode =
-		spci_memory_permissions_to_mode(permissions);
-	struct spci_value ret;
+		ffa_memory_permissions_to_mode(permissions);
+	struct ffa_value ret;
 
 	ret = constituents_get_mode(from, orig_from_mode, constituents,
 				    constituent_count);
-	if (ret.func != SPCI_SUCCESS_32) {
+	if (ret.func != FFA_SUCCESS_32) {
 		return ret;
 	}
 
@@ -386,7 +386,7 @@
 	if (*orig_from_mode & MM_MODE_D) {
 		dlog_verbose("Can't share device memory (mode is %#x).\n",
 			     *orig_from_mode);
-		return spci_error(SPCI_DENIED);
+		return ffa_error(FFA_DENIED);
 	}
 
 	/*
@@ -394,7 +394,7 @@
 	 * memory.
 	 */
 	if ((*orig_from_mode & state_mask) != 0) {
-		return spci_error(SPCI_DENIED);
+		return ffa_error(FFA_DENIED);
 	}
 
 	if ((*orig_from_mode & required_from_mode) != required_from_mode) {
@@ -402,44 +402,44 @@
 			"Sender tried to send memory with permissions which "
 			"required mode %#x but only had %#x itself.\n",
 			required_from_mode, *orig_from_mode);
-		return spci_error(SPCI_DENIED);
+		return ffa_error(FFA_DENIED);
 	}
 
 	/* Find the appropriate new mode. */
 	*from_mode = ~state_mask & *orig_from_mode;
 	switch (share_func) {
-	case SPCI_MEM_DONATE_32:
+	case FFA_MEM_DONATE_32:
 		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
 		break;
 
-	case SPCI_MEM_LEND_32:
+	case FFA_MEM_LEND_32:
 		*from_mode |= MM_MODE_INVALID;
 		break;
 
-	case SPCI_MEM_SHARE_32:
+	case FFA_MEM_SHARE_32:
 		*from_mode |= MM_MODE_SHARED;
 		break;
 
 	default:
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
-	return (struct spci_value){.func = SPCI_SUCCESS_32};
+	return (struct ffa_value){.func = FFA_SUCCESS_32};
 }
 
-static struct spci_value spci_relinquish_check_transition(
+static struct ffa_value ffa_relinquish_check_transition(
 	struct vm_locked from, uint32_t *orig_from_mode,
-	struct spci_memory_region_constituent *constituents,
+	struct ffa_memory_region_constituent *constituents,
 	uint32_t constituent_count, uint32_t *from_mode)
 {
 	const uint32_t state_mask =
 		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
 	uint32_t orig_from_state;
-	struct spci_value ret;
+	struct ffa_value ret;
 
 	ret = constituents_get_mode(from, orig_from_mode, constituents,
 				    constituent_count);
-	if (ret.func != SPCI_SUCCESS_32) {
+	if (ret.func != FFA_SUCCESS_32) {
 		return ret;
 	}
 
@@ -447,7 +447,7 @@
 	if (*orig_from_mode & MM_MODE_D) {
 		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
 			     *orig_from_mode);
-		return spci_error(SPCI_DENIED);
+		return ffa_error(FFA_DENIED);
 	}
 
 	/*
@@ -461,13 +461,13 @@
 			"but "
 			"should be %#x).\n",
 			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
-		return spci_error(SPCI_DENIED);
+		return ffa_error(FFA_DENIED);
 	}
 
 	/* Find the appropriate new mode. */
 	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;
 
-	return (struct spci_value){.func = SPCI_SUCCESS_32};
+	return (struct ffa_value){.func = FFA_SUCCESS_32};
 }
 
 /**
@@ -476,37 +476,37 @@
  * to the retrieving VM.
  *
  * Returns:
- *   1) SPCI_DENIED if a state transition was not found;
- *   2) SPCI_DENIED if the pages being shared do not have the same mode within
+ *   1) FFA_DENIED if a state transition was not found;
+ *   2) FFA_DENIED if the pages being shared do not have the same mode within
  *     the <to> VM;
- *   3) SPCI_INVALID_PARAMETERS if the beginning and end IPAs are not page
+ *   3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
  *     aligned;
- *   4) SPCI_INVALID_PARAMETERS if the requested share type was not handled.
- *  Or SPCI_SUCCESS on success.
+ *   4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
+ *  Or FFA_SUCCESS on success.
  */
-static struct spci_value spci_retrieve_check_transition(
+static struct ffa_value ffa_retrieve_check_transition(
 	struct vm_locked to, uint32_t share_func,
-	struct spci_memory_region_constituent *constituents,
+	struct ffa_memory_region_constituent *constituents,
 	uint32_t constituent_count, uint32_t memory_to_attributes,
 	uint32_t *to_mode)
 {
 	uint32_t orig_to_mode;
-	struct spci_value ret;
+	struct ffa_value ret;
 
 	ret = constituents_get_mode(to, &orig_to_mode, constituents,
 				    constituent_count);
-	if (ret.func != SPCI_SUCCESS_32) {
+	if (ret.func != FFA_SUCCESS_32) {
 		return ret;
 	}
 
-	if (share_func == SPCI_MEM_RECLAIM_32) {
+	if (share_func == FFA_MEM_RECLAIM_32) {
 		const uint32_t state_mask =
 			MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
 		uint32_t orig_to_state = orig_to_mode & state_mask;
 
 		if (orig_to_state != MM_MODE_INVALID &&
 		    orig_to_state != MM_MODE_SHARED) {
-			return spci_error(SPCI_DENIED);
+			return ffa_error(FFA_DENIED);
 		}
 	} else {
 		/*
@@ -516,34 +516,34 @@
 		 */
 		if ((orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
 		    MM_MODE_UNMAPPED_MASK) {
-			return spci_error(SPCI_DENIED);
+			return ffa_error(FFA_DENIED);
 		}
 	}
 
 	/* Find the appropriate new mode. */
 	*to_mode = memory_to_attributes;
 	switch (share_func) {
-	case SPCI_MEM_DONATE_32:
+	case FFA_MEM_DONATE_32:
 		*to_mode |= 0;
 		break;
 
-	case SPCI_MEM_LEND_32:
+	case FFA_MEM_LEND_32:
 		*to_mode |= MM_MODE_UNOWNED;
 		break;
 
-	case SPCI_MEM_SHARE_32:
+	case FFA_MEM_SHARE_32:
 		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
 		break;
 
-	case SPCI_MEM_RECLAIM_32:
+	case FFA_MEM_RECLAIM_32:
 		*to_mode |= 0;
 		break;
 
 	default:
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
-	return (struct spci_value){.func = SPCI_SUCCESS_32};
+	return (struct ffa_value){.func = FFA_SUCCESS_32};
 }
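/*
 * A compact restatement (editorial sketch) of the state bits the send and
 * retrieve transition checks assign, assuming the MM_MODE_* semantics from
 * "hf/mm.h":
 *
 *   sender after DONATE:  MM_MODE_INVALID | MM_MODE_UNOWNED
 *   sender after LEND:    MM_MODE_INVALID
 *   sender after SHARE:   MM_MODE_SHARED
 *   receiver on DONATE:   no extra bits (full owner)
 *   receiver on LEND:     MM_MODE_UNOWNED
 *   receiver on SHARE:    MM_MODE_UNOWNED | MM_MODE_SHARED
 */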
 
 /**
@@ -564,9 +564,9 @@
  * Returns true on success, or false if the update failed and no changes were
  * made to memory mappings.
  */
-static bool spci_region_group_identity_map(
+static bool ffa_region_group_identity_map(
 	struct vm_locked vm_locked,
-	struct spci_memory_region_constituent *constituents,
+	struct ffa_memory_region_constituent *constituents,
 	uint32_t constituent_count, int mode, struct mpool *ppool, bool commit)
 {
 	/* Iterate over the memory region constituents. */
@@ -633,8 +633,8 @@
  * Clears a region of physical memory by overwriting it with zeros. The data is
  * flushed from the cache so the memory has been cleared across the system.
  */
-static bool spci_clear_memory_constituents(
-	struct spci_memory_region_constituent *constituents,
+static bool ffa_clear_memory_constituents(
+	struct ffa_memory_region_constituent *constituents,
 	uint32_t constituent_count, struct mpool *page_pool)
 {
 	struct mpool local_page_pool;
@@ -685,33 +685,33 @@
  *
  * Returns:
  *  In case of error, one of the following values is returned:
- *   1) SPCI_INVALID_PARAMETERS - The endpoint provided parameters were
+ *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
  *     erroneous;
- *   2) SPCI_NO_MEMORY - Hafnium did not have sufficient memory to complete
+ *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
  *     the request.
- *   3) SPCI_DENIED - The sender doesn't have sufficient access to send the
+ *   3) FFA_DENIED - The sender doesn't have sufficient access to send the
  *     memory with the given permissions.
- *  Success is indicated by SPCI_SUCCESS.
+ *  Success is indicated by FFA_SUCCESS.
  */
-static struct spci_value spci_send_memory(
+static struct ffa_value ffa_send_memory(
 	struct vm_locked from_locked,
-	struct spci_memory_region_constituent *constituents,
+	struct ffa_memory_region_constituent *constituents,
 	uint32_t constituent_count, uint32_t share_func,
-	spci_memory_access_permissions_t permissions, struct mpool *page_pool,
+	ffa_memory_access_permissions_t permissions, struct mpool *page_pool,
 	bool clear)
 {
 	struct vm *from = from_locked.vm;
 	uint32_t orig_from_mode;
 	uint32_t from_mode;
 	struct mpool local_page_pool;
-	struct spci_value ret;
+	struct ffa_value ret;
 
 	/*
 	 * Make sure constituents are properly aligned to a 64-bit boundary. If
 	 * not we would get alignment faults trying to read (64-bit) values.
 	 */
 	if (!is_aligned(constituents, 8)) {
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/*
@@ -719,10 +719,10 @@
 	 * all constituents of a memory region being shared are at the same
 	 * state.
 	 */
-	ret = spci_send_check_transition(from_locked, share_func, permissions,
-					 &orig_from_mode, constituents,
-					 constituent_count, &from_mode);
-	if (ret.func != SPCI_SUCCESS_32) {
+	ret = ffa_send_check_transition(from_locked, share_func, permissions,
+					&orig_from_mode, constituents,
+					constituent_count, &from_mode);
+	if (ret.func != FFA_SUCCESS_32) {
 		return ret;
 	}
 
@@ -738,11 +738,11 @@
 	 * without committing, to make sure the entire operation will succeed
 	 * without exhausting the page pool.
 	 */
-	if (!spci_region_group_identity_map(from_locked, constituents,
-					    constituent_count, from_mode,
-					    page_pool, false)) {
+	if (!ffa_region_group_identity_map(from_locked, constituents,
+					   constituent_count, from_mode,
+					   page_pool, false)) {
 		/* TODO: partial defrag of failed range. */
-		ret = spci_error(SPCI_NO_MEMORY);
+		ret = ffa_error(FFA_NO_MEMORY);
 		goto out;
 	}
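/*
 * The prepare/commit idiom used above and below, as a standalone sketch
 * (parameter names illustrative): the dry run may fail cleanly when the
 * page pool is exhausted, while the commit run cannot fail because every
 * page it needs was allocated during the dry run.
 */
static bool example_two_phase_map(struct vm_locked vm,
				  struct ffa_memory_region_constituent *c,
				  uint32_t count, int mode, struct mpool *pool)
{
	if (!ffa_region_group_identity_map(vm, c, count, mode, pool, false)) {
		return false; /* No page-table changes were made. */
	}
	CHECK(ffa_region_group_identity_map(vm, c, count, mode, pool, true));
	return true;
}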
 
@@ -752,12 +752,12 @@
 	 * case that a whole block is being unmapped that was previously
 	 * partially mapped.
 	 */
-	CHECK(spci_region_group_identity_map(from_locked, constituents,
-					     constituent_count, from_mode,
-					     &local_page_pool, true));
+	CHECK(ffa_region_group_identity_map(from_locked, constituents,
+					    constituent_count, from_mode,
+					    &local_page_pool, true));
 
 	/* Clear the memory so no VM or device can see the previous contents. */
-	if (clear && !spci_clear_memory_constituents(
+	if (clear && !ffa_clear_memory_constituents(
 			     constituents, constituent_count, page_pool)) {
 		/*
 		 * On failure, roll back by returning memory to the sender. This
@@ -765,15 +765,15 @@
 		 * `local_page_pool` by the call above, but will never allocate
 		 * more pages than that so can never fail.
 		 */
-		CHECK(spci_region_group_identity_map(
+		CHECK(ffa_region_group_identity_map(
 			from_locked, constituents, constituent_count,
 			orig_from_mode, &local_page_pool, true));
 
-		ret = spci_error(SPCI_NO_MEMORY);
+		ret = ffa_error(FFA_NO_MEMORY);
 		goto out;
 	}
 
-	ret = (struct spci_value){.func = SPCI_SUCCESS_32};
+	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
 
 out:
 	mpool_fini(&local_page_pool);
@@ -794,22 +794,22 @@
  *
  * Returns:
  *  In case of error, one of the following values is returned:
- *   1) SPCI_INVALID_PARAMETERS - The endpoint provided parameters were
+ *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
  *     erroneous;
- *   2) SPCI_NO_MEMORY - Hafnium did not have sufficient memory to complete
+ *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
  *     the request.
- *  Success is indicated by SPCI_SUCCESS.
+ *  Success is indicated by FFA_SUCCESS.
  */
-static struct spci_value spci_retrieve_memory(
+static struct ffa_value ffa_retrieve_memory(
 	struct vm_locked to_locked,
-	struct spci_memory_region_constituent *constituents,
+	struct ffa_memory_region_constituent *constituents,
 	uint32_t constituent_count, uint32_t memory_to_attributes,
 	uint32_t share_func, bool clear, struct mpool *page_pool)
 {
 	struct vm *to = to_locked.vm;
 	uint32_t to_mode;
 	struct mpool local_page_pool;
-	struct spci_value ret;
+	struct ffa_value ret;
 
 	/*
 	 * Make sure constituents are properly aligned to a 32-bit boundary. If
@@ -817,7 +817,7 @@
 	 */
 	if (!is_aligned(constituents, 4)) {
 		dlog_verbose("Constituents not aligned.\n");
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/*
@@ -825,10 +825,10 @@
 	 * that all constituents of the memory region being retrieved are at the
 	 * same state.
 	 */
-	ret = spci_retrieve_check_transition(to_locked, share_func,
-					     constituents, constituent_count,
-					     memory_to_attributes, &to_mode);
-	if (ret.func != SPCI_SUCCESS_32) {
+	ret = ffa_retrieve_check_transition(to_locked, share_func, constituents,
+					    constituent_count,
+					    memory_to_attributes, &to_mode);
+	if (ret.func != FFA_SUCCESS_32) {
 		dlog_verbose("Invalid transition.\n");
 		return ret;
 	}
@@ -845,21 +845,21 @@
 	 * the recipient page tables without committing, to make sure the entire
 	 * operation will succeed without exhausting the page pool.
 	 */
-	if (!spci_region_group_identity_map(to_locked, constituents,
-					    constituent_count, to_mode,
-					    page_pool, false)) {
+	if (!ffa_region_group_identity_map(to_locked, constituents,
+					   constituent_count, to_mode,
+					   page_pool, false)) {
 		/* TODO: partial defrag of failed range. */
 		dlog_verbose(
 			"Insufficient memory to update recipient page "
 			"table.\n");
-		ret = spci_error(SPCI_NO_MEMORY);
+		ret = ffa_error(FFA_NO_MEMORY);
 		goto out;
 	}
 
 	/* Clear the memory so no VM or device can see the previous contents. */
-	if (clear && !spci_clear_memory_constituents(
+	if (clear && !ffa_clear_memory_constituents(
 			     constituents, constituent_count, page_pool)) {
-		ret = spci_error(SPCI_NO_MEMORY);
+		ret = ffa_error(FFA_NO_MEMORY);
 		goto out;
 	}
 
@@ -868,11 +868,11 @@
 	 * won't allocate because the transaction was already prepared above, so
 	 * it doesn't need to use the `local_page_pool`.
 	 */
-	CHECK(spci_region_group_identity_map(to_locked, constituents,
-					     constituent_count, to_mode,
-					     page_pool, true));
+	CHECK(ffa_region_group_identity_map(to_locked, constituents,
+					    constituent_count, to_mode,
+					    page_pool, true));
 
-	ret = (struct spci_value){.func = SPCI_SUCCESS_32};
+	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
 
 out:
 	mpool_fini(&local_page_pool);
@@ -896,23 +896,23 @@
  *
  * Returns:
  *  In case of error, one of the following values is returned:
- *   1) SPCI_INVALID_PARAMETERS - The endpoint provided parameters were
+ *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
  *     erroneous;
- *   2) SPCI_NO_MEMORY - Hafnium did not have sufficient memory to complete
+ *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
  *     the request.
- *  Success is indicated by SPCI_SUCCESS.
+ *  Success is indicated by FFA_SUCCESS.
  */
-static struct spci_value spci_tee_reclaim_memory(
-	struct vm_locked to_locked, spci_memory_handle_t handle,
-	struct spci_memory_region_constituent *constituents,
+static struct ffa_value ffa_tee_reclaim_memory(
+	struct vm_locked to_locked, ffa_memory_handle_t handle,
+	struct ffa_memory_region_constituent *constituents,
 	uint32_t constituent_count, uint32_t memory_to_attributes, bool clear,
 	struct mpool *page_pool)
 {
 	struct vm *to = to_locked.vm;
 	uint32_t to_mode;
 	struct mpool local_page_pool;
-	struct spci_value ret;
-	spci_memory_region_flags_t tee_flags;
+	struct ffa_value ret;
+	ffa_memory_region_flags_t tee_flags;
 
 	/*
 	 * Make sure constituents are properly aligned to a 32-bit boundary. If
@@ -920,7 +920,7 @@
 	 */
 	if (!is_aligned(constituents, 4)) {
 		dlog_verbose("Constituents not aligned.\n");
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/*
@@ -928,10 +928,10 @@
 	 * that all constituents of the memory region being retrieved are at the
 	 * same state.
 	 */
-	ret = spci_retrieve_check_transition(to_locked, SPCI_MEM_RECLAIM_32,
-					     constituents, constituent_count,
-					     memory_to_attributes, &to_mode);
-	if (ret.func != SPCI_SUCCESS_32) {
+	ret = ffa_retrieve_check_transition(to_locked, FFA_MEM_RECLAIM_32,
+					    constituents, constituent_count,
+					    memory_to_attributes, &to_mode);
+	if (ret.func != FFA_SUCCESS_32) {
 		dlog_verbose("Invalid transition.\n");
 		return ret;
 	}
@@ -948,14 +948,14 @@
 	 * the recipient page tables without committing, to make sure the entire
 	 * operation will succeed without exhausting the page pool.
 	 */
-	if (!spci_region_group_identity_map(to_locked, constituents,
-					    constituent_count, to_mode,
-					    page_pool, false)) {
+	if (!ffa_region_group_identity_map(to_locked, constituents,
+					   constituent_count, to_mode,
+					   page_pool, false)) {
 		/* TODO: partial defrag of failed range. */
 		dlog_verbose(
 			"Insufficient memory to update recipient page "
 			"table.\n");
-		ret = spci_error(SPCI_NO_MEMORY);
+		ret = ffa_error(FFA_NO_MEMORY);
 		goto out;
 	}
 
@@ -964,18 +964,17 @@
 	 */
 	tee_flags = 0;
 	if (clear) {
-		tee_flags |= SPCI_MEMORY_REGION_FLAG_CLEAR;
+		tee_flags |= FFA_MEMORY_REGION_FLAG_CLEAR;
 	}
-	ret = arch_tee_call(
-		(struct spci_value){.func = SPCI_MEM_RECLAIM_32,
-				    .arg1 = (uint32_t)handle,
-				    .arg2 = (uint32_t)(handle >> 32),
-				    .arg3 = tee_flags});
+	ret = arch_tee_call((struct ffa_value){.func = FFA_MEM_RECLAIM_32,
+					       .arg1 = (uint32_t)handle,
+					       .arg2 = (uint32_t)(handle >> 32),
+					       .arg3 = tee_flags});
 
-	if (ret.func != SPCI_SUCCESS_32) {
+	if (ret.func != FFA_SUCCESS_32) {
 		dlog_verbose(
 			"Got %#x (%d) from EL3 in response to "
-			"SPCI_MEM_RECLAIM_32, expected SPCI_SUCCESS_32.\n",
+			"FFA_MEM_RECLAIM_32, expected FFA_SUCCESS_32.\n",
 			ret.func, ret.arg2);
 		goto out;
 	}
@@ -986,11 +985,11 @@
 	 * transaction was already prepared above, so it doesn't need to use the
 	 * `local_page_pool`.
 	 */
-	CHECK(spci_region_group_identity_map(to_locked, constituents,
-					     constituent_count, to_mode,
-					     page_pool, true));
+	CHECK(ffa_region_group_identity_map(to_locked, constituents,
+					    constituent_count, to_mode,
+					    page_pool, true));
 
-	ret = (struct spci_value){.func = SPCI_SUCCESS_32};
+	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
 
 out:
 	mpool_fini(&local_page_pool);
@@ -1004,20 +1003,20 @@
 	return ret;
 }
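/*
 * The SMC32 packing used for the TEE reclaim call above, isolated as a
 * sketch: the 64-bit handle is split across two 32-bit arguments, low half
 * first.
 */
static inline struct ffa_value example_reclaim_call_args(
	ffa_memory_handle_t handle, ffa_memory_region_flags_t tee_flags)
{
	return (struct ffa_value){.func = FFA_MEM_RECLAIM_32,
				  .arg1 = (uint32_t)handle,
				  .arg2 = (uint32_t)(handle >> 32),
				  .arg3 = tee_flags};
}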
 
-static struct spci_value spci_relinquish_memory(
+static struct ffa_value ffa_relinquish_memory(
 	struct vm_locked from_locked,
-	struct spci_memory_region_constituent *constituents,
+	struct ffa_memory_region_constituent *constituents,
 	uint32_t constituent_count, struct mpool *page_pool, bool clear)
 {
 	uint32_t orig_from_mode;
 	uint32_t from_mode;
 	struct mpool local_page_pool;
-	struct spci_value ret;
+	struct ffa_value ret;
 
-	ret = spci_relinquish_check_transition(from_locked, &orig_from_mode,
-					       constituents, constituent_count,
-					       &from_mode);
-	if (ret.func != SPCI_SUCCESS_32) {
+	ret = ffa_relinquish_check_transition(from_locked, &orig_from_mode,
+					      constituents, constituent_count,
+					      &from_mode);
+	if (ret.func != FFA_SUCCESS_32) {
 		dlog_verbose("Invalid transition.\n");
 		return ret;
 	}
@@ -1034,11 +1033,11 @@
 	 * without committing, to make sure the entire operation will succeed
 	 * without exhausting the page pool.
 	 */
-	if (!spci_region_group_identity_map(from_locked, constituents,
-					    constituent_count, from_mode,
-					    page_pool, false)) {
+	if (!ffa_region_group_identity_map(from_locked, constituents,
+					   constituent_count, from_mode,
+					   page_pool, false)) {
 		/* TODO: partial defrag of failed range. */
-		ret = spci_error(SPCI_NO_MEMORY);
+		ret = ffa_error(FFA_NO_MEMORY);
 		goto out;
 	}
 
@@ -1048,12 +1047,12 @@
 	 * case that a whole block is being unmapped that was previously
 	 * partially mapped.
 	 */
-	CHECK(spci_region_group_identity_map(from_locked, constituents,
-					     constituent_count, from_mode,
-					     &local_page_pool, true));
+	CHECK(ffa_region_group_identity_map(from_locked, constituents,
+					    constituent_count, from_mode,
+					    &local_page_pool, true));
 
 	/* Clear the memory so no VM or device can see the previous contents. */
-	if (clear && !spci_clear_memory_constituents(
+	if (clear && !ffa_clear_memory_constituents(
 			     constituents, constituent_count, page_pool)) {
 		/*
 		 * On failure, roll back by returning memory to the sender. This
@@ -1061,15 +1060,15 @@
 		 * `local_page_pool` by the call above, but will never allocate
 		 * more pages than that so can never fail.
 		 */
-		CHECK(spci_region_group_identity_map(
+		CHECK(ffa_region_group_identity_map(
 			from_locked, constituents, constituent_count,
 			orig_from_mode, &local_page_pool, true));
 
-		ret = spci_error(SPCI_NO_MEMORY);
+		ret = ffa_error(FFA_NO_MEMORY);
 		goto out;
 	}
 
-	ret = (struct spci_value){.func = SPCI_SUCCESS_32};
+	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
 
 out:
 	mpool_fini(&local_page_pool);
@@ -1087,20 +1086,20 @@
  * Check that the given `memory_region` represents a valid memory send request
  * of the given `share_func` type, return the clear flag and permissions via the
  * respective output parameters, and update the permissions if necessary.
- * Returns SPCI_SUCCESS if the request was valid, or the relevant SPCI_ERROR if
+ * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
  * not.
  */
-static struct spci_value spci_memory_send_validate(
+static struct ffa_value ffa_memory_send_validate(
 	struct vm *to, struct vm_locked from_locked,
-	struct spci_memory_region *memory_region, uint32_t memory_share_size,
+	struct ffa_memory_region *memory_region, uint32_t memory_share_size,
 	uint32_t share_func, bool *clear,
-	spci_memory_access_permissions_t *permissions)
+	ffa_memory_access_permissions_t *permissions)
 {
-	struct spci_composite_memory_region *composite;
+	struct ffa_composite_memory_region *composite;
 	uint32_t receivers_size;
 	uint32_t constituents_size;
-	enum spci_data_access data_access;
-	enum spci_instruction_access instruction_access;
+	enum ffa_data_access data_access;
+	enum ffa_instruction_access instruction_access;
 
 	CHECK(clear != NULL);
 	CHECK(permissions != NULL);
@@ -1108,97 +1107,97 @@
 	/* The sender must match the message sender. */
 	if (memory_region->sender != from_locked.vm->id) {
 		dlog_verbose("Invalid sender %d.\n", memory_region->sender);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/* We only support a single recipient. */
 	if (memory_region->receiver_count != 1) {
 		dlog_verbose("Multiple recipients not supported.\n");
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/*
 	 * Ensure that the composite header is within the memory bounds and
 	 * doesn't overlap the first part of the message.
 	 */
-	receivers_size = sizeof(struct spci_memory_access) *
+	receivers_size = sizeof(struct ffa_memory_access) *
 			 memory_region->receiver_count;
 	if (memory_region->receivers[0].composite_memory_region_offset <
-		    sizeof(struct spci_memory_region) + receivers_size ||
+		    sizeof(struct ffa_memory_region) + receivers_size ||
 	    memory_region->receivers[0].composite_memory_region_offset +
-			    sizeof(struct spci_composite_memory_region) >=
+			    sizeof(struct ffa_composite_memory_region) >=
 		    memory_share_size) {
 		dlog_verbose(
 			"Invalid composite memory region descriptor offset.\n");
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
-	composite = spci_memory_region_get_composite(memory_region, 0);
+	composite = ffa_memory_region_get_composite(memory_region, 0);
 
 	/*
 	 * Ensure that the number of constituents is within the memory
 	 * bounds.
 	 */
-	constituents_size = sizeof(struct spci_memory_region_constituent) *
+	constituents_size = sizeof(struct ffa_memory_region_constituent) *
 			    composite->constituent_count;
 	if (memory_share_size !=
 	    memory_region->receivers[0].composite_memory_region_offset +
-		    sizeof(struct spci_composite_memory_region) +
+		    sizeof(struct ffa_composite_memory_region) +
 		    constituents_size) {
 		dlog_verbose("Invalid size %d or constituent offset %d.\n",
 			     memory_share_size,
 			     memory_region->receivers[0]
 				     .composite_memory_region_offset);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/* The recipient must match the message recipient. */
 	if (memory_region->receivers[0].receiver_permissions.receiver !=
 	    to->id) {
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
-	*clear = memory_region->flags & SPCI_MEMORY_REGION_FLAG_CLEAR;
+	*clear = memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR;
 	/*
 	 * Clear is not allowed for memory sharing, as the sender still has
 	 * access to the memory.
 	 */
-	if (*clear && share_func == SPCI_MEM_SHARE_32) {
+	if (*clear && share_func == FFA_MEM_SHARE_32) {
 		dlog_verbose("Memory can't be cleared while being shared.\n");
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/* No other flags are allowed/supported here. */
-	if (memory_region->flags & ~SPCI_MEMORY_REGION_FLAG_CLEAR) {
+	if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
 		dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/* Check that the permissions are valid. */
 	*permissions =
 		memory_region->receivers[0].receiver_permissions.permissions;
-	data_access = spci_get_data_access_attr(*permissions);
-	instruction_access = spci_get_instruction_access_attr(*permissions);
-	if (data_access == SPCI_DATA_ACCESS_RESERVED ||
-	    instruction_access == SPCI_INSTRUCTION_ACCESS_RESERVED) {
+	data_access = ffa_get_data_access_attr(*permissions);
+	instruction_access = ffa_get_instruction_access_attr(*permissions);
+	if (data_access == FFA_DATA_ACCESS_RESERVED ||
+	    instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
 		dlog_verbose("Reserved value for receiver permissions %#x.\n",
 			     *permissions);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
-	if (instruction_access != SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
+	if (instruction_access != FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
 		dlog_verbose(
 			"Invalid instruction access permissions %#x for "
 			"sending memory.\n",
 			*permissions);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
-	if (share_func == SPCI_MEM_SHARE_32) {
-		if (data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED) {
+	if (share_func == FFA_MEM_SHARE_32) {
+		if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
 			dlog_verbose(
 				"Invalid data access permissions %#x for "
 				"sharing memory.\n",
 				*permissions);
-			return spci_error(SPCI_INVALID_PARAMETERS);
+			return ffa_error(FFA_INVALID_PARAMETERS);
 		}
 		/*
 		 * According to section 6.11.3 of the FF-A spec NX is required
@@ -1206,29 +1205,29 @@
 		 * sender) so set it in the copy that we store, ready to be
 		 * returned to the retriever.
 		 */
-		spci_set_instruction_access_attr(permissions,
-						 SPCI_INSTRUCTION_ACCESS_NX);
+		ffa_set_instruction_access_attr(permissions,
+						FFA_INSTRUCTION_ACCESS_NX);
 		memory_region->receivers[0].receiver_permissions.permissions =
 			*permissions;
 	}
-	if (share_func == SPCI_MEM_LEND_32 &&
-	    data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED) {
+	if (share_func == FFA_MEM_LEND_32 &&
+	    data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
 		dlog_verbose(
 			"Invalid data access permissions %#x for lending "
 			"memory.\n",
 			*permissions);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
-	if (share_func == SPCI_MEM_DONATE_32 &&
-	    data_access != SPCI_DATA_ACCESS_NOT_SPECIFIED) {
+	if (share_func == FFA_MEM_DONATE_32 &&
+	    data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
 		dlog_verbose(
 			"Invalid data access permissions %#x for donating "
 			"memory.\n",
 			*permissions);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
-	return (struct spci_value){.func = SPCI_SUCCESS_32};
+	return (struct ffa_value){.func = FFA_SUCCESS_32};
 }
 
 /**
@@ -1245,43 +1244,42 @@
  * This function takes ownership of the `memory_region` passed in; it must not
  * be freed by the caller.
  */
-struct spci_value spci_memory_send(struct vm *to, struct vm_locked from_locked,
-				   struct spci_memory_region *memory_region,
-				   uint32_t memory_share_size,
-				   uint32_t share_func, struct mpool *page_pool)
+struct ffa_value ffa_memory_send(struct vm *to, struct vm_locked from_locked,
+				 struct ffa_memory_region *memory_region,
+				 uint32_t memory_share_size,
+				 uint32_t share_func, struct mpool *page_pool)
 {
-	struct spci_composite_memory_region *composite;
+	struct ffa_composite_memory_region *composite;
 	bool clear;
-	spci_memory_access_permissions_t permissions;
-	struct spci_value ret;
-	spci_memory_handle_t handle;
+	ffa_memory_access_permissions_t permissions;
+	struct ffa_value ret;
+	ffa_memory_handle_t handle;
 
 	/*
 	 * If there is an error validating the `memory_region` then we need to
 	 * free it because we own it but we won't be storing it in a share state
 	 * after all.
 	 */
-	ret = spci_memory_send_validate(to, from_locked, memory_region,
-					memory_share_size, share_func, &clear,
-					&permissions);
-	if (ret.func != SPCI_SUCCESS_32) {
+	ret = ffa_memory_send_validate(to, from_locked, memory_region,
+				       memory_share_size, share_func, &clear,
+				       &permissions);
+	if (ret.func != FFA_SUCCESS_32) {
 		mpool_free(page_pool, memory_region);
 		return ret;
 	}
 
 	/* Set flag for share function, ready to be retrieved later. */
 	switch (share_func) {
-	case SPCI_MEM_SHARE_32:
+	case FFA_MEM_SHARE_32:
 		memory_region->flags |=
-			SPCI_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
+			FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
 		break;
-	case SPCI_MEM_LEND_32:
-		memory_region->flags |=
-			SPCI_MEMORY_REGION_TRANSACTION_TYPE_LEND;
+	case FFA_MEM_LEND_32:
+		memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
 		break;
-	case SPCI_MEM_DONATE_32:
+	case FFA_MEM_DONATE_32:
 		memory_region->flags |=
-			SPCI_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
+			FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
 		break;
 	}
 
@@ -1295,17 +1293,17 @@
 	    !allocate_share_state(share_func, memory_region, &handle)) {
 		dlog_verbose("Failed to allocate share state.\n");
 		mpool_free(page_pool, memory_region);
-		return spci_error(SPCI_NO_MEMORY);
+		return ffa_error(FFA_NO_MEMORY);
 	}
 
 	dump_share_states();
 
 	/* Check that state is valid in sender page table and update. */
-	composite = spci_memory_region_get_composite(memory_region, 0);
-	ret = spci_send_memory(from_locked, composite->constituents,
-			       composite->constituent_count, share_func,
-			       permissions, page_pool, clear);
-	if (ret.func != SPCI_SUCCESS_32) {
+	composite = ffa_memory_region_get_composite(memory_region, 0);
+	ret = ffa_send_memory(from_locked, composite->constituents,
+			      composite->constituent_count, share_func,
+			      permissions, page_pool, clear);
+	if (ret.func != FFA_SUCCESS_32) {
 		if (to->id != HF_TEE_VM_ID) {
 			/* Free share state. */
 			bool freed = share_state_free_handle(handle, page_pool);
@@ -1318,63 +1316,64 @@
 
 	if (to->id == HF_TEE_VM_ID) {
 		/* No share state allocated here so no handle to return. */
-		return (struct spci_value){.func = SPCI_SUCCESS_32};
+		return (struct ffa_value){.func = FFA_SUCCESS_32};
 	}
 
-	return (struct spci_value){.func = SPCI_SUCCESS_32, .arg2 = handle};
+	return (struct ffa_value){.func = FFA_SUCCESS_32, .arg2 = handle};
 }
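/*
 * A caller-side sketch (names illustrative): on success the freshly
 * allocated handle comes back in arg2, except for sends to the TEE, where
 * the TEE owns handle allocation and none is returned here.
 */
static void example_send_and_forward_handle(struct vm *to,
					    struct vm_locked from,
					    struct ffa_memory_region *region,
					    uint32_t size, struct mpool *pool)
{
	struct ffa_value ret =
		ffa_memory_send(to, from, region, size, FFA_MEM_LEND_32, pool);

	if (ret.func == FFA_SUCCESS_32) {
		ffa_memory_handle_t handle = ret.arg2;

		(void)handle; /* Forward to the receiver for retrieval. */
	}
}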
 
-struct spci_value spci_memory_retrieve(
-	struct vm_locked to_locked, struct spci_memory_region *retrieve_request,
-	uint32_t retrieve_request_size, struct mpool *page_pool)
+struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
+				     struct ffa_memory_region *retrieve_request,
+				     uint32_t retrieve_request_size,
+				     struct mpool *page_pool)
 {
 	uint32_t expected_retrieve_request_size =
-		sizeof(struct spci_memory_region) +
+		sizeof(struct ffa_memory_region) +
 		retrieve_request->receiver_count *
-			sizeof(struct spci_memory_access);
-	spci_memory_handle_t handle = retrieve_request->handle;
-	spci_memory_region_flags_t transaction_type =
+			sizeof(struct ffa_memory_access);
+	ffa_memory_handle_t handle = retrieve_request->handle;
+	ffa_memory_region_flags_t transaction_type =
 		retrieve_request->flags &
-		SPCI_MEMORY_REGION_TRANSACTION_TYPE_MASK;
-	struct spci_memory_region *memory_region;
-	spci_memory_access_permissions_t sent_permissions;
-	enum spci_data_access sent_data_access;
-	enum spci_instruction_access sent_instruction_access;
-	spci_memory_access_permissions_t requested_permissions;
-	enum spci_data_access requested_data_access;
-	enum spci_instruction_access requested_instruction_access;
-	spci_memory_access_permissions_t permissions;
+		FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
+	struct ffa_memory_region *memory_region;
+	ffa_memory_access_permissions_t sent_permissions;
+	enum ffa_data_access sent_data_access;
+	enum ffa_instruction_access sent_instruction_access;
+	ffa_memory_access_permissions_t requested_permissions;
+	enum ffa_data_access requested_data_access;
+	enum ffa_instruction_access requested_instruction_access;
+	ffa_memory_access_permissions_t permissions;
 	uint32_t memory_to_attributes;
-	struct spci_composite_memory_region *composite;
+	struct ffa_composite_memory_region *composite;
 	struct share_states_locked share_states;
-	struct spci_memory_share_state *share_state;
-	struct spci_value ret;
+	struct ffa_memory_share_state *share_state;
+	struct ffa_value ret;
 	uint32_t response_size;
 
 	dump_share_states();
 
 	if (retrieve_request_size != expected_retrieve_request_size) {
 		dlog_verbose(
-			"Invalid length for SPCI_MEM_RETRIEVE_REQ, expected %d "
+			"Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
 			"but was %d.\n",
 			expected_retrieve_request_size, retrieve_request_size);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	if (retrieve_request->receiver_count != 1) {
 		dlog_verbose(
 			"Multi-way memory sharing not supported (got %d "
-			"receivers descriptors on SPCI_MEM_RETRIEVE_REQ, "
+			"receivers descriptors on FFA_MEM_RETRIEVE_REQ, "
 			"expected 1).\n",
 			retrieve_request->receiver_count);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	share_states = share_states_lock();
 	if (!get_share_state(share_states, handle, &share_state)) {
-		dlog_verbose("Invalid handle %#x for SPCI_MEM_RETRIEVE_REQ.\n",
+		dlog_verbose("Invalid handle %#x for FFA_MEM_RETRIEVE_REQ.\n",
 			     handle);
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
@@ -1386,36 +1385,36 @@
 	 * if it has been specified.
 	 */
 	if (transaction_type !=
-		    SPCI_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
+		    FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
 	    transaction_type != (memory_region->flags &
-				 SPCI_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
+				 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
 		dlog_verbose(
 			"Incorrect transaction type %#x for "
-			"SPCI_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
+			"FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
 			transaction_type,
 			memory_region->flags &
-				SPCI_MEMORY_REGION_TRANSACTION_TYPE_MASK,
+				FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
 			handle);
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
 	if (retrieve_request->sender != memory_region->sender) {
 		dlog_verbose(
-			"Incorrect sender ID %d for SPCI_MEM_RETRIEVE_REQ, "
+			"Incorrect sender ID %d for FFA_MEM_RETRIEVE_REQ, "
 			"expected %d for handle %#x.\n",
 			retrieve_request->sender, memory_region->sender,
 			handle);
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
 	if (retrieve_request->tag != memory_region->tag) {
 		dlog_verbose(
-			"Incorrect tag %d for SPCI_MEM_RETRIEVE_REQ, expected "
+			"Incorrect tag %d for FFA_MEM_RETRIEVE_REQ, expected "
 			"%d for handle %#x.\n",
 			retrieve_request->tag, memory_region->tag, handle);
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
@@ -1423,10 +1422,10 @@
 	    to_locked.vm->id) {
 		dlog_verbose(
 			"Retrieve request receiver VM ID %d didn't match "
-			"caller of SPCI_MEM_RETRIEVE_REQ.\n",
+			"caller of FFA_MEM_RETRIEVE_REQ.\n",
 			retrieve_request->receivers[0]
 				.receiver_permissions.receiver);
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
@@ -1434,19 +1433,19 @@
 	    to_locked.vm->id) {
 		dlog_verbose(
 			"Incorrect receiver VM ID %d for "
-			"SPCI_MEM_RETRIEVE_REQ, expected %d for handle %#x.\n",
+			"FFA_MEM_RETRIEVE_REQ, expected %d for handle %#x.\n",
 			to_locked.vm->id,
 			memory_region->receivers[0]
 				.receiver_permissions.receiver,
 			handle);
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
 	if (share_state->retrieved[0]) {
 		dlog_verbose("Memory with handle %#x already retrieved.\n",
 			     handle);
-		ret = spci_error(SPCI_DENIED);
+		ret = ffa_error(FFA_DENIED);
 		goto out;
 	}
 
@@ -1458,7 +1457,7 @@
 			"%d).\n",
 			retrieve_request->receivers[0]
 				.composite_memory_region_offset);
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
@@ -1469,60 +1468,58 @@
 	/* TODO: Check attributes too. */
 	sent_permissions =
 		memory_region->receivers[0].receiver_permissions.permissions;
-	sent_data_access = spci_get_data_access_attr(sent_permissions);
+	sent_data_access = ffa_get_data_access_attr(sent_permissions);
 	sent_instruction_access =
-		spci_get_instruction_access_attr(sent_permissions);
+		ffa_get_instruction_access_attr(sent_permissions);
 	requested_permissions =
 		retrieve_request->receivers[0].receiver_permissions.permissions;
-	requested_data_access =
-		spci_get_data_access_attr(requested_permissions);
+	requested_data_access = ffa_get_data_access_attr(requested_permissions);
 	requested_instruction_access =
-		spci_get_instruction_access_attr(requested_permissions);
+		ffa_get_instruction_access_attr(requested_permissions);
 	permissions = 0;
 	switch (sent_data_access) {
-	case SPCI_DATA_ACCESS_NOT_SPECIFIED:
-	case SPCI_DATA_ACCESS_RW:
-		if (requested_data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED ||
-		    requested_data_access == SPCI_DATA_ACCESS_RW) {
-			spci_set_data_access_attr(&permissions,
-						  SPCI_DATA_ACCESS_RW);
+	case FFA_DATA_ACCESS_NOT_SPECIFIED:
+	case FFA_DATA_ACCESS_RW:
+		if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
+		    requested_data_access == FFA_DATA_ACCESS_RW) {
+			ffa_set_data_access_attr(&permissions,
+						 FFA_DATA_ACCESS_RW);
 			break;
 		}
 		/* Intentional fall-through. */
-	case SPCI_DATA_ACCESS_RO:
-		if (requested_data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED ||
-		    requested_data_access == SPCI_DATA_ACCESS_RO) {
-			spci_set_data_access_attr(&permissions,
-						  SPCI_DATA_ACCESS_RO);
+	case FFA_DATA_ACCESS_RO:
+		if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
+		    requested_data_access == FFA_DATA_ACCESS_RO) {
+			ffa_set_data_access_attr(&permissions,
+						 FFA_DATA_ACCESS_RO);
 			break;
 		}
 		dlog_verbose(
 			"Invalid data access requested; sender specified "
 			"permissions %#x but receiver requested %#x.\n",
 			sent_permissions, requested_permissions);
-		ret = spci_error(SPCI_DENIED);
+		ret = ffa_error(FFA_DENIED);
 		goto out;
-	case SPCI_DATA_ACCESS_RESERVED:
-		panic("Got unexpected SPCI_DATA_ACCESS_RESERVED. Should be "
+	case FFA_DATA_ACCESS_RESERVED:
+		panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
 		      "checked before this point.");
 	}
 	switch (sent_instruction_access) {
-	case SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED:
-	case SPCI_INSTRUCTION_ACCESS_X:
+	case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
+	case FFA_INSTRUCTION_ACCESS_X:
 		if (requested_instruction_access ==
-			    SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
-		    requested_instruction_access == SPCI_INSTRUCTION_ACCESS_X) {
-			spci_set_instruction_access_attr(
-				&permissions, SPCI_INSTRUCTION_ACCESS_X);
+			    FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
+		    requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
+			ffa_set_instruction_access_attr(
+				&permissions, FFA_INSTRUCTION_ACCESS_X);
 			break;
 		}
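+		/* Intentional fall-through. */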
-	case SPCI_INSTRUCTION_ACCESS_NX:
+	case FFA_INSTRUCTION_ACCESS_NX:
 		if (requested_instruction_access ==
-			    SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
-		    requested_instruction_access ==
-			    SPCI_INSTRUCTION_ACCESS_NX) {
-			spci_set_instruction_access_attr(
-				&permissions, SPCI_INSTRUCTION_ACCESS_NX);
+			    FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
+		    requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
+			ffa_set_instruction_access_attr(
+				&permissions, FFA_INSTRUCTION_ACCESS_NX);
 			break;
 		}
 		dlog_verbose(
@@ -1530,20 +1527,20 @@
 			"specified "
 			"permissions %#x but receiver requested %#x.\n",
 			sent_permissions, requested_permissions);
-		ret = spci_error(SPCI_DENIED);
+		ret = ffa_error(FFA_DENIED);
 		goto out;
-	case SPCI_INSTRUCTION_ACCESS_RESERVED:
-		panic("Got unexpected SPCI_INSTRUCTION_ACCESS_RESERVED. Should "
+	case FFA_INSTRUCTION_ACCESS_RESERVED:
+		panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
 		      "be checked before this point.");
 	}
-	memory_to_attributes = spci_memory_permissions_to_mode(permissions);
+	memory_to_attributes = ffa_memory_permissions_to_mode(permissions);
 
-	composite = spci_memory_region_get_composite(memory_region, 0);
-	ret = spci_retrieve_memory(to_locked, composite->constituents,
-				   composite->constituent_count,
-				   memory_to_attributes,
-				   share_state->share_func, false, page_pool);
-	if (ret.func != SPCI_SUCCESS_32) {
+	composite = ffa_memory_region_get_composite(memory_region, 0);
+	ret = ffa_retrieve_memory(to_locked, composite->constituents,
+				  composite->constituent_count,
+				  memory_to_attributes, share_state->share_func,
+				  false, page_pool);
+	if (ret.func != FFA_SUCCESS_32) {
 		goto out;
 	}
 
@@ -1552,17 +1549,17 @@
 	 * must be done before the share_state is (possibly) freed.
 	 */
 	/* TODO: combine attributes from sender and request. */
-	response_size = spci_retrieved_memory_region_init(
+	response_size = ffa_retrieved_memory_region_init(
 		to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
 		memory_region->sender, memory_region->attributes,
 		memory_region->flags, handle, to_locked.vm->id, permissions,
 		composite->constituents, composite->constituent_count);
 	to_locked.vm->mailbox.recv_size = response_size;
 	to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
-	to_locked.vm->mailbox.recv_func = SPCI_MEM_RETRIEVE_RESP_32;
+	to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
 	to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
 
-	if (share_state->share_func == SPCI_MEM_DONATE_32) {
+	if (share_state->share_func == FFA_MEM_DONATE_32) {
 		/*
 		 * Memory that has been donated can't be relinquished, so no
 		 * need to keep the share state around.
@@ -1573,9 +1570,9 @@
 		share_state->retrieved[0] = true;
 	}
 
-	ret = (struct spci_value){.func = SPCI_MEM_RETRIEVE_RESP_32,
-				  .arg1 = response_size,
-				  .arg2 = response_size};
+	ret = (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
+				 .arg1 = response_size,
+				 .arg2 = response_size};
 
 out:
 	share_states_unlock(&share_states);
@@ -1583,24 +1580,24 @@
 	return ret;
 }
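
The two switches above negotiate the access actually granted: the receiver may
request exactly what the sender offered or leave it unspecified, an RW offer
may be downgraded to RO, and anything else is denied. A minimal,
self-contained sketch of the same rule, in standalone C with invented names
rather than the Hafnium API:

	#include <stdbool.h>
	#include <stdio.h>

	enum access { ACCESS_NOT_SPECIFIED, ACCESS_RW, ACCESS_RO };

	/* Returns true and sets *granted, or false if the request is denied. */
	static bool negotiate_data_access(enum access sent, enum access requested,
					  enum access *granted)
	{
		switch (sent) {
		case ACCESS_NOT_SPECIFIED:
		case ACCESS_RW:
			if (requested == ACCESS_NOT_SPECIFIED ||
			    requested == ACCESS_RW) {
				*granted = ACCESS_RW;
				return true;
			}
			/* Intentional fall-through: RW may be downgraded to RO. */
		case ACCESS_RO:
			if (requested == ACCESS_NOT_SPECIFIED ||
			    requested == ACCESS_RO) {
				*granted = ACCESS_RO;
				return true;
			}
			/* Receiver asked for more than the sender offered. */
			return false;
		}
		return false;
	}

	int main(void)
	{
		enum access granted;
		bool ok;

		ok = negotiate_data_access(ACCESS_RW, ACCESS_RO, &granted);
		printf("RW offer, RO request: ok=%d granted=%d\n", ok, granted);
		ok = negotiate_data_access(ACCESS_RO, ACCESS_RW, &granted);
		printf("RO offer, RW request: ok=%d\n", ok);
		return 0;
	}
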
 
-struct spci_value spci_memory_relinquish(
+struct ffa_value ffa_memory_relinquish(
 	struct vm_locked from_locked,
-	struct spci_mem_relinquish *relinquish_request, struct mpool *page_pool)
+	struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
 {
-	spci_memory_handle_t handle = relinquish_request->handle;
+	ffa_memory_handle_t handle = relinquish_request->handle;
 	struct share_states_locked share_states;
-	struct spci_memory_share_state *share_state;
-	struct spci_memory_region *memory_region;
+	struct ffa_memory_share_state *share_state;
+	struct ffa_memory_region *memory_region;
 	bool clear;
-	struct spci_composite_memory_region *composite;
-	struct spci_value ret;
+	struct ffa_composite_memory_region *composite;
+	struct ffa_value ret;
 
 	if (relinquish_request->endpoint_count != 1) {
 		dlog_verbose(
 			"Stream endpoints not supported (got %d endpoints on "
-			"SPCI_MEM_RELINQUISH, expected 1).\n",
+			"FFA_MEM_RELINQUISH, expected 1).\n",
 			relinquish_request->endpoint_count);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	if (relinquish_request->endpoints[0] != from_locked.vm->id) {
@@ -1608,16 +1605,16 @@
 			"VM ID %d in relinquish message doesn't match calling "
 			"VM ID %d.\n",
 			relinquish_request->endpoints[0], from_locked.vm->id);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	dump_share_states();
 
 	share_states = share_states_lock();
 	if (!get_share_state(share_states, handle, &share_state)) {
-		dlog_verbose("Invalid handle %#x for SPCI_MEM_RELINQUISH.\n",
+		dlog_verbose("Invalid handle %#x for FFA_MEM_RELINQUISH.\n",
 			     handle);
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
@@ -1632,7 +1629,7 @@
 			from_locked.vm->id, handle,
 			memory_region->receivers[0]
 				.receiver_permissions.receiver);
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
@@ -1641,28 +1638,28 @@
 			"Memory with handle %#x not yet retrieved, can't "
 			"relinquish.\n",
 			handle);
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
-	clear = relinquish_request->flags & SPCI_MEMORY_REGION_FLAG_CLEAR;
+	clear = relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR;
 
 	/*
 	 * Clear is not allowed for memory that was shared, as the original
 	 * sender still has access to the memory.
 	 */
-	if (clear && share_state->share_func == SPCI_MEM_SHARE_32) {
+	if (clear && share_state->share_func == FFA_MEM_SHARE_32) {
 		dlog_verbose("Memory which was shared can't be cleared.\n");
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
-	composite = spci_memory_region_get_composite(memory_region, 0);
-	ret = spci_relinquish_memory(from_locked, composite->constituents,
-				     composite->constituent_count, page_pool,
-				     clear);
+	composite = ffa_memory_region_get_composite(memory_region, 0);
+	ret = ffa_relinquish_memory(from_locked, composite->constituents,
+				    composite->constituent_count, page_pool,
+				    clear);
 
-	if (ret.func == SPCI_SUCCESS_32) {
+	if (ret.func == FFA_SUCCESS_32) {
 		/*
 		 * Mark memory handle as not retrieved, so it can be reclaimed
 		 * (or retrieved again).
@@ -1681,24 +1678,24 @@
  * updates the page table of the reclaiming VM, and frees the internal state
  * associated with the handle.
  */
-struct spci_value spci_memory_reclaim(struct vm_locked to_locked,
-				      spci_memory_handle_t handle, bool clear,
-				      struct mpool *page_pool)
+struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
+				    ffa_memory_handle_t handle, bool clear,
+				    struct mpool *page_pool)
 {
 	struct share_states_locked share_states;
-	struct spci_memory_share_state *share_state;
-	struct spci_memory_region *memory_region;
-	struct spci_composite_memory_region *composite;
+	struct ffa_memory_share_state *share_state;
+	struct ffa_memory_region *memory_region;
+	struct ffa_composite_memory_region *composite;
 	uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
-	struct spci_value ret;
+	struct ffa_value ret;
 
 	dump_share_states();
 
 	share_states = share_states_lock();
 	if (!get_share_state(share_states, handle, &share_state)) {
-		dlog_verbose("Invalid handle %#x for SPCI_MEM_RECLAIM.\n",
+		dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n",
 			     handle);
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
@@ -1710,7 +1707,7 @@
 			"VM %d attempted to reclaim memory handle %#x "
 			"originally sent by VM %d.\n",
 			to_locked.vm->id, handle, memory_region->sender);
-		ret = spci_error(SPCI_INVALID_PARAMETERS);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
@@ -1719,17 +1716,17 @@
 			"Tried to reclaim memory handle %#x that has not been "
 			"relinquished.\n",
 			handle);
-		ret = spci_error(SPCI_DENIED);
+		ret = ffa_error(FFA_DENIED);
 		goto out;
 	}
 
-	composite = spci_memory_region_get_composite(memory_region, 0);
-	ret = spci_retrieve_memory(to_locked, composite->constituents,
-				   composite->constituent_count,
-				   memory_to_attributes, SPCI_MEM_RECLAIM_32,
-				   clear, page_pool);
+	composite = ffa_memory_region_get_composite(memory_region, 0);
+	ret = ffa_retrieve_memory(to_locked, composite->constituents,
+				  composite->constituent_count,
+				  memory_to_attributes, FFA_MEM_RECLAIM_32,
+				  clear, page_pool);
 
-	if (ret.func == SPCI_SUCCESS_32) {
+	if (ret.func == FFA_SUCCESS_32) {
 		share_state_free(share_states, share_state, page_pool);
 		dlog_verbose("Freed share state after successful reclaim.\n");
 	}
@@ -1743,13 +1740,13 @@
  * Validates that the reclaim transition is allowed for the given memory region
  * and updates the page table of the reclaiming VM.
  */
-struct spci_value spci_memory_tee_reclaim(
-	struct vm_locked to_locked, spci_memory_handle_t handle,
-	struct spci_memory_region *memory_region, bool clear,
-	struct mpool *page_pool)
+struct ffa_value ffa_memory_tee_reclaim(struct vm_locked to_locked,
+					ffa_memory_handle_t handle,
+					struct ffa_memory_region *memory_region,
+					bool clear, struct mpool *page_pool)
 {
 	uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
-	struct spci_composite_memory_region *composite;
+	struct ffa_composite_memory_region *composite;
 
 	if (memory_region->receiver_count != 1) {
 		/* Only one receiver supported by Hafnium for now. */
@@ -1757,7 +1754,7 @@
 			"Multiple recipients not supported (got %d, expected "
 			"1).\n",
 			memory_region->receiver_count);
-		return spci_error(SPCI_NOT_SUPPORTED);
+		return ffa_error(FFA_NOT_SUPPORTED);
 	}
 
 	if (memory_region->handle != handle) {
@@ -1765,7 +1762,7 @@
 			"Got memory region handle %#x from TEE but requested "
 			"handle %#x.\n",
 			memory_region->handle, handle);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	/* The original sender must match the caller. */
@@ -1774,17 +1771,17 @@
 			"VM %d attempted to reclaim memory handle %#x "
 			"originally sent by VM %d.\n",
 			to_locked.vm->id, handle, memory_region->sender);
-		return spci_error(SPCI_INVALID_PARAMETERS);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
-	composite = spci_memory_region_get_composite(memory_region, 0);
+	composite = ffa_memory_region_get_composite(memory_region, 0);
 
 	/*
 	 * Forward the request to the TEE and then map the memory back into the
 	 * caller's stage-2 page table.
 	 */
-	return spci_tee_reclaim_memory(to_locked, handle,
-				       composite->constituents,
-				       composite->constituent_count,
-				       memory_to_attributes, clear, page_pool);
+	return ffa_tee_reclaim_memory(to_locked, handle,
+				      composite->constituents,
+				      composite->constituent_count,
+				      memory_to_attributes, clear, page_pool);
 }
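
Together, ffa_memory_relinquish and ffa_memory_reclaim give each handle a
small lifecycle: memory must be retrieved before it can be relinquished,
fully relinquished before the sender may reclaim it, and a receiver may not
ask for shared memory to be cleared because the sender still maps it. A
minimal sketch of that state machine, standalone and illustrative rather than
the Hafnium implementation:

	#include <stdbool.h>
	#include <stdio.h>

	enum func { MEM_SHARE, MEM_LEND, MEM_DONATE };

	struct share_state {
		enum func share_func;
		bool retrieved; /* One receiver only, mirroring retrieved[0] above. */
	};

	static bool relinquish(struct share_state *s, bool clear)
	{
		if (!s->retrieved) {
			return false; /* Not yet retrieved; nothing to relinquish. */
		}
		if (clear && s->share_func == MEM_SHARE) {
			return false; /* Sender still maps the memory; can't clear. */
		}
		s->retrieved = false; /* May now be reclaimed or retrieved again. */
		return true;
	}

	static bool reclaim(const struct share_state *s)
	{
		return !s->retrieved; /* Denied while a receiver still holds it. */
	}

	int main(void)
	{
		struct share_state s = {MEM_SHARE, true};

		printf("%d\n", reclaim(&s));           /* 0: still retrieved */
		printf("%d\n", relinquish(&s, true));  /* 0: clear refused for share */
		printf("%d\n", relinquish(&s, false)); /* 1: relinquished */
		printf("%d\n", reclaim(&s));           /* 1: reclaim now allowed */
		return 0;
	}

The sketch collapses Hafnium's FFA_INVALID_PARAMETERS/FFA_DENIED distinctions
into plain booleans; the ordering of the checks is what it aims to show.
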
diff --git a/src/init.c b/src/init.c
index d224d49..5652897 100644
--- a/src/init.c
+++ b/src/init.c
@@ -160,7 +160,7 @@
 	/* Enable TLB invalidation for VM page table updates. */
 	mm_vm_enable_invalidation();
 
-	if (manifest.spci_tee_enabled) {
+	if (manifest.ffa_tee_enabled) {
 		/* Set up message buffers for TEE dispatcher. */
 		arch_tee_init();
 	}
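
The ffa_tee_enabled flag parsed from the manifest's "ffa_tee" node (see
manifest.c below) is what gates this call. A trivial standalone sketch of the
gate, with arch_tee_init() stubbed out rather than the real Hafnium function:

	#include <stdbool.h>
	#include <stdio.h>

	struct manifest {
		bool ffa_tee_enabled; /* Parsed from the "ffa_tee" manifest node. */
	};

	static void arch_tee_init(void)
	{
		printf("TEE dispatcher buffers initialised.\n");
	}

	static void one_time_init(const struct manifest *manifest)
	{
		if (manifest->ffa_tee_enabled) {
			arch_tee_init();
		}
	}

	int main(void)
	{
		struct manifest m = {.ffa_tee_enabled = true};

		one_time_init(&m); /* Prints the init message. */
		return 0;
	}
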
diff --git a/src/load.c b/src/load.c
index 0040201..259726e 100644
--- a/src/load.c
+++ b/src/load.c
@@ -411,7 +411,7 @@
 
 	for (i = 0; i < manifest->vm_count; ++i) {
 		const struct manifest_vm *manifest_vm = &manifest->vm[i];
-		spci_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
+		ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
 		uint64_t mem_size;
 		paddr_t secondary_mem_begin;
 		paddr_t secondary_mem_end;
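
Secondary VM IDs are derived from the manifest index plus HF_VM_ID_OFFSET so
that IDs below the offset stay reserved. A standalone sketch of the
assignment; the offset value here is an assumption, not Hafnium's build
configuration:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint16_t ffa_vm_id_t;

	/* Hypothetical value; IDs 0..HF_VM_ID_OFFSET-1 stay reserved. */
	#define HF_VM_ID_OFFSET 1

	int main(void)
	{
		size_t vm_count = 3;
		size_t i;

		for (i = 0; i < vm_count; ++i) {
			ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;

			printf("manifest VM %zu -> ID %u\n", i, (unsigned)vm_id);
		}
		return 0;
	}
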
diff --git a/src/manifest.c b/src/manifest.c
index 9b80106..460494c 100644
--- a/src/manifest.c
+++ b/src/manifest.c
@@ -41,7 +41,7 @@
 static_assert(HF_TEE_VM_ID > VM_ID_MAX,
 	      "TrustZone VM ID clashes with normal VM range.");
 
-static inline size_t count_digits(spci_vm_id_t vm_id)
+static inline size_t count_digits(ffa_vm_id_t vm_id)
 {
 	size_t digits = 0;
 
@@ -56,7 +56,7 @@
  * Generates a string with the two letters "vm" followed by an integer.
  * Assumes `buf` is of size VM_NAME_BUF_SIZE.
  */
-static void generate_vm_node_name(struct string *str, spci_vm_id_t vm_id)
+static void generate_vm_node_name(struct string *str, ffa_vm_id_t vm_id)
 {
 	static const char *digits = "0123456789";
 	size_t vm_id_digits = count_digits(vm_id);
@@ -216,7 +216,7 @@
 
 static enum manifest_return_code parse_vm(const struct fdt_node *node,
 					  struct manifest_vm *vm,
-					  spci_vm_id_t vm_id)
+					  ffa_vm_id_t vm_id)
 {
 	struct uint32list_iter smcs;
 	size_t idx;
@@ -280,11 +280,11 @@
 		return MANIFEST_ERROR_NOT_COMPATIBLE;
 	}
 
-	TRY(read_bool(&hyp_node, "spci_tee", &manifest->spci_tee_enabled));
+	TRY(read_bool(&hyp_node, "ffa_tee", &manifest->ffa_tee_enabled));
 
 	/* Iterate over reserved VM IDs and check no such nodes exist. */
 	for (i = 0; i < HF_VM_ID_OFFSET; i++) {
-		spci_vm_id_t vm_id = (spci_vm_id_t)i;
+		ffa_vm_id_t vm_id = (ffa_vm_id_t)i;
 		struct fdt_node vm_node = hyp_node;
 
 		generate_vm_node_name(&vm_name, vm_id);
@@ -295,7 +295,7 @@
 
 	/* Iterate over VM nodes until we find one that does not exist. */
 	for (i = 0; i <= MAX_VMS; ++i) {
-		spci_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
+		ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
 		struct fdt_node vm_node = hyp_node;
 
 		generate_vm_node_name(&vm_name, vm_id);
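
The parser probes manifest nodes named "vm" followed by the decimal VM ID
("vm1", "vm2", ...) until one is missing. A standalone sketch of the name
generation; it is illustrative and may differ in minor details (e.g. it
prints a single digit for ID 0) from the Hafnium helpers:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint16_t ffa_vm_id_t;

	static size_t count_digits(ffa_vm_id_t vm_id)
	{
		size_t digits = 0;

		do {
			++digits;
			vm_id /= 10;
		} while (vm_id != 0);
		return digits;
	}

	/* Writes "vm<id>"; buf must hold at least 2 + 5 + 1 bytes. */
	static void generate_vm_node_name(char *buf, ffa_vm_id_t vm_id)
	{
		char *p = buf + 2 + count_digits(vm_id);

		*p = '\0';
		do {
			*--p = '0' + (vm_id % 10);
			vm_id /= 10;
		} while (vm_id != 0);
		buf[0] = 'v';
		buf[1] = 'm';
	}

	int main(void)
	{
		char name[8];

		generate_vm_node_name(name, 1); /* "vm1" */
		puts(name);
		generate_vm_node_name(name, 42); /* "vm42" */
		puts(name);
		return 0;
	}
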
diff --git a/src/vcpu.c b/src/vcpu.c
index b77689d..c98f2ad 100644
--- a/src/vcpu.c
+++ b/src/vcpu.c
@@ -64,7 +64,7 @@
 	vcpu.vcpu->state = VCPU_STATE_READY;
 }
 
-spci_vcpu_index_t vcpu_index(const struct vcpu *vcpu)
+ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu)
 {
 	size_t index = vcpu - vcpu->vm->vcpus;
 
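
vcpu_index recovers a vCPU's index by pointer arithmetic against the owning
VM's vcpus array instead of storing it in the structure. A standalone sketch
of the same technique, with assert() standing in for Hafnium's CHECK():

	#include <assert.h>
	#include <stdio.h>

	#define MAX_CPUS 4

	struct vm;

	struct vcpu {
		struct vm *vm; /* Back-pointer to the owning VM. */
	};

	struct vm {
		struct vcpu vcpus[MAX_CPUS];
	};

	static size_t vcpu_index(const struct vcpu *vcpu)
	{
		/* Offset of this element within the owning array. */
		size_t index = vcpu - vcpu->vm->vcpus;

		assert(index < MAX_CPUS);
		return index;
	}

	int main(void)
	{
		static struct vm vm;
		size_t i;

		for (i = 0; i < MAX_CPUS; ++i) {
			vm.vcpus[i].vm = &vm;
		}
		printf("%zu\n", vcpu_index(&vm.vcpus[2])); /* prints 2 */
		return 0;
	}
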
diff --git a/src/vm.c b/src/vm.c
index 7237dfb..0f2b9c8 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -19,18 +19,18 @@
 #include "hf/api.h"
 #include "hf/check.h"
 #include "hf/cpu.h"
+#include "hf/ffa.h"
 #include "hf/layout.h"
 #include "hf/plat/iommu.h"
-#include "hf/spci.h"
 #include "hf/std.h"
 
 #include "vmapi/hf/call.h"
 
 static struct vm vms[MAX_VMS];
 static struct vm tee_vm;
-static spci_vm_count_t vm_count;
+static ffa_vm_count_t vm_count;
 
-struct vm *vm_init(spci_vm_id_t id, spci_vcpu_count_t vcpu_count,
+struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
 		   struct mpool *ppool)
 {
 	uint32_t i;
@@ -76,7 +76,7 @@
 	return vm;
 }
 
-bool vm_init_next(spci_vcpu_count_t vcpu_count, struct mpool *ppool,
+bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
 		  struct vm **new_vm)
 {
 	if (vm_count >= MAX_VMS) {
@@ -93,12 +93,12 @@
 	return true;
 }
 
-spci_vm_count_t vm_get_count(void)
+ffa_vm_count_t vm_get_count(void)
 {
 	return vm_count;
 }
 
-struct vm *vm_find(spci_vm_id_t id)
+struct vm *vm_find(ffa_vm_id_t id)
 {
 	uint16_t index;
 
@@ -167,7 +167,7 @@
  * Get the vCPU with the given index from the given VM.
  * This assumes the index is valid, i.e. less than vm->vcpu_count.
  */
-struct vcpu *vm_get_vcpu(struct vm *vm, spci_vcpu_index_t vcpu_index)
+struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
 {
 	CHECK(vcpu_index < vm->vcpu_count);
 	return &vm->vcpus[vcpu_index];
@@ -176,7 +176,7 @@
 /**
  * Gets `vm`'s wait entry for waiting on `for_vm`.
  */
-struct wait_entry *vm_get_wait_entry(struct vm *vm, spci_vm_id_t for_vm)
+struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm)
 {
 	uint16_t index;
 
@@ -190,7 +190,7 @@
 /**
  * Gets the ID of the VM which the given VM's wait entry is for.
  */
-spci_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
+ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
 {
 	uint16_t index = entry - vm->wait_entries;