fix: rxtx unmap forwarding and handling

For calls to FFA_RXTX_UNMAP at the non-secure physical instance,
FF-A v1.1 prescribes that x1 carries the ID of the NS endpoint
in bits [31:16].
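
For reference, a minimal sketch of the encoding this implies on the
forwarding side (FFA_RXTX_ALLOCATOR_SHIFT is added by this patch;
the helper name below is purely illustrative):

  #include <stdint.h>

  #define FFA_RXTX_ALLOCATOR_SHIFT 16

  /* Pack the NS endpoint (allocator) ID into bits [31:16] of x1. */
  static inline uint64_t rxtx_unmap_arg1(uint16_t allocator_id)
  {
          return (uint64_t)allocator_id << FFA_RXTX_ALLOCATOR_SHIFT;
  }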

When forwarding the call, the hypervisor was setting the arguments
correctly, but the SPMC retrieved the ID without shifting the
argument register. This patch fixes that.
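
The SPMC now extracts the ID from bits [31:16] through an existing
helper; a standalone equivalent of that retrieval would be roughly
(illustrative name, reusing the shift defined above):

  /* Recover the allocator ID from bits [31:16] of x1. */
  static inline uint16_t rxtx_unmap_allocator_id(uint64_t arg1)
  {
          return (uint16_t)((arg1 >> FFA_RXTX_ALLOCATOR_SHIFT) & 0xffff);
  }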

This patch also makes the hypervisor forward the call to the SPMC
only if the caller's FF-A version is at least v1.1.
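
Concretely, the gate added to the hypervisor's
plat_ffa_rxtx_unmap_forward() is:

  if (vm_locked.vm->ffa_version < MAKE_FFA_VERSION(1, 1)) {
          return;
  }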

Change-Id: I7565cc52d387c550295114ab79b2e5bb76b4281e
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/inc/hf/arch/plat/ffa.h b/inc/hf/arch/plat/ffa.h
index 224f61a..a530af1 100644
--- a/inc/hf/arch/plat/ffa.h
+++ b/inc/hf/arch/plat/ffa.h
@@ -126,7 +126,7 @@
 
 void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked);
 
-void plat_ffa_rxtx_unmap_forward(ffa_vm_id_t id);
+void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked);
 
 /**
  * Checks whether managed exit is supported by given SP.
diff --git a/inc/vmapi/hf/ffa.h b/inc/vmapi/hf/ffa.h
index 484dd9e..5fef6c7 100644
--- a/inc/vmapi/hf/ffa.h
+++ b/inc/vmapi/hf/ffa.h
@@ -165,6 +165,7 @@
 
 #define FFA_RXTX_HEADER_SIZE sizeof(struct ffa_partition_rxtx_header)
 #define FFA_RXTX_SENDER_SHIFT (0x10U)
+#define FFA_RXTX_ALLOCATOR_SHIFT 16
 
 static inline void ffa_rxtx_header_init(
 	ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t size,
diff --git a/src/api.c b/src/api.c
index cb6f779..b3eea83 100644
--- a/src/api.c
+++ b/src/api.c
@@ -1467,7 +1467,7 @@
 	plat_ffa_vm_destroy(vm_locked);
 
 	/* Forward buffer unmapping to SPMC if coming from a VM. */
-	plat_ffa_rxtx_unmap_forward(owner_vm_id);
+	plat_ffa_rxtx_unmap_forward(vm_locked);
 
 	mm_unlock_stage1(&mm_stage1_locked);
 
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 3422ff7..a6fe272 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -507,7 +507,7 @@
 					 current);
 		return true;
 	case FFA_RXTX_UNMAP_32:
-		*args = api_ffa_rxtx_unmap(args->arg1, current);
+		*args = api_ffa_rxtx_unmap(ffa_vm_id(*args), current);
 		return true;
 	case FFA_RX_ACQUIRE_32:
 		*args = api_ffa_rx_acquire(ffa_receiver(*args), current);
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index 244da47..10fd888 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -114,9 +114,9 @@
 	(void)to_destroy_locked;
 }
 
-void plat_ffa_rxtx_unmap_forward(ffa_vm_id_t id)
+void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked)
 {
-	(void)id;
+	(void)vm_locked;
 }
 
 bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index e8a9fe6..9c3c9dc 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -727,18 +727,28 @@
 	(void)to_destroy_locked;
 }
 
-void plat_ffa_rxtx_unmap_forward(ffa_vm_id_t id)
+void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked)
 {
 	struct ffa_value ret;
 	uint64_t func;
+	ffa_vm_id_t id;
+
+	assert(vm_locked.vm != NULL);
+
+	id = vm_locked.vm->id;
 
 	if (!ffa_tee_enabled) {
 		return;
 	}
 
+	if (vm_locked.vm->ffa_version < MAKE_FFA_VERSION(1, 1)) {
+		return;
+	}
+
 	/* Hypervisor always forwards forward RXTX_UNMAP to SPMC. */
-	ret = arch_other_world_call((struct ffa_value){
-		.func = FFA_RXTX_UNMAP_32, .arg1 = id << 16});
+	ret = arch_other_world_call(
+		(struct ffa_value){.func = FFA_RXTX_UNMAP_32,
+				   .arg1 = id << FFA_RXTX_ALLOCATOR_SHIFT});
 	func = ret.func & ~SMCCC_CONVENTION_MASK;
 	if (ret.func == SMCCC_ERROR_UNKNOWN) {
 		panic("Unknown error forwarding RXTX_UNMAP.\n");
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 18bb84b..0159d01 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -749,9 +749,9 @@
 	(void)vm_locked;
 }
 
-void plat_ffa_rxtx_unmap_forward(ffa_vm_id_t id)
+void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked)
 {
-	(void)id;
+	(void)vm_locked;
 }
 
 bool plat_ffa_is_notification_get_valid(struct vcpu *current,
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 140b0e4..969622d 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -84,9 +84,9 @@
 	(void)to_destroy_locked;
 }
 
-void plat_ffa_rxtx_unmap_forward(ffa_vm_id_t id)
+void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked)
 {
-	(void)id;
+	(void)vm_locked;
 }
 
 bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
diff --git a/test/vmapi/primary_with_secondaries/no_services.c b/test/vmapi/primary_with_secondaries/no_services.c
index d06ee3c..6ce646e 100644
--- a/test/vmapi/primary_with_secondaries/no_services.c
+++ b/test/vmapi/primary_with_secondaries/no_services.c
@@ -302,7 +302,8 @@
 
 	/* Set the `allocator_id`, which MBZ at virtual instances. */
 	ret = ffa_call(
-		(struct ffa_value){.func = FFA_RXTX_UNMAP_32, .arg1 = 1});
+		(struct ffa_value){.func = FFA_RXTX_UNMAP_32,
+				   .arg1 = 1ULL << FFA_RXTX_ALLOCATOR_SHIFT});
 	EXPECT_FFA_ERROR(ret, FFA_INVALID_PARAMETERS);
 
 	EXPECT_EQ(ffa_rxtx_unmap().func, FFA_SUCCESS_32);