test: endpoint can yield cycles under RTM_FFA_DIR_REQ
An endpoint can yield CPU cycles received from a VM or SP
through a direct request message.
Signed-off-by: Madhukar Pappireddy <madhukar.pappireddy@arm.com>
Change-Id: I69c773b1397fb8ad3639b0b9f2a7196b5d1e9c0f
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index b029819..d5d9f34 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -120,12 +120,23 @@
}
/**
- * Hints that the vCPU is willing to yield its current use of the physical CPU.
- * This call always returns FFA_SUCCESS.
+ * Hints that the vCPU is willing to yield its current use of the physical CPU
+ * and intends to be resumed at the expiration of the timeout.
+ */
+static inline struct ffa_value ffa_yield_timeout(uint32_t timeout_low,
+ uint32_t timeout_high)
+{
+ return ffa_call((struct ffa_value){.func = FFA_YIELD_32,
+ .arg2 = timeout_low,
+ .arg3 = timeout_high});
+}
+
+/**
+ * Relinquish the current physical CPU cycles back (no resumption timeout).
*/
static inline struct ffa_value ffa_yield(void)
{
- return ffa_call((struct ffa_value){.func = FFA_YIELD_32});
+ return ffa_yield_timeout(0, 0);
}
/**
diff --git a/test/vmapi/primary_with_secondaries/dir_msg.c b/test/vmapi/primary_with_secondaries/dir_msg.c
index af7a7a1..79c9833 100644
--- a/test/vmapi/primary_with_secondaries/dir_msg.c
+++ b/test/vmapi/primary_with_secondaries/dir_msg.c
@@ -44,6 +44,58 @@
}
/**
+ * Send direct message to a VM/SP. Expect it to yield its CPU cycles. Allocate
+ * cycles through FFA_RUN and verify that sent info is echoed back.
+ */
+TEST(direct_message, ffa_send_direct_message_req_yield_echo)
+{
+ const uint32_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
+ 0x88889999};
+ struct mailbox_buffers mb = set_up_mailbox();
+ struct ffa_value res;
+ struct ffa_partition_info *service1_info = service1(mb.recv);
+
+ SERVICE_SELECT(service1_info->vm_id,
+ "ffa_yield_direct_message_resp_echo", mb.send);
+ ffa_run(service1_info->vm_id, 0);
+
+ res = ffa_msg_send_direct_req(HF_PRIMARY_VM_ID, service1_info->vm_id,
+ msg[0], msg[1], msg[2], msg[3], msg[4]);
+
+ /*
+ * Consider the scenario where VM1 allocated CPU cycles to SP1 through
+ * a direct request message but SP1 yields execution back to VM1
+ * instead of busy waiting for an IO operation.
+ */
+ EXPECT_EQ(res.func, FFA_YIELD_32);
+
+ /* SP1 id/vCPU index are passed through arg1. */
+ EXPECT_EQ(res.arg1, ffa_vm_vcpu(service1_info->vm_id, 0));
+
+ /*
+	 * Additionally, SP1 can also specify a timeout while yielding cycles
+ * back to VM1. This is a hint to VM1 that it can be resumed upon
+ * expiration of the timeout.
+ * Check for 64-bit timeout specified by SP1 through arg2 and arg3. The
+ * purpose of these checks is just to validate the timeout value but
+ * not to leverage it upon expiration.
+ */
+ EXPECT_EQ(res.arg2, 0x1);
+ EXPECT_EQ(res.arg3, 0x23456789);
+
+ /* Allocate CPU cycles to resume SP. */
+ res = ffa_run(service1_info->vm_id, 0);
+
+ EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);
+
+ EXPECT_EQ(res.arg3, msg[0]);
+ EXPECT_EQ(res.arg4, msg[1]);
+ EXPECT_EQ(res.arg5, msg[2]);
+ EXPECT_EQ(res.arg6, msg[3]);
+ EXPECT_EQ(res.arg7, msg[4]);
+}
+
+/**
* Initiate direct message request between test SPs.
* If test services are VMs, test should be skipped.
*/
@@ -74,6 +126,40 @@
}
/**
+ * Initiate direct message request between two Secure Partitions. Configure
+ * the second SP in the call chain to yield cycles received from first SP
+ * through direct message request. The first SP is equipped to reallocate
+ * CPU cycles to resume the direct message processing.
+ */
+TEST_PRECONDITION(direct_message, ffa_direct_message_services_yield_echo,
+ service1_and_service2_are_secure)
+{
+ struct mailbox_buffers mb = set_up_mailbox();
+ struct ffa_partition_info *service1_info = service1(mb.recv);
+ struct ffa_partition_info *service2_info = service2(mb.recv);
+ ffa_vm_id_t own_id = hf_vm_get_id();
+ struct ffa_value ret;
+
+ /* Run service2 for it to wait for a request from service1. */
+ SERVICE_SELECT(service2_info->vm_id,
+ "ffa_yield_direct_message_resp_echo", mb.send);
+ ffa_run(service2_info->vm_id, 0);
+
+ /* Service1 requests echo from service2. */
+ SERVICE_SELECT(service1_info->vm_id,
+ "ffa_yield_direct_message_echo_services", mb.send);
+
+ /* Send to service1 the FF-A ID of the target for its message. */
+ ret = send_indirect_message(own_id, service1_info->vm_id, mb.send,
+ &service2_info->vm_id,
+ sizeof(service2_info->vm_id), 0);
+ ASSERT_EQ(ret.func, FFA_SUCCESS_32);
+
+ ret = ffa_run(service1_info->vm_id, 0);
+ EXPECT_EQ(ret.func, FFA_YIELD_32);
+}
+
+/**
* If Hafnium is the hypervisor, and service1 is a VM:
* - Service verifies disallowed SMC invocations while ffa_msg_send_direct_req
* is being serviced.
diff --git a/test/vmapi/primary_with_secondaries/services/dir_msg.c b/test/vmapi/primary_with_secondaries/services/dir_msg.c
index 1ee442b..1d25eab 100644
--- a/test/vmapi/primary_with_secondaries/services/dir_msg.c
+++ b/test/vmapi/primary_with_secondaries/services/dir_msg.c
@@ -23,6 +23,28 @@
ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
args.arg3, args.arg4, args.arg5, args.arg6,
args.arg7);
+
+ FAIL("Direct response not expected to return");
+}
+
+TEST_SERVICE(ffa_yield_direct_message_resp_echo)
+{
+ struct ffa_value args = ffa_msg_wait();
+
+ EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);
+
+	/*
+	 * Give back control to the VM/SP that sent the direct request via
+	 * FFA_YIELD, specifying timeout low word 0x1 and high word 0x23456789.
+	 */
+ ffa_yield_timeout(0x1, 0x23456789);
+
+ /* Send the echo through direct message response. */
+ ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
+ args.arg3, args.arg4, args.arg5, args.arg6,
+ args.arg7);
+
+ FAIL("Direct response not expected to return");
}
TEST_SERVICE(ffa_direct_message_echo_services)
@@ -53,6 +75,45 @@
ffa_yield();
}
+TEST_SERVICE(ffa_yield_direct_message_echo_services)
+{
+ const uint32_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
+ 0x88889999};
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ struct ffa_value res;
+ ffa_vm_id_t target_id;
+
+ /* Retrieve FF-A ID of the target endpoint. */
+ receive_indirect_message((void *)&target_id, sizeof(target_id),
+ recv_buf, NULL);
+
+ HFTEST_LOG("Echo test with: %x", target_id);
+
+ res = ffa_msg_send_direct_req(hf_vm_get_id(), target_id, msg[0], msg[1],
+ msg[2], msg[3], msg[4]);
+
+ /*
+ * Be prepared to allocate CPU cycles to target vCPU if it yields while
+ * processing direct message.
+ */
+ while (res.func == FFA_YIELD_32) {
+ /* VM id/vCPU index are passed through arg1. */
+ EXPECT_EQ(res.arg1, ffa_vm_vcpu(target_id, 0));
+
+ /* Allocate CPU cycles to resume SP. */
+ res = ffa_run(target_id, 0);
+ }
+ EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);
+
+ EXPECT_EQ(res.arg3, msg[0]);
+ EXPECT_EQ(res.arg4, msg[1]);
+ EXPECT_EQ(res.arg5, msg[2]);
+ EXPECT_EQ(res.arg6, msg[3]);
+ EXPECT_EQ(res.arg7, msg[4]);
+
+ ffa_yield();
+}
+
TEST_SERVICE(ffa_direct_msg_req_disallowed_smc)
{
struct ffa_value args = ffa_msg_wait();
@@ -72,6 +133,8 @@
ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
args.arg3, args.arg4, args.arg5, args.arg6,
args.arg7);
+
+ FAIL("Direct response not expected to return");
}
/**
@@ -99,6 +162,8 @@
ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
args.arg3, args.arg4, args.arg5, args.arg6,
args.arg7);
+
+ FAIL("Direct response not expected to return");
}
/**
@@ -122,6 +187,8 @@
ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
args.arg3, args.arg4, args.arg5, args.arg6,
args.arg7);
+
+ FAIL("Direct response not expected to return");
}
/**
@@ -154,6 +221,8 @@
EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);
ffa_msg_send_direct_resp(own_id, sender, 0, 0, 0, 0, 0);
+
+ FAIL("Direct response not expected to return");
}
TEST_SERVICE(ffa_direct_message_cycle_denied)
@@ -176,4 +245,6 @@
ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
args.arg3, args.arg4, args.arg5, args.arg6,
args.arg7);
+
+ FAIL("Direct response not expected to return");
}