Using custom call for push relinquish.
Bug: 132420445
Change-Id: I694250cea48aef9ae4083f1687e1db83d6508d9f
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 3475316..9be2e0e 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -18,7 +18,8 @@
"spci.h": "c",
"spci_internal.h": "c",
"interrupts_gicv3.h": "c",
- "interrupts.h": "c"
+ "interrupts.h": "c",
+ "abi.h": "c"
},
"C_Cpp.errorSquiggles": "Disabled"
}
diff --git a/inc/vmapi/hf/abi.h b/inc/vmapi/hf/abi.h
index ed004b7..2802479 100644
--- a/inc/vmapi/hf/abi.h
+++ b/inc/vmapi/hf/abi.h
@@ -35,6 +35,9 @@
#define HF_SPCI_RUN_WAIT_FOR_INTERRUPT 0xff09
#define HF_SPCI_RUN_WAKE_UP 0xff0a
+/* Custom SPCI-like call for relinquishing memory in the push model. */
+#define HF_SPCI_MEM_RELINQUISH 0xffab
+
/* This matches what Trusty and its ATF module currently use. */
#define HF_DEBUG_LOG 0xbd000000
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index 5d217da..ad26137 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -158,6 +158,15 @@
.arg5 = handle});
}
+static inline struct spci_value hf_spci_mem_relinquish(
+ uint32_t remaining_fragment_count, uint32_t length, uint32_t handle)
+{
+ return spci_call((struct spci_value){.func = HF_SPCI_MEM_RELINQUISH,
+ .arg3 = remaining_fragment_count,
+ .arg4 = length,
+ .arg5 = handle});
+}
+
/**
* Called by secondary VMs to receive a message. This will block until a message
* is received.
diff --git a/src/api.c b/src/api.c
index 405b784..143a624 100644
--- a/src/api.c
+++ b/src/api.c
@@ -940,11 +940,8 @@
{
struct vm *from = current->vm;
struct vm *to;
-
- struct two_vm_locked vm_to_from_lock;
-
+ struct vm_locked to_locked;
const void *from_msg;
-
struct spci_value ret;
bool notify = (attributes & SPCI_MSG_SEND_NOTIFY_MASK) ==
SPCI_MSG_SEND_NOTIFY;
@@ -984,71 +981,24 @@
return spci_error(SPCI_INVALID_PARAMETERS);
}
- /*
- * Hafnium needs to hold the lock on <to> before the mailbox state is
- * checked. The lock on <to> must be held until the information is
- * copied to <to> Rx buffer. Since in
- * spci_msg_handle_architected_message we may call api_spci_share_memory
- * which must hold the <from> lock, we must hold the <from> lock at this
- * point to prevent a deadlock scenario.
- */
- vm_to_from_lock = vm_lock_both(to, from);
+ to_locked = vm_lock(to);
- if (msg_receiver_busy(vm_to_from_lock.vm1, from, notify)) {
+ if (msg_receiver_busy(to_locked, from, notify)) {
ret = spci_error(SPCI_BUSY);
goto out;
}
- /* Handle legacy memory sharing messages. */
- if ((attributes & SPCI_MSG_SEND_LEGACY_MEMORY_MASK) != 0) {
- /*
- * Buffer holding the internal copy of the shared memory
- * regions.
- */
- uint8_t *message_replica = cpu_get_buffer(current->cpu->id);
- uint32_t message_buffer_size =
- cpu_get_buffer_size(current->cpu->id);
+ /* Copy data. */
+ memcpy_s(to->mailbox.recv, SPCI_MSG_PAYLOAD_MAX, from_msg, size);
+ to->mailbox.recv_size = size;
+ to->mailbox.recv_sender = sender_vm_id;
+ to->mailbox.recv_attributes = 0;
+ ret = (struct spci_value){.func = SPCI_SUCCESS_32};
- if (size > message_buffer_size) {
- ret = spci_error(SPCI_INVALID_PARAMETERS);
- goto out;
- }
-
- /* Copy the architected message into the internal buffer. */
- memcpy_s(message_replica, message_buffer_size, from_msg, size);
-
- /*
- * Note that architected_message_replica is passed as the third
- * parameter to spci_msg_handle_architected_message. The
- * execution flow commencing at
- * spci_msg_handle_architected_message will make several
- * accesses to fields in architected_message_replica. The memory
- * area architected_message_replica must be exclusively owned by
- * Hafnium so that TOCTOU issues do not arise.
- */
- ret = spci_msg_handle_architected_message(
- vm_to_from_lock.vm1, vm_to_from_lock.vm2,
- (struct spci_memory_region *)message_replica, size,
- attributes, &api_page_pool);
-
- if (ret.func != SPCI_SUCCESS_32) {
- goto out;
- }
- } else {
- /* Copy data. */
- memcpy_s(to->mailbox.recv, SPCI_MSG_PAYLOAD_MAX, from_msg,
- size);
- to->mailbox.recv_size = size;
- to->mailbox.recv_sender = sender_vm_id;
- to->mailbox.recv_attributes = 0;
- ret = (struct spci_value){.func = SPCI_SUCCESS_32};
- }
-
- deliver_msg(vm_to_from_lock.vm1, sender_vm_id, current, next);
+ deliver_msg(to_locked, sender_vm_id, current, next);
out:
- vm_unlock(&vm_to_from_lock.vm1);
- vm_unlock(&vm_to_from_lock.vm2);
+ vm_unlock(&to_locked);
return ret;
}
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index b730cf3..b1b3b59 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -384,6 +384,12 @@
args->arg3, args->arg4, args->arg5,
current(), next);
return true;
+ case HF_SPCI_MEM_RELINQUISH:
+ *args = api_spci_mem_send(
+ SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH,
+ ipa_init(args->arg1), args->arg2, args->arg3,
+ args->arg4, args->arg5, current(), next);
+ return true;
}
return false;
diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c
index d930a48..ecac191 100644
--- a/test/vmapi/primary_with_secondaries/memory_sharing.c
+++ b/test/vmapi/primary_with_secondaries/memory_sharing.c
@@ -176,9 +176,7 @@
SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
EXPECT_SPCI_ERROR(
- spci_msg_send(
- vms[j], vms[i], msg_size,
- SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH),
+ hf_spci_mem_relinquish(0, msg_size, 0),
SPCI_INVALID_PARAMETERS);
}
}
diff --git a/test/vmapi/primary_with_secondaries/services/memory.c b/test/vmapi/primary_with_secondaries/services/memory.c
index 98553c0..a9d87f1 100644
--- a/test/vmapi/primary_with_secondaries/services/memory.c
+++ b/test/vmapi/primary_with_secondaries/services/memory.c
@@ -444,10 +444,7 @@
SPCI_MEMORY_OUTER_SHAREABLE);
/* Relevant information read, mailbox can be cleared. */
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
- EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret),
- spci_msg_send_sender(ret), msg_size,
- SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH)
- .func,
+ EXPECT_EQ(hf_spci_mem_relinquish(0, msg_size, 0).func,
SPCI_SUCCESS_32);
/*
@@ -482,11 +479,14 @@
ptr = (uint8_t *)constituents[0].address;
- /* Check that one has access to the shared region. */
+ /* Check that we have access to the shared region. */
for (i = 0; i < PAGE_SIZE; ++i) {
ptr[i]++;
}
- /* Give the memory back and notify the sender. */
+ /*
+ * Attempt to relinquish the memory, which should fail because
+	 * it was donated, not lent.
+ */
msg_size = spci_memory_region_init(
send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID,
constituents, memory_region->constituent_count, 0, 0,
@@ -494,11 +494,8 @@
SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
- EXPECT_SPCI_ERROR(
- spci_msg_send(spci_msg_send_receiver(ret),
- HF_PRIMARY_VM_ID, msg_size,
- SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH),
- SPCI_INVALID_PARAMETERS);
+ EXPECT_SPCI_ERROR(hf_spci_mem_relinquish(0, msg_size, 0),
+ SPCI_INVALID_PARAMETERS);
/* Ensure we still have access to the memory. */
ptr[0] = 123;
@@ -526,16 +523,14 @@
EXPECT_EQ(spci_msg_send_attributes(ret),
SPCI_MSG_SEND_LEGACY_MEMORY_LEND);
- /* Attempt to relinquish from primary VM. */
+ /* Attempt to relinquish to this same VM. */
msg_size = spci_memory_region_init(
send_buf, HF_PRIMARY_VM_ID, hf_vm_get_id(), constituents,
memory_region->constituent_count, 0, 0, SPCI_MEMORY_RW_X,
SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(
- spci_msg_send(HF_PRIMARY_VM_ID, hf_vm_get_id(), msg_size,
- SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH),
- SPCI_INVALID_PARAMETERS);
+ EXPECT_SPCI_ERROR(hf_spci_mem_relinquish(0, msg_size, 0),
+ SPCI_INVALID_PARAMETERS);
/* Give the memory back and notify the sender. */
msg_size = spci_memory_region_init(
@@ -543,10 +538,7 @@
memory_region->constituent_count, 0, 0, SPCI_MEMORY_RW_X,
SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, msg_size,
- SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH)
- .func,
- SPCI_SUCCESS_32);
+ EXPECT_EQ(hf_spci_mem_relinquish(0, msg_size, 0).func, SPCI_SUCCESS_32);
/* Ensure we cannot lend from the primary to another secondary. */
msg_size = spci_memory_region_init(
@@ -610,10 +602,7 @@
SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
- EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret),
- HF_PRIMARY_VM_ID, msg_size,
- SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH)
- .func,
+ EXPECT_EQ(hf_spci_mem_relinquish(0, msg_size, 0).func,
SPCI_SUCCESS_32);
}
}
@@ -675,10 +664,7 @@
&constituent_copy, 1, 0, 0, SPCI_MEMORY_RW_X,
SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret),
- HF_PRIMARY_VM_ID, msg_size,
- SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH)
- .func,
+ EXPECT_EQ(hf_spci_mem_relinquish(0, msg_size, 0).func,
SPCI_SUCCESS_32);
}
}