fix(ff-a): clear memory bit use in mem share from NWd to SWd
Requesting the SPMC to clear memory at the memory share call would not
work for an operation between the SWd and the NWd.
This was because the SPMC was not using the NS bit, when mapping the
memory region into its own address space before clearing the memory.
This patch makes the SPMC map the memory of a memory send operation as NS,
if the lender is from the NWd.
This is a temporary fix, to avoid erroneous memory accesses from the
SPMC when clearing a memory region.
For a proper fix, the SPMC needs to keep track of the security state for
the memory region.
Change-Id: I0d640761d0f6b85ba20049f303d2b43c6e0a7135
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/inc/hf/arch/plat/ffa.h b/inc/hf/arch/plat/ffa.h
index 95f9d2d..dafafcd 100644
--- a/inc/hf/arch/plat/ffa.h
+++ b/inc/hf/arch/plat/ffa.h
@@ -121,6 +121,20 @@
ffa_memory_handle_t handle);
/**
+ * For non-secure memory, retrieve the NS mode if the partition manager supports
+ * it. The SPMC will return MM_MODE_NS, and the hypervisor will return 0, as
+ * it only deals with NS accesses by default.
+ */
+uint32_t plat_ffa_other_world_mode(void);
+
+/**
+ * For memory management operations between SWd and NWd, the SPMC might need
+ * to operate on NS memory. The function below returns the mode to use in the
+ * mm.h library, depending on the memory owner's id.
+ */
+uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id);
+
+/**
* Return the FF-A partition info VM/SP properties given the VM id.
*/
ffa_partition_properties_t plat_ffa_partition_properties(
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index 17eec1b..35f3e5a 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -123,6 +123,17 @@
return false;
}
+uint32_t plat_ffa_other_world_mode(void)
+{
+ return 0U;
+}
+
+uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id)
+{
+ (void)owner_id;
+ return 0U;
+}
+
bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
ffa_vm_id_t sender_id,
ffa_vm_id_t receiver_id)
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index a53375b..dfb2865 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -256,6 +256,17 @@
FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
}
+uint32_t plat_ffa_other_world_mode(void)
+{
+ return 0U;
+}
+
+uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id)
+{
+ (void)owner_id;
+ return plat_ffa_other_world_mode();
+}
+
ffa_partition_properties_t plat_ffa_partition_properties(
ffa_vm_id_t vm_id, const struct vm *target)
{
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 203b71e..c661cd4 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -399,6 +399,17 @@
FFA_MEMORY_HANDLE_ALLOCATOR_SPMC;
}
+uint32_t plat_ffa_other_world_mode(void)
+{
+ return MM_MODE_NS;
+}
+
+uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id)
+{
+ return vm_id_is_current_world(owner_id) ? 0U
+ : plat_ffa_other_world_mode();
+}
+
ffa_partition_properties_t plat_ffa_partition_properties(
ffa_vm_id_t vm_id, const struct vm *target)
{
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 236fb16..27ed45a 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -101,6 +101,17 @@
return false;
}
+uint32_t plat_ffa_other_world_mode(void)
+{
+ return 0U;
+}
+
+uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id)
+{
+ (void)owner_id;
+ return 0U;
+}
+
bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
ffa_vm_id_t sender_id,
ffa_vm_id_t receiver_id)
diff --git a/src/ffa_memory.c b/src/ffa_memory.c
index c7ae984..78a9574 100644
--- a/src/ffa_memory.c
+++ b/src/ffa_memory.c
@@ -781,7 +781,8 @@
* Clears a region of physical memory by overwriting it with zeros. The data is
* flushed from the cache so the memory has been cleared across the system.
*/
-static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool)
+static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool,
+ uint32_t extra_mode_attributes)
{
/*
* TODO: change this to a CPU local single page window rather than a
@@ -791,8 +792,10 @@
*/
bool ret;
struct mm_stage1_locked stage1_locked = mm_lock_stage1();
- void *ptr =
- mm_identity_map(stage1_locked, begin, end, MM_MODE_W, ppool);
+ void *ptr = mm_identity_map(stage1_locked, begin, end,
+ MM_MODE_W | (extra_mode_attributes &
+ plat_ffa_other_world_mode()),
+ ppool);
size_t size = pa_difference(begin, end);
if (!ptr) {
@@ -823,6 +826,7 @@
* flushed from the cache so the memory has been cleared across the system.
*/
static bool ffa_clear_memory_constituents(
+ uint32_t security_state_mode,
struct ffa_memory_region_constituent **fragments,
const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
struct mpool *page_pool)
@@ -849,7 +853,8 @@
pa_from_ipa(ipa_init(fragments[i][j].address));
paddr_t end = pa_add(begin, size);
- if (!clear_memory(begin, end, &local_page_pool)) {
+ if (!clear_memory(begin, end, &local_page_pool,
+ security_state_mode)) {
/*
* api_clear_memory will defrag on failure, so
* no need to do it here.
@@ -962,9 +967,10 @@
fragment_count, from_mode, &local_page_pool, true));
/* Clear the memory so no VM or device can see the previous contents. */
- if (clear && !ffa_clear_memory_constituents(
- fragments, fragment_constituent_counts,
- fragment_count, page_pool)) {
+ if (clear &&
+ !ffa_clear_memory_constituents(
+ plat_ffa_owner_world_mode(from_locked.vm->id), fragments,
+ fragment_constituent_counts, fragment_count, page_pool)) {
/*
* On failure, roll back by returning memory to the sender. This
* may allocate pages which were previously freed into
@@ -1008,7 +1014,7 @@
* Success is indicated by FFA_SUCCESS.
*/
static struct ffa_value ffa_retrieve_check_update(
- struct vm_locked to_locked,
+ struct vm_locked to_locked, ffa_vm_id_t from_id,
struct ffa_memory_region_constituent **fragments,
uint32_t *fragment_constituent_counts, uint32_t fragment_count,
uint32_t memory_to_attributes, uint32_t share_func, bool clear,
@@ -1066,9 +1072,10 @@
}
/* Clear the memory so no VM or device can see the previous contents. */
- if (clear && !ffa_clear_memory_constituents(
- fragments, fragment_constituent_counts,
- fragment_count, page_pool)) {
+ if (clear &&
+ !ffa_clear_memory_constituents(
+ plat_ffa_owner_world_mode(from_id), fragments,
+ fragment_constituent_counts, fragment_count, page_pool)) {
ret = ffa_error(FFA_NO_MEMORY);
goto out;
}
@@ -1263,9 +1270,10 @@
fragment_count, from_mode, &local_page_pool, true));
/* Clear the memory so no VM or device can see the previous contents. */
- if (clear && !ffa_clear_memory_constituents(
- fragments, fragment_constituent_counts,
- fragment_count, page_pool)) {
+ if (clear &&
+ !ffa_clear_memory_constituents(
+ plat_ffa_owner_world_mode(from_locked.vm->id), fragments,
+ fragment_constituent_counts, fragment_count, page_pool)) {
/*
* On failure, roll back by returning memory to the sender. This
* may allocate pages which were previously freed into
@@ -2398,7 +2406,7 @@
memory_to_attributes = ffa_memory_permissions_to_mode(
permissions, share_state->sender_orig_mode);
ret = ffa_retrieve_check_update(
- to_locked, share_state->fragments,
+ to_locked, memory_region->sender, share_state->fragments,
share_state->fragment_constituent_counts,
share_state->fragment_count, memory_to_attributes,
share_state->share_func, false, page_pool);
@@ -2719,7 +2727,7 @@
}
ret = ffa_retrieve_check_update(
- to_locked, share_state->fragments,
+ to_locked, memory_region->sender, share_state->fragments,
share_state->fragment_constituent_counts,
share_state->fragment_count, share_state->sender_orig_mode,
FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool);