feat(lib/granule): Add dev_granule object type
Define dev_granule structure type and add device granule
objects to manage device memory in PCI regions.
Define device granule states:
- DEV_GRANULE_STATE_NS
- DEV_GRANULE_STATE_DELEGATED
- DEV_GRANULE_STATE_MAPPED
Add locking primitives and access functions for
dev_granule objects.
Add dev_ncoh_granules[RMM_MAX_NCOH_GRANULES] array of
dev_granule structures for non-coherent device memory.
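
For illustration only (not part of this patch), a minimal sketch of
how a caller is expected to drive the new access and locking helpers;
the caller name and return values below are hypothetical placeholders:

  #include <dev_granule.h>

  static unsigned long hypothetical_dev_delegate(unsigned long addr)
  {
          enum dev_type type;
          struct dev_granule *g;

          /* Look up and lock the dev_granule, checking its current state */
          g = find_lock_dev_granule(addr, DEV_GRANULE_STATE_NS, &type);
          if (g == NULL) {
                  return 1UL; /* placeholder error value */
          }

          /* ... state-specific work on the device granule ... */

          /* Transition to the new state and drop the bitlock */
          dev_granule_unlock_transition(g, DEV_GRANULE_STATE_DELEGATED);

          return 0UL; /* placeholder success value */
  }
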
Change-Id: I857095a997f78d2c39b3958056460112f3b34595
Signed-off-by: AlexeiFedorov <Alexei.Fedorov@arm.com>
diff --git a/cmake/CommonConfigs.cmake b/cmake/CommonConfigs.cmake
index 44dd760..89d4c27 100644
--- a/cmake/CommonConfigs.cmake
+++ b/cmake/CommonConfigs.cmake
@@ -37,7 +37,7 @@
NAME RMM_MAX_COH_GRANULES
HELP "Maximum number of coherent device granules supported"
TYPE STRING
- DEFAULT 0x0)
+ DEFAULT 1)
#
# RMM_MAX_NCOH_GRANULES. Maximum number of non-coherent device granules supported.
@@ -46,7 +46,7 @@
NAME RMM_MAX_NCOH_GRANULES
HELP "Maximum number of non-coherent device granules supported"
TYPE STRING
- DEFAULT 0x0)
+ DEFAULT 1)
arm_config_option(
NAME RMM_NUM_PAGES_PER_STACK
@@ -121,11 +121,15 @@
target_compile_definitions(rmm-common
INTERFACE "RMM_MAX_GRANULES=U(${RMM_MAX_GRANULES})")
+if (RMM_MAX_COH_GRANULES EQUAL 0x0)
+ message (FATAL_ERROR "RMM_MAX_COH_GRANULES cannot be set to 0")
+endif()
+
target_compile_definitions(rmm-common
INTERFACE "RMM_MAX_COH_GRANULES=U(${RMM_MAX_COH_GRANULES})")
if (RMM_MAX_NCOH_GRANULES EQUAL 0x0)
- message (FATAL_ERROR "RMM_MAX_NCOH_GRANULES not configured")
+ message (FATAL_ERROR "RMM_MAX_NCOH_GRANULES cannot be set to 0")
endif()
target_compile_definitions(rmm-common
diff --git a/docs/design/locking.rst b/docs/design/locking.rst
index 3ce4c5d..11ac059 100644
--- a/docs/design/locking.rst
+++ b/docs/design/locking.rst
@@ -528,6 +528,8 @@
- GRANULE_STATE_DELEGATED
- GRANULE_STATE_RD
- GRANULE_STATE_REC
+ - DEV_GRANULE_STATE_NS
+ - DEV_GRANULE_STATE_DELEGATED
#. **Internal**: A granule state belongs to the `internal` class iff it is not
an `external`. These are objects which are referenced from another
@@ -537,6 +539,7 @@
- GRANULE_STATE_RTT
- GRANULE_STATE_DATA
+ - DEV_GRANULE_STATE_MAPPED
We now state the locking guidelines for |RMM| as:
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index ca4df15..7c434ad 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -284,10 +284,11 @@
MBEDTLS_ECP_MAX_OPS ,248 - ,1000 ,"Number of max operations per ECC signing iteration"
RMM_FPU_USE_AT_REL2 ,ON | OFF ,OFF(fake_host) ON(aarch64),"Enable FPU/SIMD usage in RMM."
RMM_MAX_GRANULES , ,0 ,"Maximum number of memory granules available to the system"
- RMM_MAX_COH_GRANULES , ,0 ,"Maximum number of coherent device granules available to the system"
- RMM_MAX_NCOH_GRANULES , ,0 ,"Maximum number of non-coherent device granules available to the system"
+ RMM_MAX_COH_GRANULES , ,1 ,"Maximum number of coherent device granules available to the system"
+ RMM_MAX_NCOH_GRANULES , ,1 ,"Maximum number of non-coherent device granules available to the system"
HOST_VARIANT ,host_build | host_test | host_cbmc ,host_build ,"Variant to build for the host platform. Only available when RMM_PLATFORM=host"
- HOST_MEM_SIZE , ,0x40000000 ,"Host memory size that will be used as physical granules"
+ HOST_DRAM_SIZE , ,0x20000000 ,"Host memory size that will be used as physical DRAM"
+ HOST_NCOH_DEV_SIZE , ,0xA000 ,"Host memory size that will be used as non-coherent device granules"
RMM_COVERAGE ,ON | OFF ,OFF ,"Enable coverage analysis"
RMM_HTML_COV_REPORT ,ON | OFF ,ON ,"Enable HTML output report for coverage analysis"
RMM_CBMC_VIEWER_OUTPUT ,ON | OFF ,OFF ,"Generate report of CBMC results using the tool cbmc-viewer"
diff --git a/lib/common/include/dev_type.h b/lib/common/include/dev_type.h
new file mode 100644
index 0000000..7f97417
--- /dev/null
+++ b/lib/common/include/dev_type.h
@@ -0,0 +1,16 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef DEV_TYPE_H
+#define DEV_TYPE_H
+
+/* Types of device memory */
+enum dev_type {
+ DEV_RANGE_COHERENT,
+ DEV_RANGE_NON_COHERENT,
+ DEV_RANGE_MAX
+};
+
+#endif /* DEV_TYPE_H */
diff --git a/lib/common/include/platform_api.h b/lib/common/include/platform_api.h
index 6b26ac1..34e53b3 100644
--- a/lib/common/include/platform_api.h
+++ b/lib/common/include/platform_api.h
@@ -5,6 +5,7 @@
#ifndef PLATFORM_API_H
#define PLATFORM_API_H
+#include <dev_type.h>
#include <stdint.h>
void plat_warmboot_setup(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3);
@@ -20,10 +21,26 @@
unsigned long plat_granule_addr_to_idx(unsigned long addr);
/*
+ * Takes an aligned dev_granule address, validates it and if valid returns the
+ * index in the struct dev_granules array or UINT64_MAX in case of an error.
+ *
+ * This function also validates that the dev_granule address is a valid
+ * page address and stores the device granule type in @type if the addr is valid.
+ */
+unsigned long plat_dev_granule_addr_to_idx(unsigned long addr, enum dev_type *type);
+
+/*
* Takes an index in the struct granules array and returns the aligned granule
* address. The index must be within the number of granules expected by the
* platform.
*/
unsigned long plat_granule_idx_to_addr(unsigned long idx);
+/*
+ * Takes an index in the struct dev_granules array and returns the aligned
+ * dev_granule address of the specified device type. The index must be within
+ * the number of dev_granules expected by the platform.
+ */
+unsigned long plat_dev_granule_idx_to_addr(unsigned long idx, enum dev_type type);
+
#endif /* PLATFORM_API_H */
diff --git a/lib/granule/CMakeLists.txt b/lib/granule/CMakeLists.txt
index 53609d8..1f93036 100644
--- a/lib/granule/CMakeLists.txt
+++ b/lib/granule/CMakeLists.txt
@@ -15,6 +15,7 @@
"include/${RMM_ARCH}")
target_sources(rmm-lib-granule
- PRIVATE "src/granule.c")
+ PRIVATE "src/dev_granule.c"
+ "src/granule.c")
include (tests/CMakeLists.txt)
diff --git a/lib/granule/include/aarch64/granule_lock.h b/lib/granule/include/aarch64/granule_lock.h
index f23abd7..e2a04d2 100644
--- a/lib/granule/include/aarch64/granule_lock.h
+++ b/lib/granule/include/aarch64/granule_lock.h
@@ -45,4 +45,41 @@
);
}
+static inline void dev_granule_bitlock_acquire(struct dev_granule *g)
+{
+ /* To avoid misra-c2012-2.7 warnings */
+ (void)g;
+ uint32_t tmp;
+ uint32_t mask = DEV_GRN_LOCK_BIT;
+
+ asm volatile(
+ "1: ldsetab %w[mask], %w[tmp], %[lock]\n"
+ " tbz %w[tmp], #%c[bit], 2f\n"
+ " ldxrb %w[tmp], %[lock]\n"
+ " tbz %w[tmp], #%c[bit], 1b\n"
+ " wfe\n"
+ " b 1b\n"
+ "2:\n"
+ : [lock] "+Q" (g->descriptor),
+ [tmp] "=&r" (tmp)
+ : [mask] "r" (mask),
+ [bit] "i" (DEV_GRN_LOCK_SHIFT)
+ : "memory"
+ );
+}
+
+static inline void dev_granule_bitlock_release(struct dev_granule *g)
+{
+ /* To avoid misra-c2012-2.7 warnings */
+ (void)g;
+ uint32_t mask = DEV_GRN_LOCK_BIT;
+
+ asm volatile(
+ " stclrlb %w[mask], %[lock]\n"
+ : [lock] "+Q" (g->descriptor)
+ : [mask] "r" (mask)
+ : "memory"
+ );
+}
+
#endif /* GRANULE_LOCK_H */
diff --git a/lib/granule/include/dev_granule.h b/lib/granule/include/dev_granule.h
new file mode 100644
index 0000000..0525188
--- /dev/null
+++ b/lib/granule/include/dev_granule.h
@@ -0,0 +1,309 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef DEV_GRANULE_H
+#define DEV_GRANULE_H
+
+#include <assert.h>
+#include <atomics.h>
+#include <dev_type.h>
+#include <errno.h>
+#include <granule_lock.h>
+#include <memory.h>
+#include <stdbool.h>
+#include <utils_def.h>
+
+/* Maximum value defined by the 'refcount' field width in dev_granule descriptor */
+#define DEV_REFCOUNT_MAX (unsigned char) \
+ ((U(1) << DEV_GRN_REFCOUNT_WIDTH) - U(1))
+
+/* Dev Granule descriptor fields access macros */
+#define DEV_LOCKED(g) \
+ ((SCA_READ8(&(g)->descriptor) & DEV_GRN_LOCK_BIT) != 0U)
+
+#define DEV_REFCOUNT(g) \
+ (SCA_READ8(&(g)->descriptor) & DEV_REFCOUNT_MASK)
+
+#define DEV_STATE(g) \
+ (unsigned char)EXTRACT(DEV_GRN_STATE, SCA_READ8(&(g)->descriptor))
+
+/*
+ * Return refcount value using atomic read.
+ */
+static inline unsigned char dev_granule_refcount_read(struct dev_granule *g)
+{
+ assert(g != NULL);
+ return DEV_REFCOUNT(g);
+}
+
+/*
+ * Return refcount value using atomic read with acquire semantics.
+ *
+ * Must be called with dev_granule lock held.
+ */
+static inline unsigned char dev_granule_refcount_read_acquire(struct dev_granule *g)
+{
+ assert((g != NULL) && DEV_LOCKED(g));
+ return SCA_READ8_ACQUIRE(&g->descriptor) & DEV_REFCOUNT_MASK;
+}
+
+/*
+ * Sanity-check unlocked dev_granule invariants.
+ * This check is performed just after acquiring the lock and/or just before
+ * releasing the lock.
+ *
+ * These invariants must hold for any dev_granule which is unlocked.
+ *
+ * These invariants may not hold transiently while a dev_granule is locked (e.g.
+ * when transitioning to/from delegated state).
+ *
+ * Note: this function is purely for debug/documentation purposes, and is not
+ * intended as a mechanism to ensure correctness.
+ */
+static inline void __dev_granule_assert_unlocked_invariants(struct dev_granule *g,
+ unsigned char state)
+{
+ (void)g;
+
+ switch (state) {
+ case DEV_GRANULE_STATE_NS:
+ assert(DEV_REFCOUNT(g) == 0U);
+ break;
+ case DEV_GRANULE_STATE_DELEGATED:
+ assert(DEV_REFCOUNT(g) == 0U);
+ break;
+ case DEV_GRANULE_STATE_MAPPED:
+ assert(DEV_REFCOUNT(g) == 0U);
+ break;
+ default:
+ /* Unknown dev_granule type */
+ assert(false);
+ }
+}
+
+/*
+ * Return the state of unlocked dev_granule.
+ * This function should be used only for NS dev_granules where RMM performs NS
+ * specific operations on the granule.
+ */
+static inline unsigned char dev_granule_unlocked_state(struct dev_granule *g)
+{
+ assert(g != NULL);
+
+ /* NOLINTNEXTLINE(clang-analyzer-core.NullDereference) */
+ return DEV_STATE(g);
+}
+
+/* Must be called with dev_granule lock held */
+static inline unsigned char dev_granule_get_state(struct dev_granule *g)
+{
+ assert((g != NULL) && DEV_LOCKED(g));
+
+ /* NOLINTNEXTLINE(clang-analyzer-core.NullDereference) */
+ return DEV_STATE(g);
+}
+
+/* Must be called with dev_granule lock held */
+static inline void dev_granule_set_state(struct dev_granule *g, unsigned char state)
+{
+ unsigned char val;
+
+ assert((g != NULL) && DEV_LOCKED(g));
+
+ /* NOLINTNEXTLINE(clang-analyzer-core.NullDereference) */
+ val = g->descriptor & DEV_STATE_MASK;
+
+ /* cppcheck-suppress misra-c2012-10.3 */
+ val ^= state << DEV_GRN_STATE_SHIFT;
+
+ /*
+ * Atomically EOR val while keeping the bits for refcount and
+ * bitlock as 0 which would preserve their values in memory.
+ */
+ (void)atomic_eor_8(&g->descriptor, val);
+}
+
+/*
+ * Acquire the bitlock and then check the expected state.
+ * Fails if an unexpected locking sequence is detected.
+ * Also asserts that the invariant conditions hold.
+ */
+static inline bool dev_granule_lock_on_state_match(struct dev_granule *g,
+ unsigned char expected_state)
+{
+ dev_granule_bitlock_acquire(g);
+
+ if (dev_granule_get_state(g) != expected_state) {
+ dev_granule_bitlock_release(g);
+ return false;
+ }
+
+ __dev_granule_assert_unlocked_invariants(g, expected_state);
+ return true;
+}
+
+/*
+ * Used when we're certain of the type of an object (e.g. because we hold a
+ * reference to it). In these cases we should never fail to acquire the lock.
+ */
+static inline void dev_granule_lock(struct dev_granule *g,
+ unsigned char expected_state)
+{
+ __unused bool locked = dev_granule_lock_on_state_match(g, expected_state);
+
+ assert(locked);
+}
+
+static inline void dev_granule_unlock(struct dev_granule *g)
+{
+ __dev_granule_assert_unlocked_invariants(g, dev_granule_get_state(g));
+ dev_granule_bitlock_release(g);
+}
+
+/* Transition state to @new_state and unlock the dev_granule */
+static inline void dev_granule_unlock_transition(struct dev_granule *g,
+ unsigned char new_state)
+{
+ dev_granule_set_state(g, new_state);
+ dev_granule_unlock(g);
+}
+
+/*
+ * Takes a valid pointer to a struct dev_granule corresponding to @type and
+ * returns the dev_granule physical address.
+ *
+ * This is purely a lookup, and provides no guarantees about the attributes of
+ * the dev_granule (i.e. whether it is locked, its state or its reference count).
+ */
+unsigned long dev_granule_addr(const struct dev_granule *g, enum dev_type type);
+
+/*
+ * Takes an aligned dev_granule address, returns a pointer to the corresponding
+ * struct dev_granule and stores the device granule type in @type.
+ *
+ * This is purely a lookup, and provides no guarantees about the attributes of
+ * the granule (i.e. whether it is locked, its state or its reference count).
+ */
+struct dev_granule *addr_to_dev_granule(unsigned long addr, enum dev_type *type);
+
+/*
+ * Verifies whether @addr is a valid dev_granule physical address, returns
+ * a pointer to the corresponding struct dev_granule and stores the type in @type.
+ *
+ * This is purely a lookup, and provides no guarantees w.r.t. the state of the
+ * granule (e.g. locking).
+ *
+ * Returns:
+ * Pointer to the struct dev_granule if @addr is a valid dev_granule physical
+ * address; the device granule type is stored in @type.
+ * NULL if any of:
+ * - @addr is not aligned to the size of a granule.
+ * - @addr is out of range.
+ */
+struct dev_granule *find_dev_granule(unsigned long addr, enum dev_type *type);
+
+/*
+ * Obtain a pointer to a locked dev_granule at @addr if @addr is a valid dev_granule
+ * physical address and the state of the dev_granule at @addr is @expected_state;
+ * the device granule type is stored in @type.
+ *
+ * Returns:
+ * A valid dev_granule pointer if @addr is a valid dev_granule physical address;
+ * the device granule type is stored in @type.
+ * NULL if any of:
+ * - @addr is not aligned to the size of a granule.
+ * - @addr is out of range.
+ * - the state of the dev_granule at @addr is not @expected_state.
+ */
+struct dev_granule *find_lock_dev_granule(unsigned long addr,
+ unsigned char expected_state,
+ enum dev_type *type);
+/*
+ * The refcount field occupies the LSBs of the dev_granule descriptor,
+ * so functions which modify its value can operate directly on
+ * the whole 8-bit word without masking, provided that the result
+ * neither exceeds DEV_REFCOUNT_MAX nor goes negative.
+ */
+
+/*
+ * Atomically increments the reference counter of the dev_granule by @val.
+ *
+ * Must be called with dev_granule lock held.
+ */
+static inline void dev_granule_refcount_inc(struct dev_granule *g,
+ unsigned char val)
+{
+ uint8_t old_refcount __unused;
+
+ assert((g != NULL) && DEV_LOCKED(g));
+ old_refcount = atomic_load_add_8(&g->descriptor, val) &
+ DEV_REFCOUNT_MASK;
+ assert((old_refcount + val) <= DEV_REFCOUNT_MAX);
+}
+
+/*
+ * Atomically increments the reference counter of the dev_granule.
+ *
+ * Must be called with dev_granule lock held.
+ */
+static inline void atomic_dev_granule_get(struct dev_granule *g)
+{
+ dev_granule_refcount_inc(g, 1U);
+}
+
+/*
+ * Atomically decrements the reference counter of the dev_granule by @val.
+ *
+ * Must be called with dev_granule lock held.
+ */
+static inline void dev_granule_refcount_dec(struct dev_granule *g, unsigned char val)
+{
+ uint8_t old_refcount __unused;
+
+ assert((g != NULL) && DEV_LOCKED(g));
+
+ /* coverity[misra_c_2012_rule_10_1_violation:SUPPRESS] */
+ old_refcount = atomic_load_add_8(&g->descriptor, (uint8_t)(-val)) &
+ DEV_REFCOUNT_MASK;
+ assert(old_refcount >= val);
+}
+
+/*
+ * Atomically decrements the reference counter of the dev_granule.
+ *
+ * Must be called with dev_granule lock held.
+ */
+static inline void atomic_dev_granule_put(struct dev_granule *g)
+{
+ dev_granule_refcount_dec(g, 1U);
+}
+
+/*
+ * Atomically decrements the reference counter of the dev_granule.
+ * Stores to memory with release semantics.
+ */
+static inline void atomic_dev_granule_put_release(struct dev_granule *g)
+{
+ uint8_t old_refcount __unused;
+
+ assert(g != NULL);
+ old_refcount = atomic_load_add_release_8(&g->descriptor,
+ (uint8_t)(-1)) & DEV_REFCOUNT_MASK;
+ assert(old_refcount != 0U);
+}
+
+/*
+ * Returns 'true' if the dev_granule is locked, 'false' otherwise.
+ *
+ * This function is only meant to be used for verification and testing,
+ * and this functionality is not required for RMM operations.
+ */
+static inline bool is_dev_granule_locked(struct dev_granule *g)
+{
+ assert(g != NULL);
+ return DEV_LOCKED(g);
+}
+
+#endif /* DEV_GRANULE_H */
diff --git a/lib/granule/include/fake_host/granule_lock.h b/lib/granule/include/fake_host/granule_lock.h
index 49a87c5..bce5962 100644
--- a/lib/granule/include/fake_host/granule_lock.h
+++ b/lib/granule/include/fake_host/granule_lock.h
@@ -25,4 +25,20 @@
g->descriptor &= ~GRN_LOCK_BIT;
}
+static inline void dev_granule_bitlock_acquire(struct dev_granule *g)
+{
+ /*
+ * The fake_host architecture is single threaded and we do not expect
+ * the lock to be already acquired in properly implemented locking
+ * sequence.
+ */
+ assert((g->descriptor & DEV_GRN_LOCK_BIT) == 0);
+ g->descriptor |= DEV_GRN_LOCK_BIT;
+}
+
+static inline void dev_granule_bitlock_release(struct dev_granule *g)
+{
+ g->descriptor &= ~DEV_GRN_LOCK_BIT;
+}
+
#endif /* GRANULE_LOCK_H */
diff --git a/lib/granule/include/granule_types.h b/lib/granule/include/granule_types.h
index e95f921..367340a 100644
--- a/lib/granule/include/granule_types.h
+++ b/lib/granule/include/granule_types.h
@@ -28,6 +28,8 @@
* - GRANULE_STATE_REC
* - GRANULE_STATE_PDEV
* - GRANULE_STATE_VDEV
+ * - DEV_GRANULE_STATE_NS
+ * - DEV_GRANULE_STATE_DELEGATED
*
* Otherwise a granule state is considered `internal`.
*
@@ -38,6 +40,7 @@
* - GRANULE_STATE_REC_AUX
* - GRANULE_STATE_PDEV_AUX
* - GRANULE_STATE_VDEV_AUX
+ * - DEV_GRANULE_STATE_MAPPED
*
* The following locking rules must be followed in all cases:
*
@@ -54,8 +57,9 @@
* 4. Granules in an `internal` state must be locked in order of state:
* 1. `RTT`
* 2. `DATA`
- * 3. `REC_AUX`
- * 4. `PDEV_AUX`
+ * 3. `DEV_MAPPED`
+ * 4. `REC_AUX`
+ * 5. `PDEV_AUX`
*
* 5. Granules in the same `internal` state must be locked in the order defined
* below for that specific state.
@@ -235,7 +239,6 @@
* access.
* [9:0]: refcount
*/
-
struct granule {
uint16_t descriptor;
};
@@ -253,4 +256,70 @@
#define STATE_MASK (unsigned short)MASK(GRN_STATE)
#define REFCOUNT_MASK (unsigned short)MASK(GRN_REFCOUNT)
+/*
+ * Non-Secure dev_granule (external)
+ *
+ * Dev Granule content is not protected by dev_granule::lock, as it is always
+ * subject to reads and writes from the NS world.
+ */
+#define DEV_GRANULE_STATE_NS 0U
+
+/*
+ * Delegated Dev Granule (external)
+ *
+ * Dev Granule content is protected by dev_granule::lock.
+ *
+ * No references are held on this granule type.
+ */
+#define DEV_GRANULE_STATE_DELEGATED 1U
+
+/*
+ * Mapped Dev Granule (internal)
+ *
+ * Dev Granule is assigned to an IPA in a Realm.
+ *
+ * Dev Granule content is not protected by dev_granule::lock, as it is always
+ * subject to reads and writes from within a Realm.
+ *
+ * A dev_granule in this state is always referenced from exactly one entry
+ * in an RTT granule which must be locked before locking this dev_granule.
+ * Only a single Dev Granule can be locked at a time.
+ * The complete internal locking order for MAPPED Dev Granules is:
+ *
+ * RD -> RTT -> RTT -> ... -> DEV MAPPED
+ * No references are held on this granule type.
+ */
+#define DEV_GRANULE_STATE_MAPPED 2U
+
+struct dev_granule {
+ uint8_t descriptor;
+};
+
+/*
+ * Device granule descriptor bit fields:
+ *
+ * @bit_lock protects the struct dev_granule itself. Take this lock whenever
+ * inspecting or modifying any other fields in this descriptor.
+ * [7]: bit_lock
+ *
+ * [6]: reserved
+ *
+ * @state is the state of the device granule.
+ * [5:3]: state
+ *
+ * @refcount counts RMM and realm references to this device granule.
+ * [2:0]: refcount
+ */
+#define DEV_GRN_LOCK_SHIFT U(7)
+#define DEV_GRN_LOCK_BIT (U(1) << DEV_GRN_LOCK_SHIFT)
+
+#define DEV_GRN_STATE_SHIFT U(3)
+#define DEV_GRN_STATE_WIDTH U(3)
+
+#define DEV_GRN_REFCOUNT_SHIFT U(0)
+#define DEV_GRN_REFCOUNT_WIDTH U(3)
+
+#define DEV_STATE_MASK (unsigned char)MASK(DEV_GRN_STATE)
+#define DEV_REFCOUNT_MASK (unsigned char)MASK(DEV_GRN_REFCOUNT)
+
#endif /* GRANULE_TYPES_H */
diff --git a/lib/granule/src/dev_granule.c b/lib/granule/src/dev_granule.c
new file mode 100644
index 0000000..97337ba
--- /dev/null
+++ b/lib/granule/src/dev_granule.c
@@ -0,0 +1,132 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <dev_granule.h>
+#include <platform_api.h>
+#include <stddef.h>
+#include <utils_def.h>
+
+/* Non-coherent device granules */
+IF_NCBMC(static) struct dev_granule dev_ncoh_granules[RMM_MAX_NCOH_GRANULES]
+ IF_NCBMC(__section("granules_memory"));
+
+/*
+ * Takes a valid pointer to a struct dev_granule corresponding to @type and
+ * returns the dev_granule physical address.
+ *
+ * This is purely a lookup, and provides no guarantees about the attributes of
+ * the dev_granule (i.e. whether it is locked, its state or its reference count).
+ */
+unsigned long dev_granule_addr(const struct dev_granule *g, enum dev_type type)
+{
+ (void)type;
+ unsigned long idx;
+
+ assert(g != NULL);
+
+ /* No coherent device granules */
+ assert(type == DEV_RANGE_NON_COHERENT);
+
+ idx = ((unsigned long)g - (unsigned long)dev_ncoh_granules) /
+ sizeof(struct dev_granule);
+ return plat_dev_granule_idx_to_addr(idx, type);
+}
+
+/*
+ * Takes a dev_granule index corresponding to @type and returns a pointer
+ * to the struct dev_granule.
+ *
+ * This is purely a lookup, and provides no guarantees about the attributes of
+ * the granule (i.e. whether it is locked, its state or its reference count).
+ */
+static struct dev_granule *dev_granule_from_idx(unsigned long idx, enum dev_type type)
+{
+ (void)type;
+
+ /* No coherent device granules */
+ assert((type == DEV_RANGE_NON_COHERENT) && (idx < RMM_MAX_NCOH_GRANULES));
+
+ return &dev_ncoh_granules[idx];
+}
+
+/*
+ * Takes an aligned dev_granule address, returns a pointer to the corresponding
+ * struct dev_granule and stores the device granule type in @type.
+ *
+ * This is purely a lookup, and provides no guarantees about the attributes of
+ * the granule (i.e. whether it is locked, its state or its reference count).
+ */
+struct dev_granule *addr_to_dev_granule(unsigned long addr, enum dev_type *type)
+{
+ unsigned long idx;
+
+ assert(GRANULE_ALIGNED(addr));
+
+ idx = plat_dev_granule_addr_to_idx(addr, type);
+ return dev_granule_from_idx(idx, *type);
+}
+
+/*
+ * Verifies whether @addr is a valid dev_granule physical address, returns
+ * a pointer to the corresponding struct dev_granule and stores the type in @type.
+ *
+ * This is purely a lookup, and provides no guarantees w.r.t. the state of the
+ * granule (e.g. locking).
+ *
+ * Returns:
+ * Pointer to the struct dev_granule if @addr is a valid dev_granule physical
+ * address; the device granule type is stored in @type.
+ * NULL if any of:
+ * - @addr is not aligned to the size of a granule.
+ * - @addr is out of range.
+ */
+struct dev_granule *find_dev_granule(unsigned long addr, enum dev_type *type)
+{
+ unsigned long idx;
+
+ if (!GRANULE_ALIGNED(addr)) {
+ return NULL;
+ }
+
+ idx = plat_dev_granule_addr_to_idx(addr, type);
+
+ if (idx == UINT64_MAX) {
+ return NULL;
+ }
+
+ return dev_granule_from_idx(idx, *type);
+}
+
+/*
+ * Obtain a pointer to a locked dev_granule at @addr if @addr is a valid dev_granule
+ * physical address and the state of the dev_granule at @addr is @expected_state;
+ * the device granule type is stored in @type.
+ *
+ * Returns:
+ * A valid dev_granule pointer if @addr is a valid dev_granule physical address;
+ * the device granule type is stored in @type.
+ * NULL if any of:
+ * - @addr is not aligned to the size of a granule.
+ * - @addr is out of range.
+ * - the state of the dev_granule at @addr is not @expected_state.
+ */
+struct dev_granule *find_lock_dev_granule(unsigned long addr,
+ unsigned char expected_state,
+ enum dev_type *type)
+{
+ struct dev_granule *g = find_dev_granule(addr, type);
+
+ if (g == NULL) {
+ return NULL;
+ }
+
+ if (!dev_granule_lock_on_state_match(g, expected_state)) {
+ return NULL;
+ }
+
+ return g;
+}
diff --git a/lib/granule/src/granule.c b/lib/granule/src/granule.c
index b5dff23..8a67a87 100644
--- a/lib/granule/src/granule.c
+++ b/lib/granule/src/granule.c
@@ -7,12 +7,8 @@
#include <assert.h>
#include <debug.h>
#include <granule.h>
-#include <mmio.h>
#include <platform_api.h>
#include <stddef.h>
-/* According to the C standard, the memset function used in this file is declared in string.h */
-/* coverity[unnecessary_header: SUPPRESS] */
-#include <string.h>
#include <utils_def.h>
IF_NCBMC(static) struct granule granules[RMM_MAX_GRANULES]
diff --git a/lib/rmm_el3_ifc/include/rmm_el3_ifc.h b/lib/rmm_el3_ifc/include/rmm_el3_ifc.h
index 8308f74..eca8794 100644
--- a/lib/rmm_el3_ifc/include/rmm_el3_ifc.h
+++ b/lib/rmm_el3_ifc/include/rmm_el3_ifc.h
@@ -6,6 +6,10 @@
#ifndef RMM_EL3_IFC_H
#define RMM_EL3_IFC_H
+#ifndef __ASSEMBLER__
+#include <dev_type.h>
+#endif
+
#include <sizes.h>
#include <smc.h>
@@ -249,11 +253,6 @@
/*****************************************************************************
* Boot Manifest definitions, functions and structures (v0.4)
****************************************************************************/
-enum range_type {
- DEV_RANGE_COHERENT,
- DEV_RANGE_NON_COHERENT,
- DEV_RANGE_MAX
-};
/* Console info structure */
struct console_info {
@@ -385,7 +384,7 @@
*/
int rmm_el3_ifc_get_dev_range_validated_pa(unsigned long max_num_banks,
struct memory_info **plat_dev_range_info,
- enum range_type type);
+ enum dev_type type);
/****************************************************************************
* RMM-EL3 Runtime interface APIs
diff --git a/lib/rmm_el3_ifc/src/rmm_el3_ifc_manifest.c b/lib/rmm_el3_ifc/src/rmm_el3_ifc_manifest.c
index 738290f..651892e 100644
--- a/lib/rmm_el3_ifc/src/rmm_el3_ifc_manifest.c
+++ b/lib/rmm_el3_ifc/src/rmm_el3_ifc_manifest.c
@@ -242,7 +242,7 @@
*/
int rmm_el3_ifc_get_dev_range_validated_pa(unsigned long max_num_banks,
struct memory_info **plat_dev_range_info,
- enum range_type type)
+ enum dev_type type)
{
struct memory_info *plat_memory;
unsigned int max_granules;
diff --git a/plat/arm/src/arm_granule.c b/plat/arm/src/arm_granule.c
index bcf8ae1..f6745b2 100644
--- a/plat/arm/src/arm_granule.c
+++ b/plat/arm/src/arm_granule.c
@@ -27,23 +27,22 @@
return &arm_dev_coh;
}
-unsigned long plat_granule_addr_to_idx(unsigned long addr)
+static unsigned long granule_addr_to_idx(unsigned long addr,
+ struct arm_memory_layout *memory_ptr)
{
- const struct arm_memory_layout *dram = &arm_dram;
unsigned long r, l = 0UL;
- if (!GRANULE_ALIGNED(addr)) {
+ if (!GRANULE_ALIGNED(addr) || (memory_ptr->num_banks == 0UL)) {
return UINT64_MAX;
}
- assert(dram->num_banks > 0UL);
- assert(dram->num_banks <= PLAT_ARM_MAX_MEM_BANKS);
- r = dram->num_banks - 1UL;
+ assert(memory_ptr->num_banks <= PLAT_ARM_MAX_MEM_BANKS);
+ r = memory_ptr->num_banks - 1UL;
/*
* Use a binary search rather than a linear one to locate the bank which
* the address falls within, then use the start_gran_idx (which is a
- * cumulative idx from previous dram banks) to calculate the required
+ * cumulative idx from previous memory banks) to calculate the required
* granule index.
*/
while (l <= r) {
@@ -53,7 +52,7 @@
i = l + ((r - l) / 2UL);
assert(i < PLAT_ARM_MAX_MEM_BANKS);
- bank = &dram->bank[i];
+ bank = &memory_ptr->bank[i];
if (addr < bank->base) {
if (i == 0UL) {
@@ -70,19 +69,52 @@
return UINT64_MAX;
}
-unsigned long plat_granule_idx_to_addr(unsigned long idx)
+unsigned long plat_granule_addr_to_idx(unsigned long addr)
{
- const struct arm_memory_layout *dram = &arm_dram;
+ return granule_addr_to_idx(addr, &arm_dram);
+}
+
+unsigned long plat_dev_granule_addr_to_idx(unsigned long addr, enum dev_type *type)
+{
+ unsigned long ret;
+
+ assert(type != NULL);
+
+ /* Check non-coherent device granules */
+ if (arm_dev_ncoh.num_granules != 0UL) {
+ ret = granule_addr_to_idx(addr, &arm_dev_ncoh);
+ if (ret != UINT64_MAX) {
+ *type = DEV_RANGE_NON_COHERENT;
+ return ret;
+ }
+ }
+
+ /* Check coherent device granules */
+ if (arm_dev_coh.num_granules != 0UL) {
+ ret = granule_addr_to_idx(addr, &arm_dev_coh);
+ if (ret != UINT64_MAX) {
+ *type = DEV_RANGE_COHERENT;
+ return ret;
+ }
+ }
+
+ *type = DEV_RANGE_MAX;
+ return UINT64_MAX;
+}
+
+static unsigned long granule_idx_to_addr(unsigned long idx,
+ struct arm_memory_layout *memory_ptr)
+{
unsigned long r, l = 0UL, addr = 0UL;
- assert(dram->num_banks > 0UL);
- assert(idx < dram->num_granules);
+ assert(memory_ptr->num_banks != 0UL);
+ assert(idx < memory_ptr->num_granules);
- r = dram->num_banks - 1UL;
+ r = memory_ptr->num_banks - 1UL;
/*
* Calculate the start and end granule index of each bank using the
- * start_gran_idx (which is a cumulative idx from previous dram banks)
+ * start_gran_idx (which is a cumulative idx from previous memory banks)
* and then check whether the given index falls within it.
*/
while (l <= r) {
@@ -93,7 +125,7 @@
i = l + ((r - l) / 2UL);
assert(i < PLAT_ARM_MAX_MEM_BANKS);
- bank = &dram->bank[i];
+ bank = &memory_ptr->bank[i];
idx_start = bank->start_gran_idx;
idx_end = idx_start + (bank->size >> GRANULE_SHIFT) - 1UL;
@@ -112,3 +144,19 @@
assert(l <= r);
return addr;
}
+
+unsigned long plat_granule_idx_to_addr(unsigned long idx)
+{
+ return granule_idx_to_addr(idx, &arm_dram);
+}
+
+unsigned long plat_dev_granule_idx_to_addr(unsigned long idx, enum dev_type type)
+{
+ (void)type;
+
+ /* No coherent device memory */
+ assert(type == DEV_RANGE_NON_COHERENT);
+
+ return granule_idx_to_addr(idx, &arm_dev_ncoh);
+}
+
diff --git a/plat/arm/src/arm_memory.c b/plat/arm/src/arm_memory.c
index 98fb144..f48cfbc 100644
--- a/plat/arm/src/arm_memory.c
+++ b/plat/arm/src/arm_memory.c
@@ -50,7 +50,7 @@
arm_set_memory_layout(plat_dram, arm_get_dram_layout());
}
-void arm_set_dev_layout(struct memory_info *plat_dev, enum range_type type)
+void arm_set_dev_layout(struct memory_info *plat_dev, enum dev_type type)
{
struct arm_memory_layout *memory_ptr;
diff --git a/plat/arm/src/arm_setup.c b/plat/arm/src/arm_setup.c
index e4ad2a8..5478dde 100644
--- a/plat/arm/src/arm_setup.c
+++ b/plat/arm/src/arm_setup.c
@@ -55,7 +55,7 @@
struct memory_info *plat_memory_info;
struct console_list *csl_list;
struct console_info *console_ptr;
- const enum range_type type[] = {DEV_RANGE_COHERENT, DEV_RANGE_NON_COHERENT};
+ const enum dev_type type[] = {DEV_RANGE_COHERENT, DEV_RANGE_NON_COHERENT};
/* TBD Initialize UART for early log */
struct xlat_mmap_region plat_regions[] = {
diff --git a/plat/arm/src/include/arm_memory.h b/plat/arm/src/include/arm_memory.h
index 14fb791..6f34f7e 100644
--- a/plat/arm/src/include/arm_memory.h
+++ b/plat/arm/src/include/arm_memory.h
@@ -26,7 +26,7 @@
};
void arm_set_dram_layout(struct memory_info *plat_dram);
-void arm_set_dev_layout(struct memory_info *plat_dev, enum range_type type);
+void arm_set_dev_layout(struct memory_info *plat_dev, enum dev_type type);
struct arm_memory_layout *arm_get_dram_layout(void);
struct arm_memory_layout *arm_get_dev_ncoh_layout(void);
struct arm_memory_layout *arm_get_dev_coh_layout(void);
diff --git a/plat/host/CMakeLists.txt b/plat/host/CMakeLists.txt
index 23c47e4..775f53e 100644
--- a/plat/host/CMakeLists.txt
+++ b/plat/host/CMakeLists.txt
@@ -21,6 +21,6 @@
add_subdirectory("../common" ${RMM_BINARY_DIR}/plat/common)
# Add the HOST_VARIANT directory before common, so that it has the option
-# to override HOST_MEM_SIZE
+# to override HOST_DRAM_SIZE
add_subdirectory("${HOST_VARIANT}")
add_subdirectory("common")
diff --git a/plat/host/common/CMakeLists.txt b/plat/host/common/CMakeLists.txt
index 242475e..ae15a68 100644
--- a/plat/host/common/CMakeLists.txt
+++ b/plat/host/common/CMakeLists.txt
@@ -4,9 +4,16 @@
#
arm_config_option(
- NAME HOST_MEM_SIZE
- HELP "Host memory size that will be used as physical granules"
- DEFAULT "0x40000000")
+ NAME HOST_DRAM_SIZE
+ HELP "Host memory size that will be used as physical DRAM"
+ TYPE STRING
+ DEFAULT "0x20000000")
+
+arm_config_option(
+ NAME HOST_NCOH_DEV_SIZE
+ HELP "Host memory size that will be used as non-coherent device granules"
+ TYPE STRING
+ DEFAULT "0xA000")
add_library(rmm-host-common)
@@ -29,4 +36,7 @@
PRIVATE "src")
target_compile_definitions(rmm-host-common
- PUBLIC "HOST_MEM_SIZE=UL(${HOST_MEM_SIZE})")
+ PUBLIC "HOST_DRAM_SIZE=UL(${HOST_DRAM_SIZE})")
+
+target_compile_definitions(rmm-host-common
+ PUBLIC "HOST_NCOH_DEV_SIZE=UL(${HOST_NCOH_DEV_SIZE})")
diff --git a/plat/host/common/include/host_defs.h b/plat/host/common/include/host_defs.h
index 70a182d..043d1ff 100644
--- a/plat/host/common/include/host_defs.h
+++ b/plat/host/common/include/host_defs.h
@@ -8,7 +8,10 @@
#include <utils_def.h>
-/* Total number of granules on the current platform */
-#define HOST_NR_GRANULES (HOST_MEM_SIZE/GRANULE_SIZE)
+/* Total number of DRAM granules on the current platform */
+#define HOST_NR_GRANULES (HOST_DRAM_SIZE/GRANULE_SIZE)
+
+/* Total number of non-coherent device granules on the current platform */
+#define HOST_NR_NCOH_GRANULES (HOST_NCOH_DEV_SIZE/GRANULE_SIZE)
#endif /* HOST_DEFS_H */
diff --git a/plat/host/common/include/host_utils.h b/plat/host/common/include/host_utils.h
index c90ba48..3444e29 100644
--- a/plat/host/common/include/host_utils.h
+++ b/plat/host/common/include/host_utils.h
@@ -138,6 +138,11 @@
unsigned long host_util_get_granule_base(void);
/*
+ * Return the configured address for the dev granule base.
+ */
+unsigned long host_util_get_dev_granule_base(void);
+
+/*
* Set the current CPU emulated by the platform.
*/
void host_util_set_cpuid(unsigned int cpuid);
diff --git a/plat/host/common/src/host_platform_api_cmn.c b/plat/host/common/src/host_platform_api_cmn.c
index 50b7881..d10955e 100644
--- a/plat/host/common/src/host_platform_api_cmn.c
+++ b/plat/host/common/src/host_platform_api_cmn.c
@@ -65,7 +65,7 @@
unsigned long plat_granule_addr_to_idx(unsigned long addr)
{
if (!(GRANULE_ALIGNED(addr) &&
- (addr < (host_util_get_granule_base() + HOST_MEM_SIZE)) &&
+ (addr < (host_util_get_granule_base() + HOST_DRAM_SIZE)) &&
(addr >= host_util_get_granule_base()))) {
return UINT64_MAX;
}
@@ -78,3 +78,25 @@
assert(idx < HOST_NR_GRANULES);
return host_util_get_granule_base() + (idx * GRANULE_SIZE);
}
+
+unsigned long plat_dev_granule_addr_to_idx(unsigned long addr, enum dev_type *type)
+{
+ if (!(GRANULE_ALIGNED(addr) &&
+ (addr < (host_util_get_dev_granule_base() + HOST_NCOH_DEV_SIZE)) &&
+ (addr >= host_util_get_dev_granule_base()))) {
+ return UINT64_MAX;
+ }
+
+ *type = DEV_RANGE_NON_COHERENT;
+ return (addr - host_util_get_dev_granule_base()) / GRANULE_SIZE;
+}
+
+unsigned long plat_dev_granule_idx_to_addr(unsigned long idx, enum dev_type type)
+{
+ (void)type;
+
+ /* No coherent device memory */
+ assert(type == DEV_RANGE_NON_COHERENT);
+ assert(idx < HOST_NR_NCOH_GRANULES);
+ return host_util_get_dev_granule_base() + (idx * GRANULE_SIZE);
+}
diff --git a/plat/host/common/src/host_utils.c b/plat/host/common/src/host_utils.c
index 5c8c40c..c114763 100644
--- a/plat/host/common/src/host_utils.c
+++ b/plat/host/common/src/host_utils.c
@@ -25,7 +25,10 @@
* Allocate memory to emulate physical memory to initialize the
* granule library.
*/
-IF_NCBMC(static) unsigned char granules_buffer[HOST_MEM_SIZE] __aligned(GRANULE_SIZE);
+IF_NCBMC(static) unsigned char host_dram_buffer[HOST_DRAM_SIZE]
+ __aligned(GRANULE_SIZE);
+IF_NCBMC(static) unsigned char host_dev_ncoh_buffer[HOST_NCOH_DEV_SIZE]
+ __aligned(GRANULE_SIZE);
/*
* Define and set the Boot Interface arguments.
@@ -132,7 +135,12 @@
unsigned long host_util_get_granule_base(void)
{
- return (unsigned long)granules_buffer;
+ return (unsigned long)host_dram_buffer;
+}
+
+unsigned long host_util_get_dev_granule_base(void)
+{
+ return (unsigned long)host_dev_ncoh_buffer;
}
void host_util_set_cpuid(unsigned int cpuid)
diff --git a/plat/host/host_cbmc/include/tb_common.h b/plat/host/host_cbmc/include/tb_common.h
index 9a29474..909bc0f 100644
--- a/plat/host/host_cbmc/include/tb_common.h
+++ b/plat/host/host_cbmc/include/tb_common.h
@@ -115,7 +115,7 @@
/*
- * Return an unused continuous index to both `granules` and `granules_buffer`
+ * Return an unused continuous index to both `granules` and `host_dram_buffer`
* arrays.
*/
size_t next_index(void);
diff --git a/plat/host/host_cbmc/include/tb_granules.h b/plat/host/host_cbmc/include/tb_granules.h
index 4dabfce..26891a5 100644
--- a/plat/host/host_cbmc/include/tb_granules.h
+++ b/plat/host/host_cbmc/include/tb_granules.h
@@ -43,7 +43,7 @@
* CBMC needs access to the below data structures which are not otherwise
* visible outside their respective files.
*/
-extern unsigned char granules_buffer[HOST_MEM_SIZE];
+extern unsigned char host_dram_buffer[HOST_DRAM_SIZE];
extern struct granule granules[RMM_MAX_GRANULES];
/*
diff --git a/plat/host/host_cbmc/src/tb_common.c b/plat/host/host_cbmc/src/tb_common.c
index 8dab5d6..c9f786c 100644
--- a/plat/host/host_cbmc/src/tb_common.c
+++ b/plat/host/host_cbmc/src/tb_common.c
@@ -66,17 +66,17 @@
* NOTE: the explicit pointer to integer type cast is necessary, as CBMC
* check fails without it.
*/
- if (GRANULE_ALIGNED(addr) && (uint64_t)granules_buffer <= addr &&
- addr < (uint64_t)granules_buffer + sizeof(granules_buffer)) {
+ if (GRANULE_ALIGNED(addr) && (uint64_t)host_dram_buffer <= addr &&
+ addr < (uint64_t)host_dram_buffer + sizeof(host_dram_buffer)) {
/*
* Keep these assserts for sanitary check, there was situation
* these asserts fail possibly due to CBMC dislike type
* conversion between number and pointer
*/
ASSERT(GRANULE_ALIGNED(addr), "internal: `_valid_pa`, addr in alignment");
- ASSERT(addr >= (uint64_t)granules_buffer,
+ ASSERT(addr >= (uint64_t)host_dram_buffer,
"internal: `_valid_pa`, addr in lower range");
- ASSERT(addr < (uint64_t)granules_buffer + sizeof(granules_buffer),
+ ASSERT(addr < (uint64_t)host_dram_buffer + sizeof(host_dram_buffer),
"internal: `_valid_pa`, addr in upper range");
return true;
}
@@ -85,7 +85,7 @@
struct granule *pa_to_granule_metadata_ptr(uint64_t addr)
{
- uint64_t idx = (addr - (uint64_t)granules_buffer)/GRANULE_SIZE;
+ uint64_t idx = (addr - (uint64_t)host_dram_buffer)/GRANULE_SIZE;
__ASSERT(idx >= 0, "internal: `_pa_to_granule_metadata_ptr`, addr is in lower range");
__ASSERT(idx < RMM_MAX_GRANULES,
@@ -99,29 +99,29 @@
if (!valid_granule_metadata_ptr(g_ptr)) {
return NULL;
}
- return granules_buffer + (g_ptr - granules) * GRANULE_SIZE;
+ return host_dram_buffer + (g_ptr - granules) * GRANULE_SIZE;
}
uint64_t granule_metadata_ptr_to_pa(struct granule *g_ptr)
{
- return (uint64_t)granules_buffer + (g_ptr - granules) * GRANULE_SIZE;
+ return (uint64_t)host_dram_buffer + (g_ptr - granules) * GRANULE_SIZE;
}
void *pa_to_granule_buffer_ptr(uint64_t addr)
{
- __ASSERT((unsigned char *)addr - granules_buffer >= 0,
+ __ASSERT((unsigned char *)addr - host_dram_buffer >= 0,
"internal: `_pa_to_granule_buffer_ptr`, addr is in lower range");
/*
* CBMC has difficulty doing an integer->object mapping, when the
* integer is the address of the expected object, and the integer is not
* derived from a pointer.
* So instead of simply returning addr we need to tell CBMC that the
- * object we are looking for is in the `granules_buffer` array, at an
+ * object we are looking for is in the `host_dram_buffer` array, at an
* offset. To calculate the offset we can use `addr`, and the address of
- * `granules_buffer`.
+ * `host_dram_buffer`.
* For details see https://github.com/diffblue/cbmc/issues/8103
*/
- return (void *)granules_buffer + ((unsigned char *)addr - granules_buffer);
+ return (void *)host_dram_buffer + ((unsigned char *)addr - host_dram_buffer);
}
bool valid_granule_metadata_ptr(struct granule *p)
@@ -132,7 +132,7 @@
/*
* Return the first index of `number` of unused continuous indice to both
- * `granules` and `granules_buffer` arrays.
+ * `granules` and `host_dram_buffer` arrays.
*/
size_t next_index(void)
{
@@ -159,7 +159,7 @@
unsigned long index = next_index();
unsigned long offset = index * GRANULE_SIZE;
- (void)memcpy(granules_buffer + offset, content, size);
+ (void)memcpy(host_dram_buffer + offset, content, size);
used_granules_buffer[index] = true;
}
@@ -179,7 +179,7 @@
}
granules[index] = *granule_metadata;
- (void)memcpy(granules_buffer + offset, src_page, src_size);
+ (void)memcpy(host_dram_buffer + offset, src_page, src_size);
used_granules_buffer[index] = true;
return &granules[index];
}
@@ -195,14 +195,14 @@
enum granule_gpt get_granule_gpt(uint64_t addr)
{
- uint64_t idx = (addr - (uint64_t)granules_buffer)/GRANULE_SIZE;
+ uint64_t idx = (addr - (uint64_t)host_dram_buffer)/GRANULE_SIZE;
return granule_gpt_array[idx];
}
void set_granule_gpt(uint64_t addr, enum granule_gpt granule_gpt)
{
- uint64_t idx = (addr - (uint64_t)granules_buffer)/GRANULE_SIZE;
+ uint64_t idx = (addr - (uint64_t)host_dram_buffer)/GRANULE_SIZE;
granule_gpt_array[idx] = granule_gpt;
}
diff --git a/runtime/rmi/run.c b/runtime/rmi/run.c
index b1129aa..ca81c7d 100644
--- a/runtime/rmi/run.c
+++ b/runtime/rmi/run.c
@@ -18,6 +18,7 @@
#include <smc-rmi.h>
#include <smc-rsi.h>
#include <smc.h>
+#include <string.h>
static void reset_last_run_info(struct rec *rec)
{
diff --git a/tools/cbmc/CheckCBMC.cmake b/tools/cbmc/CheckCBMC.cmake
index 64df056..89694b9 100644
--- a/tools/cbmc/CheckCBMC.cmake
+++ b/tools/cbmc/CheckCBMC.cmake
@@ -54,8 +54,12 @@
# Configurations for the initial state.
set(GRANULE_SHIFT "9")
set(MAX_NUM_OF_GRANULE "4")
-math(EXPR HOST_MEM_SIZE "(1 << ${GRANULE_SHIFT}) * ${MAX_NUM_OF_GRANULE}")
-set(HOST_MEM_SIZE "${HOST_MEM_SIZE}UL")
+set(MAX_NUM_OF_COH_GRANULE "1")
+set(MAX_NUM_OF_NCOH_GRANULE "4")
+math(EXPR HOST_DRAM_SIZE "(1 << ${GRANULE_SHIFT}) * ${MAX_NUM_OF_GRANULE}")
+set(HOST_DRAM_SIZE "${HOST_DRAM_SIZE}UL")
+math(EXPR HOST_NCOH_DEV_SIZE "(1 << ${GRANULE_SHIFT}) * ${MAX_NUM_OF_NCOH_GRANULE}")
+set(HOST_NCOH_DEV_SIZE "${HOST_NCOH_DEV_SIZE}UL")
set(MAX_RTT_UNWIND "6")
set(MAX_AUX_REC "2")
@@ -93,9 +97,12 @@
"-DGRANULE_SHIFT=${GRANULE_SHIFT}"
"-DXLAT_GRANULARITY_SIZE_SHIFT=${GRANULE_SHIFT}"
"-DRMM_MAX_GRANULES=${MAX_NUM_OF_GRANULE}"
+ "-DRMM_MAX_COH_GRANULES=${MAX_NUM_OF_COH_GRANULE}"
+ "-DRMM_MAX_NCOH_GRANULES=${MAX_NUM_OF_NCOH_GRANULE}"
"-DMAX_CPUS=1"
"-DMAX_RTT_LEVEL=${MAX_RTT_UNWIND}"
- "-DHOST_MEM_SIZE=${HOST_MEM_SIZE}"
+ "-DHOST_DRAM_SIZE=${HOST_DRAM_SIZE}"
+ "-DHOST_NCOH_DEV_SIZE=${HOST_NCOH_DEV_SIZE}"
"-DNAME=\"RMM\""
"-DVERSION=\"CBMC\""
"-DCOMMIT_INFO=\"CBMC\""