feat(lib/granule): Add dev_granule object type
Define dev_granule structure type and add device granule
objects to manage device memory in PCI regions.
Define device granule states:
- DEV_GRANULE_STATE_NS
- DEV_GRANULE_STATE_DELEGATED
- DEV_GRANULE_STATE_MAPPED
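For illustration, a handler that moves a dev_granule from NS to
DELEGATED could be written against this API roughly as follows
(sketch only, not part of this patch; the function name and error
values are placeholders):

  #include <dev_granule.h>
  #include <errno.h>

  static int dev_granule_delegate_example(unsigned long addr)
  {
          enum dev_type type;
          struct dev_granule *g;

          /* NULL if addr is not a valid dev_granule PA in NS state */
          g = find_lock_dev_granule(addr, DEV_GRANULE_STATE_NS, &type);
          if (g == NULL) {
                  return -EINVAL;
          }

          /* Set the new state and release the bitlock in one step */
          dev_granule_unlock_transition(g, DEV_GRANULE_STATE_DELEGATED);
          return 0;
  }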
Add locking primitives and access functions for
dev_granule objects.
Add dev_ncoh_granules[RMM_MAX_NCOH_GRANULES] array of
dev_granule structures for non-coherent device memory.
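The lookup and access helpers can be combined for simple unlocked
queries, e.g. (again a sketch only; the function name is a
placeholder):

  #include <assert.h>
  #include <dev_granule.h>
  #include <stdbool.h>

  static bool dev_pa_is_ns_example(unsigned long addr)
  {
          enum dev_type type;
          struct dev_granule *g = find_dev_granule(addr, &type);

          if (g == NULL) {
                  /* Unaligned or out-of-range address */
                  return false;
          }

          /* The index-based lookup is reversible */
          assert(dev_granule_addr(g, type) == addr);

          /* Unlocked state read, valid for NS-owned dev_granules */
          return dev_granule_unlocked_state(g) == DEV_GRANULE_STATE_NS;
  }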
Change-Id: I857095a997f78d2c39b3958056460112f3b34595
Signed-off-by: AlexeiFedorov <Alexei.Fedorov@arm.com>
diff --git a/lib/common/include/dev_type.h b/lib/common/include/dev_type.h
new file mode 100644
index 0000000..7f97417
--- /dev/null
+++ b/lib/common/include/dev_type.h
@@ -0,0 +1,16 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef DEV_TYPE_H
+#define DEV_TYPE_H
+
+/* Types of device memory */
+enum dev_type {
+ DEV_RANGE_COHERENT,
+ DEV_RANGE_NON_COHERENT,
+ DEV_RANGE_MAX
+};
+
+#endif /* DEV_TYPE_H */
diff --git a/lib/common/include/platform_api.h b/lib/common/include/platform_api.h
index 6b26ac1..34e53b3 100644
--- a/lib/common/include/platform_api.h
+++ b/lib/common/include/platform_api.h
@@ -5,6 +5,7 @@
#ifndef PLATFORM_API_H
#define PLATFORM_API_H
+#include <dev_type.h>
#include <stdint.h>
void plat_warmboot_setup(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3);
@@ -20,10 +21,26 @@
unsigned long plat_granule_addr_to_idx(unsigned long addr);
/*
+ * Takes an aligned dev_granule address, validates it and, if valid, returns
+ * the index in the dev_granules array, or UINT64_MAX in case of an error.
+ *
+ * This function also validates that the dev_granule address is a valid
+ * page address and, if valid, returns the device granule type in @type.
+ */
+unsigned long plat_dev_granule_addr_to_idx(unsigned long addr, enum dev_type *type);
+
+/*
* Takes an index in the struct granules array and returns the aligned granule
* address. The index must be within the number of granules expected by the
* platform.
*/
unsigned long plat_granule_idx_to_addr(unsigned long idx);
+/*
+ * Takes an index in the struct dev_granules array and returns the aligned
+ * dev_granule address of the specified device type. The index must be within
+ * the number of dev_granules expected by the platform.
+ */
+unsigned long plat_dev_granule_idx_to_addr(unsigned long idx, enum dev_type type);
+
#endif /* PLATFORM_API_H */
diff --git a/lib/granule/CMakeLists.txt b/lib/granule/CMakeLists.txt
index 53609d8..1f93036 100644
--- a/lib/granule/CMakeLists.txt
+++ b/lib/granule/CMakeLists.txt
@@ -15,6 +15,7 @@
"include/${RMM_ARCH}")
target_sources(rmm-lib-granule
- PRIVATE "src/granule.c")
+ PRIVATE "src/dev_granule.c"
+ "src/granule.c")
include (tests/CMakeLists.txt)
diff --git a/lib/granule/include/aarch64/granule_lock.h b/lib/granule/include/aarch64/granule_lock.h
index f23abd7..e2a04d2 100644
--- a/lib/granule/include/aarch64/granule_lock.h
+++ b/lib/granule/include/aarch64/granule_lock.h
@@ -45,4 +45,41 @@
);
}
+static inline void dev_granule_bitlock_acquire(struct dev_granule *g)
+{
+ /* To avoid misra-c2012-2.7 warnings */
+ (void)g;
+ uint32_t tmp;
+ uint32_t mask = DEV_GRN_LOCK_BIT;
+
+ asm volatile(
+ "1: ldsetab %w[mask], %w[tmp], %[lock]\n"
+ " tbz %w[tmp], #%c[bit], 2f\n"
+ " ldxrb %w[tmp], %[lock]\n"
+ " tbz %w[tmp], #%c[bit], 1b\n"
+ " wfe\n"
+ " b 1b\n"
+ "2:\n"
+ : [lock] "+Q" (g->descriptor),
+ [tmp] "=&r" (tmp)
+ : [mask] "r" (mask),
+ [bit] "i" (DEV_GRN_LOCK_SHIFT)
+ : "memory"
+ );
+}
+
+static inline void dev_granule_bitlock_release(struct dev_granule *g)
+{
+ /* To avoid misra-c2012-2.7 warnings */
+ (void)g;
+ uint32_t mask = DEV_GRN_LOCK_BIT;
+
+ asm volatile(
+ " stclrlb %w[mask], %[lock]\n"
+ : [lock] "+Q" (g->descriptor)
+ : [mask] "r" (mask)
+ : "memory"
+ );
+}
+
#endif /* GRANULE_LOCK_H */
diff --git a/lib/granule/include/dev_granule.h b/lib/granule/include/dev_granule.h
new file mode 100644
index 0000000..0525188
--- /dev/null
+++ b/lib/granule/include/dev_granule.h
@@ -0,0 +1,309 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef DEV_GRANULE_H
+#define DEV_GRANULE_H
+
+#include <assert.h>
+#include <atomics.h>
+#include <dev_type.h>
+#include <errno.h>
+#include <granule_lock.h>
+#include <memory.h>
+#include <stdbool.h>
+#include <utils_def.h>
+
+/* Maximum value defined by the 'refcount' field width in dev_granule descriptor */
+#define DEV_REFCOUNT_MAX (unsigned char) \
+ ((U(1) << DEV_GRN_REFCOUNT_WIDTH) - U(1))
+
+/* Dev Granule descriptor fields access macros */
+#define DEV_LOCKED(g) \
+ ((SCA_READ8(&(g)->descriptor) & DEV_GRN_LOCK_BIT) != 0U)
+
+#define DEV_REFCOUNT(g) \
+ (SCA_READ8(&(g)->descriptor) & DEV_REFCOUNT_MASK)
+
+#define DEV_STATE(g) \
+ (unsigned char)EXTRACT(DEV_GRN_STATE, SCA_READ8(&(g)->descriptor))
+
+/*
+ * Return refcount value using atomic read.
+ */
+static inline unsigned char dev_granule_refcount_read(struct dev_granule *g)
+{
+ assert(g != NULL);
+ return DEV_REFCOUNT(g);
+}
+
+/*
+ * Return refcount value using atomic read with acquire semantics.
+ *
+ * Must be called with dev_granule lock held.
+ */
+static inline unsigned char dev_granule_refcount_read_acquire(struct dev_granule *g)
+{
+ assert((g != NULL) && DEV_LOCKED(g));
+ return SCA_READ8_ACQUIRE(&g->descriptor) & DEV_REFCOUNT_MASK;
+}
+
+/*
+ * Sanity-check unlocked dev_granule invariants.
+ * This check is performed just after acquiring the lock and/or just before
+ * releasing the lock.
+ *
+ * These invariants must hold for any dev_granule which is unlocked.
+ *
+ * These invariants may not hold transiently while a dev_granule is locked (e.g.
+ * when transitioning to/from delegated state).
+ *
+ * Note: this function is purely for debug/documentation purposes, and is not
+ * intended as a mechanism to ensure correctness.
+ */
+static inline void __dev_granule_assert_unlocked_invariants(struct dev_granule *g,
+ unsigned char state)
+{
+ (void)g;
+
+ switch (state) {
+ case DEV_GRANULE_STATE_NS:
+ assert(DEV_REFCOUNT(g) == 0U);
+ break;
+ case DEV_GRANULE_STATE_DELEGATED:
+ assert(DEV_REFCOUNT(g) == 0U);
+ break;
+ case DEV_GRANULE_STATE_MAPPED:
+ assert(DEV_REFCOUNT(g) == 0U);
+ break;
+ default:
+ /* Unknown dev_granule type */
+ assert(false);
+ }
+}
+
+/*
+ * Return the state of unlocked dev_granule.
+ * This function should be used only for NS dev_granules where RMM performs NS
+ * specific operations on the granule.
+ */
+static inline unsigned char dev_granule_unlocked_state(struct dev_granule *g)
+{
+ assert(g != NULL);
+
+ /* NOLINTNEXTLINE(clang-analyzer-core.NullDereference) */
+ return DEV_STATE(g);
+}
+
+/* Must be called with dev_granule lock held */
+static inline unsigned char dev_granule_get_state(struct dev_granule *g)
+{
+ assert((g != NULL) && DEV_LOCKED(g));
+
+ /* NOLINTNEXTLINE(clang-analyzer-core.NullDereference) */
+ return DEV_STATE(g);
+}
+
+/* Must be called with dev_granule lock held */
+static inline void dev_granule_set_state(struct dev_granule *g, unsigned char state)
+{
+ unsigned char val;
+
+ assert((g != NULL) && DEV_LOCKED(g));
+
+ /* NOLINTNEXTLINE(clang-analyzer-core.NullDereference) */
+ val = g->descriptor & DEV_STATE_MASK;
+
+ /* cppcheck-suppress misra-c2012-10.3 */
+ val ^= state << DEV_GRN_STATE_SHIFT;
+
+ /*
+ * Atomically EOR val while keeping the bits for refcount and
+ * bitlock as 0 which would preserve their values in memory.
+ */
+ (void)atomic_eor_8(&g->descriptor, val);
+}
+
+/*
+ * Acquire the bitlock and then check the expected state.
+ * Fails if an unexpected locking sequence is detected.
+ * Also asserts that the unlocked dev_granule invariants hold.
+ */
+static inline bool dev_granule_lock_on_state_match(struct dev_granule *g,
+ unsigned char expected_state)
+{
+ dev_granule_bitlock_acquire(g);
+
+ if (dev_granule_get_state(g) != expected_state) {
+ dev_granule_bitlock_release(g);
+ return false;
+ }
+
+ __dev_granule_assert_unlocked_invariants(g, expected_state);
+ return true;
+}
+
+/*
+ * Used when we're certain of the type of an object (e.g. because we hold a
+ * reference to it). In these cases we should never fail to acquire the lock.
+ */
+static inline void dev_granule_lock(struct dev_granule *g,
+ unsigned char expected_state)
+{
+ __unused bool locked = dev_granule_lock_on_state_match(g, expected_state);
+
+ assert(locked);
+}
+
+static inline void dev_granule_unlock(struct dev_granule *g)
+{
+ __dev_granule_assert_unlocked_invariants(g, dev_granule_get_state(g));
+ dev_granule_bitlock_release(g);
+}
+
+/* Transition state to @new_state and unlock the dev_granule */
+static inline void dev_granule_unlock_transition(struct dev_granule *g,
+ unsigned char new_state)
+{
+ dev_granule_set_state(g, new_state);
+ dev_granule_unlock(g);
+}
+
+/*
+ * Takes a valid pointer to a struct dev_granule, corresponding to dev_type
+ * and returns the dev_granule physical address.
+ *
+ * This is purely a lookup, and provides no guarantees about the attributes of
+ * the dev_granule (i.e. whether it is locked, its state or its reference count).
+ */
+unsigned long dev_granule_addr(const struct dev_granule *g, enum dev_type type);
+
+/*
+ * Takes an aligned dev_granule address, returns a pointer to the corresponding
+ * struct dev_granule and returns the device granule type in @type.
+ *
+ * This is purely a lookup, and provides no guarantees about the attributes of
+ * the granule (i.e. whether it is locked, its state or its reference count).
+ */
+struct dev_granule *addr_to_dev_granule(unsigned long addr, enum dev_type *type);
+
+/*
+ * Verifies whether @addr is a valid dev_granule physical address, returns
+ * a pointer to the corresponding struct dev_granule and sets device granule type.
+ *
+ * This is purely a lookup, and provides no guarantees w.r.t the state of the
+ * granule (e.g. locking).
+ *
+ * Returns:
+ * Pointer to the struct dev_granule if @addr is a valid dev_granule physical
+ * address; the device granule type is returned in @type.
+ * NULL if any of:
+ * - @addr is not aligned to the size of a granule.
+ * - @addr is out of range.
+ */
+struct dev_granule *find_dev_granule(unsigned long addr, enum dev_type *type);
+
+/*
+ * Obtain a pointer to a locked dev_granule at @addr if @addr is a valid dev_granule
+ * physical address and the state of the dev_granule at @addr is @expected_state and
+ * set device granule type.
+ *
+ * Returns:
+ * A valid dev_granule pointer if @addr is a valid dev_granule physical address;
+ * the device granule type is returned in @type.
+ * NULL if any of:
+ * - @addr is not aligned to the size of a granule.
+ * - @addr is out of range.
+ * - if the state of the dev_granule at @addr is not @expected_state.
+ */
+struct dev_granule *find_lock_dev_granule(unsigned long addr,
+ unsigned char expected_state,
+ enum dev_type *type);
+/*
+ * The refcount field occupies the least significant bits of the dev_granule
+ * descriptor, so functions which modify its value can operate directly on
+ * the whole 8-bit word without masking, provided that the result doesn't
+ * exceed DEV_REFCOUNT_MAX or drop below zero.
+ */
+
+/*
+ * Atomically increments the reference counter of the dev_granule by @val.
+ *
+ * Must be called with dev_granule lock held.
+ */
+static inline void dev_granule_refcount_inc(struct dev_granule *g,
+ unsigned char val)
+{
+ uint8_t old_refcount __unused;
+
+ assert((g != NULL) && DEV_LOCKED(g));
+ old_refcount = atomic_load_add_8(&g->descriptor, val) &
+ DEV_REFCOUNT_MASK;
+ assert((old_refcount + val) <= DEV_REFCOUNT_MAX);
+}
+
+/*
+ * Atomically increments the reference counter of the dev_granule.
+ *
+ * Must be called with dev_granule lock held.
+ */
+static inline void atomic_dev_granule_get(struct dev_granule *g)
+{
+ dev_granule_refcount_inc(g, 1U);
+}
+
+/*
+ * Atomically decrements the reference counter of the dev_granule by @val.
+ *
+ * Must be called with dev_granule lock held.
+ */
+static inline void dev_granule_refcount_dec(struct dev_granule *g, unsigned char val)
+{
+ uint8_t old_refcount __unused;
+
+ assert((g != NULL) && DEV_LOCKED(g));
+
+ /* coverity[misra_c_2012_rule_10_1_violation:SUPPRESS] */
+ old_refcount = atomic_load_add_8(&g->descriptor, (uint8_t)(-val)) &
+ DEV_REFCOUNT_MASK;
+ assert(old_refcount >= val);
+}
+
+/*
+ * Atomically decrements the reference counter of the dev_granule.
+ *
+ * Must be called with dev_granule lock held.
+ */
+static inline void atomic_dev_granule_put(struct dev_granule *g)
+{
+ dev_granule_refcount_dec(g, 1U);
+}
+
+/*
+ * Atomically decrements the reference counter of the dev_granule.
+ * Stores to memory with release semantics.
+ */
+static inline void atomic_dev_granule_put_release(struct dev_granule *g)
+{
+ uint8_t old_refcount __unused;
+
+ assert(g != NULL);
+ old_refcount = atomic_load_add_release_8(&g->descriptor,
+ (uint8_t)(-1)) & DEV_REFCOUNT_MASK;
+ assert(old_refcount != 0U);
+}
+
+/*
+ * Returns 'true' if dev_granule is locked, 'false' otherwise
+ *
+ * This function is only meant to be used for verification and testing,
+ * and this functionality is not required for RMM operations.
+ */
+static inline bool is_dev_granule_locked(struct dev_granule *g)
+{
+ assert(g != NULL);
+ return DEV_LOCKED(g);
+}
+
+#endif /* DEV_GRANULE_H */
diff --git a/lib/granule/include/fake_host/granule_lock.h b/lib/granule/include/fake_host/granule_lock.h
index 49a87c5..bce5962 100644
--- a/lib/granule/include/fake_host/granule_lock.h
+++ b/lib/granule/include/fake_host/granule_lock.h
@@ -25,4 +25,20 @@
g->descriptor &= ~GRN_LOCK_BIT;
}
+static inline void dev_granule_bitlock_acquire(struct dev_granule *g)
+{
+ /*
+ * The fake_host architecture is single threaded and we do not expect
+ * the lock to be already acquired in properly implemented locking
+ * sequence.
+ */
+ assert((g->descriptor & DEV_GRN_LOCK_BIT) == 0);
+ g->descriptor |= DEV_GRN_LOCK_BIT;
+}
+
+static inline void dev_granule_bitlock_release(struct dev_granule *g)
+{
+ g->descriptor &= ~DEV_GRN_LOCK_BIT;
+}
+
#endif /* GRANULE_LOCK_H */
diff --git a/lib/granule/include/granule_types.h b/lib/granule/include/granule_types.h
index e95f921..367340a 100644
--- a/lib/granule/include/granule_types.h
+++ b/lib/granule/include/granule_types.h
@@ -28,6 +28,8 @@
* - GRANULE_STATE_REC
* - GRANULE_STATE_PDEV
* - GRANULE_STATE_VDEV
+ * - DEV_GRANULE_STATE_NS
+ * - DEV_GRANULE_STATE_DELEGATED
*
* Otherwise a granule state is considered `internal`.
*
@@ -38,6 +40,7 @@
* - GRANULE_STATE_REC_AUX
* - GRANULE_STATE_PDEV_AUX
* - GRANULE_STATE_VDEV_AUX
+ * - DEV_GRANULE_STATE_MAPPED
*
* The following locking rules must be followed in all cases:
*
@@ -54,8 +57,9 @@
* 4. Granules in an `internal` state must be locked in order of state:
* 1. `RTT`
* 2. `DATA`
- * 3. `REC_AUX`
- * 4. `PDEV_AUX`
+ * 3. `DEV_MAPPED`
+ * 4. `REC_AUX`
+ * 5. `PDEV_AUX`
*
* 5. Granules in the same `internal` state must be locked in the order defined
* below for that specific state.
@@ -235,7 +239,6 @@
* access.
* [9:0]: refcount
*/
-
struct granule {
uint16_t descriptor;
};
@@ -253,4 +256,70 @@
#define STATE_MASK (unsigned short)MASK(GRN_STATE)
#define REFCOUNT_MASK (unsigned short)MASK(GRN_REFCOUNT)
+/*
+ * Non-Secure dev_granule (external)
+ *
+ * Dev Granule content is not protected by dev_granule::lock, as it is always
+ * subject to reads and writes from the NS world.
+ */
+#define DEV_GRANULE_STATE_NS 0U
+
+/*
+ * Delegated dev_granule (external)
+ *
+ * Dev Granule content is protected by dev_granule::lock.
+ *
+ * No references are held on this granule type.
+ */
+#define DEV_GRANULE_STATE_DELEGATED 1U
+
+/*
+ * Mapped Dev Granule (internal)
+ *
+ * Dev Granule is assigned to an IPA in a Realm.
+ *
+ * Granule content is not protected by dev_granule::lock, as it is always
+ * subject to reads and writes from within a Realm.
+ *
+ * A granule in this state is always referenced from exactly one entry
+ * in an RTT granule which must be locked before locking this granule.
+ * Only a single Dev granule can be locked at a time.
+ * The complete internal locking order for MAPPED dev_granules is:
+ *
+ * RD -> RTT -> RTT -> ... -> DEV MAPPED
+ * No references are held on this granule type.
+ */
+#define DEV_GRANULE_STATE_MAPPED 2U
+
+struct dev_granule {
+ uint8_t descriptor;
+};
+
+/*
+ * Device granule descriptor bit fields:
+ *
+ * @bit_lock protects the struct dev_granule itself. Take this lock whenever
+ * inspecting or modifying any other fields in this descriptor.
+ * [7]: bit_lock
+ *
+ * [6]: reserved
+ *
+ * @state is the state of the device granule.
+ * [5:3]: state
+ *
+ * @refcount counts RMM and realm references to this device granule.
+ * [2:0]: refcount
+ */
+#define DEV_GRN_LOCK_SHIFT U(7)
+#define DEV_GRN_LOCK_BIT (U(1) << DEV_GRN_LOCK_SHIFT)
+
+#define DEV_GRN_STATE_SHIFT U(3)
+#define DEV_GRN_STATE_WIDTH U(3)
+
+#define DEV_GRN_REFCOUNT_SHIFT U(0)
+#define DEV_GRN_REFCOUNT_WIDTH U(3)
+
+#define DEV_STATE_MASK (unsigned char)MASK(DEV_GRN_STATE)
+#define DEV_REFCOUNT_MASK (unsigned char)MASK(DEV_GRN_REFCOUNT)
+
#endif /* GRANULE_TYPES_H */
diff --git a/lib/granule/src/dev_granule.c b/lib/granule/src/dev_granule.c
new file mode 100644
index 0000000..97337ba
--- /dev/null
+++ b/lib/granule/src/dev_granule.c
@@ -0,0 +1,132 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <dev_granule.h>
+#include <platform_api.h>
+#include <stddef.h>
+#include <utils_def.h>
+
+/* Non-coherent device granules */
+IF_NCBMC(static) struct dev_granule dev_ncoh_granules[RMM_MAX_NCOH_GRANULES]
+ IF_NCBMC(__section("granules_memory"));
+
+/*
+ * Takes a valid pointer to a struct dev_granule, corresponding to dev_type
+ * and returns the dev_granule physical address.
+ *
+ * This is purely a lookup, and provides no guarantees about the attributes of
+ * the dev_granule (i.e. whether it is locked, its state or its reference count).
+ */
+unsigned long dev_granule_addr(const struct dev_granule *g, enum dev_type type)
+{
+ (void)type;
+ unsigned long idx;
+
+ assert(g != NULL);
+
+ /* No coherent device granules */
+ assert(type == DEV_RANGE_NON_COHERENT);
+
+ idx = ((unsigned long)g - (unsigned long)dev_ncoh_granules) /
+ sizeof(struct dev_granule);
+ return plat_dev_granule_idx_to_addr(idx, type);
+}
+
+/*
+ * Takes a dev_granule index, corresponding to dev_type
+ * and returns a pointer to the struct dev_granule.
+ *
+ * This is purely a lookup, and provides no guarantees about the attributes of
+ * the granule (i.e. whether it is locked, its state or its reference count).
+ */
+static struct dev_granule *dev_granule_from_idx(unsigned long idx, enum dev_type type)
+{
+ (void)type;
+
+ /* No coherent device granules */
+ assert((type == DEV_RANGE_NON_COHERENT) && (idx < RMM_MAX_NCOH_GRANULES));
+
+ return &dev_ncoh_granules[idx];
+}
+
+/*
+ * Takes an aligned dev_granule address, returns a pointer to the corresponding
+ * struct dev_granule and returns the device granule type in @type.
+ *
+ * This is purely a lookup, and provides no guarantees about the attributes of
+ * the granule (i.e. whether it is locked, its state or its reference count).
+ */
+struct dev_granule *addr_to_dev_granule(unsigned long addr, enum dev_type *type)
+{
+ unsigned long idx;
+
+ assert(GRANULE_ALIGNED(addr));
+
+ idx = plat_dev_granule_addr_to_idx(addr, type);
+ return dev_granule_from_idx(idx, *type);
+}
+
+/*
+ * Verifies whether @addr is a valid dev_granule physical address, returns
+ * a pointer to the corresponding struct dev_granule and sets device granule type.
+ *
+ * This is purely a lookup, and provides no guarantees w.r.t the state of the
+ * granule (e.g. locking).
+ *
+ * Returns:
+ * Pointer to the struct dev_granule if @addr is a valid dev_granule physical
+ * address; the device granule type is returned in @type.
+ * NULL if any of:
+ * - @addr is not aligned to the size of a granule.
+ * - @addr is out of range.
+ */
+struct dev_granule *find_dev_granule(unsigned long addr, enum dev_type *type)
+{
+ unsigned long idx;
+
+ if (!GRANULE_ALIGNED(addr)) {
+ return NULL;
+ }
+
+ idx = plat_dev_granule_addr_to_idx(addr, type);
+
+ if (idx == UINT64_MAX) {
+ return NULL;
+ }
+
+ return dev_granule_from_idx(idx, *type);
+}
+
+/*
+ * Obtain a pointer to a locked dev_granule at @addr if @addr is a valid dev_granule
+ * physical address and the state of the dev_granule at @addr is @expected_state and
+ * set device granule type.
+ *
+ * Returns:
+ * A valid dev_granule pointer if @addr is a valid dev_granule physical address;
+ * the device granule type is returned in @type.
+ * NULL if any of:
+ * - @addr is not aligned to the size of a granule.
+ * - @addr is out of range.
+ * - if the state of the dev_granule at @addr is not @expected_state.
+ */
+struct dev_granule *find_lock_dev_granule(unsigned long addr,
+ unsigned char expected_state,
+ enum dev_type *type)
+{
+ struct dev_granule *g = find_dev_granule(addr, type);
+
+ if (g == NULL) {
+ return NULL;
+ }
+
+ if (!dev_granule_lock_on_state_match(g, expected_state)) {
+ return NULL;
+ }
+
+ return g;
+}
diff --git a/lib/granule/src/granule.c b/lib/granule/src/granule.c
index b5dff23..8a67a87 100644
--- a/lib/granule/src/granule.c
+++ b/lib/granule/src/granule.c
@@ -7,12 +7,8 @@
#include <assert.h>
#include <debug.h>
#include <granule.h>
-#include <mmio.h>
#include <platform_api.h>
#include <stddef.h>
-/* According to the C standard, the memset function used in this file is declared in string.h */
-/* coverity[unnecessary_header: SUPPRESS] */
-#include <string.h>
#include <utils_def.h>
IF_NCBMC(static) struct granule granules[RMM_MAX_GRANULES]
diff --git a/lib/rmm_el3_ifc/include/rmm_el3_ifc.h b/lib/rmm_el3_ifc/include/rmm_el3_ifc.h
index 8308f74..eca8794 100644
--- a/lib/rmm_el3_ifc/include/rmm_el3_ifc.h
+++ b/lib/rmm_el3_ifc/include/rmm_el3_ifc.h
@@ -6,6 +6,10 @@
#ifndef RMM_EL3_IFC_H
#define RMM_EL3_IFC_H
+#ifndef __ASSEMBLER__
+#include <dev_type.h>
+#endif
+
#include <sizes.h>
#include <smc.h>
@@ -249,11 +253,6 @@
/*****************************************************************************
* Boot Manifest definitions, functions and structures (v0.4)
****************************************************************************/
-enum range_type {
- DEV_RANGE_COHERENT,
- DEV_RANGE_NON_COHERENT,
- DEV_RANGE_MAX
-};
/* Console info structure */
struct console_info {
@@ -385,7 +384,7 @@
*/
int rmm_el3_ifc_get_dev_range_validated_pa(unsigned long max_num_banks,
struct memory_info **plat_dev_range_info,
- enum range_type type);
+ enum dev_type type);
/****************************************************************************
* RMM-EL3 Runtime interface APIs
diff --git a/lib/rmm_el3_ifc/src/rmm_el3_ifc_manifest.c b/lib/rmm_el3_ifc/src/rmm_el3_ifc_manifest.c
index 738290f..651892e 100644
--- a/lib/rmm_el3_ifc/src/rmm_el3_ifc_manifest.c
+++ b/lib/rmm_el3_ifc/src/rmm_el3_ifc_manifest.c
@@ -242,7 +242,7 @@
*/
int rmm_el3_ifc_get_dev_range_validated_pa(unsigned long max_num_banks,
struct memory_info **plat_dev_range_info,
- enum range_type type)
+ enum dev_type type)
{
struct memory_info *plat_memory;
unsigned int max_granules;