/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#ifndef DEV_GRANULE_H
#define DEV_GRANULE_H

#include <assert.h>
#include <atomics.h>
#include <dev_type.h>
#include <errno.h>
#include <granule_lock.h>
#include <memory.h>
#include <stdbool.h>
#include <utils_def.h>

/* Maximum value defined by the 'refcount' field width in dev_granule descriptor */
#define DEV_REFCOUNT_MAX	(unsigned char)	\
	((U(1) << DEV_GRN_REFCOUNT_WIDTH) - U(1))

/* Dev Granule descriptor fields access macros */
#define DEV_LOCKED(g)	\
	((SCA_READ8(&(g)->descriptor) & DEV_GRN_LOCK_BIT) != 0U)

#define DEV_REFCOUNT(g)	\
	(SCA_READ8(&(g)->descriptor) & DEV_REFCOUNT_MASK)

#define DEV_STATE(g)	\
	(unsigned char)EXTRACT(DEV_GRN_STATE, SCA_READ8(&(g)->descriptor))
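
/*
 * Illustrative note (a sketch, not part of the API): the 8-bit descriptor
 * packs the refcount in its DEV_GRN_REFCOUNT_WIDTH least significant bits,
 * the state in the DEV_GRN_STATE field and the lock in DEV_GRN_LOCK_BIT,
 * all provided by the headers included above. Assuming a hypothetical
 * refcount width of 6 bits:
 *
 *	DEV_REFCOUNT_MAX == (unsigned char)((1U << 6U) - 1U) == 63U
 *
 * i.e. at most 63 outstanding references could be tracked per dev_granule.
 */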

/*
 * Return refcount value using atomic read.
 */
static inline unsigned char dev_granule_refcount_read(struct dev_granule *g)
{
	assert(g != NULL);
	return DEV_REFCOUNT(g);
}

/*
 * Return refcount value using atomic read with acquire semantics.
 *
 * Must be called with dev_granule lock held.
 */
static inline unsigned char dev_granule_refcount_read_acquire(struct dev_granule *g)
{
	assert((g != NULL) && DEV_LOCKED(g));
	return SCA_READ8_ACQUIRE(&g->descriptor) & DEV_REFCOUNT_MASK;
}

/*
 * Sanity-check unlocked dev_granule invariants.
 * This check is performed just after acquiring the lock and/or just before
 * releasing the lock.
 *
 * These invariants must hold for any dev_granule which is unlocked.
 *
 * These invariants may not hold transiently while a dev_granule is locked (e.g.
 * when transitioning to/from delegated state).
 *
 * Note: this function is purely for debug/documentation purposes, and is not
 * intended as a mechanism to ensure correctness.
 */
static inline void __dev_granule_assert_unlocked_invariants(struct dev_granule *g,
							     unsigned char state)
{
	(void)g;

	switch (state) {
	case DEV_GRANULE_STATE_NS:
		assert(DEV_REFCOUNT(g) == 0U);
		break;
	case DEV_GRANULE_STATE_DELEGATED:
		assert(DEV_REFCOUNT(g) == 0U);
		break;
	case DEV_GRANULE_STATE_MAPPED:
		assert(DEV_REFCOUNT(g) == 0U);
		break;
	default:
		/* Unknown dev_granule state */
		assert(false);
	}
}

/*
 * Return the state of an unlocked dev_granule.
 * This function should be used only for NS dev_granules where RMM performs NS
 * specific operations on the granule.
 */
static inline unsigned char dev_granule_unlocked_state(struct dev_granule *g)
{
	assert(g != NULL);

	/* NOLINTNEXTLINE(clang-analyzer-core.NullDereference) */
	return DEV_STATE(g);
}

/* Must be called with dev_granule lock held */
static inline unsigned char dev_granule_get_state(struct dev_granule *g)
{
	assert((g != NULL) && DEV_LOCKED(g));

	/* NOLINTNEXTLINE(clang-analyzer-core.NullDereference) */
	return DEV_STATE(g);
}

/* Must be called with dev_granule lock held */
static inline void dev_granule_set_state(struct dev_granule *g, unsigned char state)
{
	unsigned char val;

	assert((g != NULL) && DEV_LOCKED(g));

	/* NOLINTNEXTLINE(clang-analyzer-core.NullDereference) */
	val = g->descriptor & DEV_STATE_MASK;

	/* cppcheck-suppress misra-c2012-10.3 */
	val ^= state << DEV_GRN_STATE_SHIFT;

	/*
	 * Atomically EOR val while keeping the bits for refcount and
	 * bitlock at 0, which preserves their values in memory.
	 */
	(void)atomic_eor_8(&g->descriptor, val);
}
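
/*
 * Worked example of the EOR update above (illustrative only, with made-up
 * 2-bit state encodings): if the current state bits are 0b01 and the new
 * state is 0b10, then
 *
 *	val         = 0b01 ^ 0b10 = 0b11	(state bits only, refcount/lock = 0)
 *	descriptor ^= val			(state flips from 0b01 to 0b10)
 *
 * Because the refcount and lock bits of 'val' are zero, the EOR leaves those
 * fields in memory untouched.
 */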

/*
 * Acquire the bitlock and then check the expected state.
 * Fails (returns false) and releases the bitlock if the dev_granule is not in
 * the expected state.
 * Also asserts that the unlocked dev_granule invariants hold on success.
 */
static inline bool dev_granule_lock_on_state_match(struct dev_granule *g,
						   unsigned char expected_state)
{
	dev_granule_bitlock_acquire(g);

	if (dev_granule_get_state(g) != expected_state) {
		dev_granule_bitlock_release(g);
		return false;
	}

	__dev_granule_assert_unlocked_invariants(g, expected_state);
	return true;
}
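
/*
 * Example usage (an illustrative sketch, not lifted from RMM code): callers
 * that cannot be certain of the current state use
 * dev_granule_lock_on_state_match() and handle the mismatch themselves:
 *
 *	if (!dev_granule_lock_on_state_match(g, DEV_GRANULE_STATE_DELEGATED)) {
 *		return -EINVAL;
 *	}
 *	... access the delegated dev_granule ...
 *	dev_granule_unlock(g);
 */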

/*
 * Used when we're certain of the type of an object (e.g. because we hold a
 * reference to it). In these cases we should never fail to acquire the lock.
 */
static inline void dev_granule_lock(struct dev_granule *g,
				    unsigned char expected_state)
{
	__unused bool locked = dev_granule_lock_on_state_match(g, expected_state);

	assert(locked);
}

static inline void dev_granule_unlock(struct dev_granule *g)
{
	__dev_granule_assert_unlocked_invariants(g, dev_granule_get_state(g));
	dev_granule_bitlock_release(g);
}

/* Transition state to @new_state and unlock the dev_granule */
static inline void dev_granule_unlock_transition(struct dev_granule *g,
						 unsigned char new_state)
{
	dev_granule_set_state(g, new_state);
	dev_granule_unlock(g);
}

/*
 * Takes a valid pointer to a struct dev_granule of device type @type and
 * returns the dev_granule physical address.
 *
 * This is purely a lookup, and provides no guarantees about the attributes of
 * the dev_granule (i.e. whether it is locked, its state or its reference count).
 */
unsigned long dev_granule_addr(const struct dev_granule *g, enum dev_type type);

/*
 * Takes an aligned dev_granule address, returns a pointer to the corresponding
 * struct dev_granule and stores the device granule type in the location
 * pointed to by @type.
 *
 * This is purely a lookup, and provides no guarantees about the attributes of
 * the granule (i.e. whether it is locked, its state or its reference count).
 */
struct dev_granule *addr_to_dev_granule(unsigned long addr, enum dev_type *type);

/*
 * Verifies whether @addr is a valid dev_granule physical address, returns
 * a pointer to the corresponding struct dev_granule and stores the device
 * granule type in the location pointed to by @type.
 *
 * This is purely a lookup, and provides no guarantees w.r.t. the state of the
 * granule (e.g. locking).
 *
 * Returns:
 * Pointer to the struct dev_granule if @addr is a valid dev_granule physical
 * address, with the device granule type stored in @type.
 * NULL if any of:
 * - @addr is not aligned to the size of a granule.
 * - @addr is out of range.
 */
struct dev_granule *find_dev_granule(unsigned long addr, enum dev_type *type);

/*
 * Obtain a pointer to a locked dev_granule at @addr if @addr is a valid dev_granule
 * physical address and the state of the dev_granule at @addr is @expected_state,
 * and store the device granule type in the location pointed to by @type.
 *
 * Returns:
 * A valid dev_granule pointer if @addr is a valid dev_granule physical address,
 * with the device granule type stored in @type.
 * NULL if any of:
 * - @addr is not aligned to the size of a granule.
 * - @addr is out of range.
 * - the state of the dev_granule at @addr is not @expected_state.
 */
struct dev_granule *find_lock_dev_granule(unsigned long addr,
					  unsigned char expected_state,
					  enum dev_type *type);
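
/*
 * Example flow (an illustrative sketch; the surrounding logic and the chosen
 * states are hypothetical):
 *
 *	enum dev_type type;
 *	struct dev_granule *g;
 *
 *	g = find_lock_dev_granule(addr, DEV_GRANULE_STATE_NS, &type);
 *	if (g == NULL) {
 *		return -EINVAL;
 *	}
 *	... operate on the dev_granule contents ...
 *	dev_granule_unlock_transition(g, DEV_GRANULE_STATE_DELEGATED);
 */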

/*
 * The refcount field occupies the least significant bits of the dev_granule
 * descriptor, so functions which modify its value can operate directly on
 * the whole 8-bit word without masking, provided that the result does not
 * exceed DEV_REFCOUNT_MAX or drop below zero.
 */

/*
 * Atomically increments the reference counter of the dev_granule by @val.
 *
 * Must be called with dev_granule lock held.
 */
static inline void dev_granule_refcount_inc(struct dev_granule *g,
					    unsigned char val)
{
	uint8_t old_refcount __unused;

	assert((g != NULL) && DEV_LOCKED(g));
	old_refcount = atomic_load_add_8(&g->descriptor, val) &
						DEV_REFCOUNT_MASK;
	assert((old_refcount + val) <= DEV_REFCOUNT_MAX);
}

/*
 * Atomically increments the reference counter of the dev_granule.
 *
 * Must be called with dev_granule lock held.
 */
static inline void atomic_dev_granule_get(struct dev_granule *g)
{
	dev_granule_refcount_inc(g, 1U);
}

/*
 * Atomically decrements the reference counter of the dev_granule by @val.
 *
 * Must be called with dev_granule lock held.
 */
static inline void dev_granule_refcount_dec(struct dev_granule *g, unsigned char val)
{
	uint8_t old_refcount __unused;

	assert((g != NULL) && DEV_LOCKED(g));

	/* coverity[misra_c_2012_rule_10_1_violation:SUPPRESS] */
	old_refcount = atomic_load_add_8(&g->descriptor, (uint8_t)(-val)) &
						DEV_REFCOUNT_MASK;
	assert(old_refcount >= val);
}

/*
 * Atomically decrements the reference counter of the dev_granule.
 *
 * Must be called with dev_granule lock held.
 */
static inline void atomic_dev_granule_put(struct dev_granule *g)
{
	dev_granule_refcount_dec(g, 1U);
}
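
/*
 * Illustrative pairing (a sketch, not mandated by this header): a component
 * that hands out a mapping of a dev_granule typically takes a reference for
 * the lifetime of that mapping and drops it when the mapping is destroyed:
 *
 *	atomic_dev_granule_get(g);	(with the dev_granule lock held)
 *	...
 *	atomic_dev_granule_put(g);	(with the dev_granule lock held)
 */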

/*
 * Atomically decrements the reference counter of the dev_granule.
 * Stores to memory with release semantics.
 */
static inline void atomic_dev_granule_put_release(struct dev_granule *g)
{
	uint8_t old_refcount __unused;

	assert(g != NULL);
	old_refcount = atomic_load_add_release_8(&g->descriptor,
					(uint8_t)(-1)) & DEV_REFCOUNT_MASK;
	assert(old_refcount != 0U);
}

/*
 * Returns 'true' if dev_granule is locked, 'false' otherwise.
 *
 * This function is only meant to be used for verification and testing,
 * and this functionality is not required for RMM operations.
 */
static inline bool is_dev_granule_locked(struct dev_granule *g)
{
	assert(g != NULL);
	return DEV_LOCKED(g);
}

#endif /* DEV_GRANULE_H */