Wedson Almeida Filho | 11a9b0b | 2018-11-30 18:21:51 +0000 | [diff] [blame] | 1 | /* |
Andrew Walbran | 692b325 | 2019-03-07 15:51:31 +0000 | [diff] [blame] | 2 | * Copyright 2018 The Hafnium Authors. |
Wedson Almeida Filho | 11a9b0b | 2018-11-30 18:21:51 +0000 | [diff] [blame] | 3 | * |
Andrew Walbran | e959ec1 | 2020-06-17 15:01:09 +0100 | [diff] [blame^] | 4 | * Use of this source code is governed by a BSD-style |
| 5 | * license that can be found in the LICENSE file or at |
| 6 | * https://opensource.org/licenses/BSD-3-Clause. |
Wedson Almeida Filho | 11a9b0b | 2018-11-30 18:21:51 +0000 | [diff] [blame] | 7 | */ |
| 8 | |
#include <stdalign.h>

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

#include <gmock/gmock.h>

extern "C" {
#include "hf/mpool.h"
}
| 16 | |
| 17 | namespace |
| 18 | { |
Andrew Scull | 63d1f3f | 2018-12-06 13:29:10 +0000 | [diff] [blame] | 19 | using ::testing::Eq; |
Wedson Almeida Filho | 11a9b0b | 2018-11-30 18:21:51 +0000 | [diff] [blame] | 20 | using ::testing::IsNull; |
| 21 | using ::testing::NotNull; |
| 22 | |
/**
 * Checks that the given allocations come from the given chunks: the number of
 * allocations must exactly cover every chunk, the first allocation of each
 * chunk must be the chunk's base address, and the remaining allocations of a
 * chunk must follow contiguously at entry_size strides.
 *
 * Note: sorts both `allocs` and `chunks` in place.
 */
bool check_allocs(std::vector<std::unique_ptr<char[]>>& chunks,
		  std::vector<uintptr_t>& allocs, size_t entries_per_chunk,
		  size_t entry_size)
{
	if (allocs.size() != chunks.size() * entries_per_chunk) {
		return false;
	}

	/*
	 * Sort both sides into address order so chunks and allocations can be
	 * compared positionally. Qualify with std:: — relying on ADL here is
	 * non-portable when vector iterators are raw pointers.
	 */
	std::sort(allocs.begin(), allocs.end());
	std::sort(chunks.begin(), chunks.end(),
		  [](const std::unique_ptr<char[]>& a,
		     const std::unique_ptr<char[]>& b) {
			  return a.get() < b.get();
		  });

	for (size_t i = 0; i < chunks.size(); i++) {
		/* First entry of each chunk must be the chunk's base. */
		if ((uintptr_t)chunks[i].get() !=
		    allocs[i * entries_per_chunk]) {
			return false;
		}

		/* Subsequent entries must be contiguous within the chunk. */
		for (size_t j = 1; j < entries_per_chunk; j++) {
			size_t k = i * entries_per_chunk + j;
			if (allocs[k] != allocs[k - 1] + entry_size) {
				return false;
			}
		}
	}

	return true;
}
| 59 | |
| 60 | /** |
| 61 | * Add chunks to the given mem pool and chunk vector. |
| 62 | */ |
| 63 | static void add_chunks(std::vector<std::unique_ptr<char[]>>& chunks, |
| 64 | struct mpool* p, size_t count, size_t size) |
| 65 | { |
| 66 | size_t i; |
| 67 | |
| 68 | for (i = 0; i < count; i++) { |
| 69 | chunks.emplace_back(std::make_unique<char[]>(size)); |
| 70 | mpool_add_chunk(p, chunks.back().get(), size); |
| 71 | } |
| 72 | } |
| 73 | |
| 74 | /** |
| 75 | * Validates allocations from a memory pool. |
| 76 | */ |
| 77 | TEST(mpool, allocation) |
| 78 | { |
| 79 | struct mpool p; |
| 80 | constexpr size_t entry_size = 16; |
| 81 | constexpr size_t entries_per_chunk = 10; |
| 82 | constexpr size_t chunk_count = 10; |
| 83 | std::vector<std::unique_ptr<char[]>> chunks; |
| 84 | std::vector<uintptr_t> allocs; |
| 85 | void* ret; |
| 86 | |
| 87 | mpool_init(&p, entry_size); |
| 88 | |
| 89 | /* Allocate from an empty pool. */ |
| 90 | EXPECT_THAT(mpool_alloc(&p), IsNull()); |
| 91 | |
| 92 | /* |
| 93 | * Add a chunk that is too small, it should be ignored, and allocation |
| 94 | * should return NULL. |
| 95 | */ |
| 96 | mpool_add_chunk(&p, NULL, entry_size - 1); |
| 97 | EXPECT_THAT(mpool_alloc(&p), IsNull()); |
| 98 | |
| 99 | /* Allocate a number of chunks and add them to the pool. */ |
| 100 | add_chunks(chunks, &p, chunk_count, entries_per_chunk * entry_size); |
| 101 | |
| 102 | /* Allocate from the pool until we run out of memory. */ |
| 103 | while ((ret = mpool_alloc(&p))) { |
| 104 | allocs.push_back((uintptr_t)ret); |
| 105 | } |
| 106 | |
| 107 | /* Check that returned entries are within chunks that were added. */ |
| 108 | ASSERT_THAT(check_allocs(chunks, allocs, entries_per_chunk, entry_size), |
| 109 | true); |
| 110 | } |
| 111 | |
| 112 | /** |
| 113 | * Validates frees into a memory pool. |
| 114 | */ |
| 115 | TEST(mpool, freeing) |
| 116 | { |
| 117 | struct mpool p; |
| 118 | constexpr size_t entry_size = 16; |
Andrew Scull | 63d1f3f | 2018-12-06 13:29:10 +0000 | [diff] [blame] | 119 | constexpr size_t entries_per_chunk = 12; |
Wedson Almeida Filho | 11a9b0b | 2018-11-30 18:21:51 +0000 | [diff] [blame] | 120 | constexpr size_t chunk_count = 10; |
| 121 | std::vector<std::unique_ptr<char[]>> chunks; |
| 122 | std::vector<uintptr_t> allocs; |
| 123 | size_t i; |
| 124 | alignas(entry_size) char entry[entry_size]; |
| 125 | void* ret; |
| 126 | |
| 127 | mpool_init(&p, entry_size); |
| 128 | |
| 129 | /* Allocate from an empty pool. */ |
| 130 | EXPECT_THAT(mpool_alloc(&p), IsNull()); |
| 131 | |
| 132 | /* Free an entry into the pool, then allocate it back. */ |
| 133 | mpool_free(&p, &entry[0]); |
| 134 | EXPECT_THAT(mpool_alloc(&p), (void*)&entry[0]); |
| 135 | EXPECT_THAT(mpool_alloc(&p), IsNull()); |
| 136 | |
| 137 | /* Allocate a number of chunks and add them to the pool. */ |
| 138 | add_chunks(chunks, &p, chunk_count, entries_per_chunk * entry_size); |
| 139 | |
| 140 | /* |
| 141 | * Free again into the pool. Ensure that we get entry back on next |
| 142 | * allocation instead of something from the chunks. |
| 143 | */ |
| 144 | mpool_free(&p, &entry[0]); |
| 145 | EXPECT_THAT(mpool_alloc(&p), (void*)&entry[0]); |
| 146 | |
| 147 | /* Allocate from the pool until we run out of memory. */ |
| 148 | while ((ret = mpool_alloc(&p))) { |
| 149 | allocs.push_back((uintptr_t)ret); |
| 150 | } |
| 151 | |
| 152 | /* |
| 153 | * Free again into the pool. Ensure that we get entry back on next |
| 154 | * allocation instead of something from the chunks. |
| 155 | */ |
| 156 | mpool_free(&p, &entry[0]); |
| 157 | EXPECT_THAT(mpool_alloc(&p), (void*)&entry[0]); |
| 158 | |
| 159 | /* Add entries back to the pool by freeing them. */ |
| 160 | for (i = 0; i < allocs.size(); i++) { |
| 161 | mpool_free(&p, (void*)allocs[i]); |
| 162 | } |
| 163 | allocs.clear(); |
| 164 | |
| 165 | /* Allocate from the pool until we run out of memory. */ |
| 166 | while ((ret = mpool_alloc(&p))) { |
| 167 | allocs.push_back((uintptr_t)ret); |
| 168 | } |
| 169 | |
| 170 | /* Check that returned entries are within chunks that were added. */ |
| 171 | ASSERT_THAT(check_allocs(chunks, allocs, entries_per_chunk, entry_size), |
| 172 | true); |
| 173 | } |
| 174 | |
| 175 | /** |
| 176 | * Initialises a memory pool from an existing one. |
| 177 | */ |
| 178 | TEST(mpool, init_from) |
| 179 | { |
| 180 | struct mpool p, q; |
| 181 | constexpr size_t entry_size = 16; |
| 182 | constexpr size_t entries_per_chunk = 10; |
| 183 | constexpr size_t chunk_count = 10; |
| 184 | std::vector<std::unique_ptr<char[]>> chunks; |
| 185 | std::vector<uintptr_t> allocs; |
| 186 | size_t i; |
| 187 | void* ret; |
| 188 | |
| 189 | mpool_init(&p, entry_size); |
| 190 | |
| 191 | /* Allocate a number of chunks and add them to the pool. */ |
| 192 | add_chunks(chunks, &p, chunk_count, entries_per_chunk * entry_size); |
| 193 | |
| 194 | /* Allocate half of the elements. */ |
| 195 | for (i = 0; i < entries_per_chunk * chunk_count / 2; i++) { |
| 196 | void* ret = mpool_alloc(&p); |
| 197 | ASSERT_THAT(ret, NotNull()); |
| 198 | allocs.push_back((uintptr_t)ret); |
| 199 | } |
| 200 | |
| 201 | /* Add entries back to the pool by freeing them. */ |
| 202 | for (i = 0; i < allocs.size(); i++) { |
| 203 | mpool_free(&p, (void*)allocs[i]); |
| 204 | } |
| 205 | allocs.clear(); |
| 206 | |
| 207 | /* Initialise q from p. */ |
| 208 | mpool_init_from(&q, &p); |
| 209 | |
| 210 | /* Allocation from p must now fail. */ |
| 211 | EXPECT_THAT(mpool_alloc(&p), IsNull()); |
| 212 | |
| 213 | /* Allocate from q until we run out of memory. */ |
| 214 | while ((ret = mpool_alloc(&q))) { |
| 215 | allocs.push_back((uintptr_t)ret); |
| 216 | } |
| 217 | |
| 218 | /* Check that returned entries are within chunks that were added. */ |
| 219 | ASSERT_THAT(check_allocs(chunks, allocs, entries_per_chunk, entry_size), |
| 220 | true); |
| 221 | } |
| 222 | |
| 223 | /** |
| 224 | * Initialises a memory pool from an existing one. |
| 225 | */ |
| 226 | TEST(mpool, alloc_contiguous) |
| 227 | { |
| 228 | struct mpool p; |
| 229 | constexpr size_t entry_size = 16; |
Andrew Scull | f0f6be5 | 2018-12-21 14:44:33 +0000 | [diff] [blame] | 230 | constexpr size_t entries_per_chunk = 12; |
Wedson Almeida Filho | 11a9b0b | 2018-11-30 18:21:51 +0000 | [diff] [blame] | 231 | constexpr size_t chunk_count = 10; |
| 232 | std::vector<std::unique_ptr<char[]>> chunks; |
| 233 | std::vector<uintptr_t> allocs; |
| 234 | size_t i; |
| 235 | void* ret; |
| 236 | uintptr_t next; |
| 237 | |
| 238 | mpool_init(&p, entry_size); |
| 239 | |
| 240 | /* Allocate a number of chunks and add them to the pool. */ |
| 241 | add_chunks(chunks, &p, chunk_count, entries_per_chunk * entry_size); |
| 242 | |
| 243 | /* |
| 244 | * Allocate entries until the remaining chunk is aligned to 2 entries, |
| 245 | * but not aligned to 4 entries. |
| 246 | */ |
| 247 | do { |
| 248 | ret = mpool_alloc(&p); |
| 249 | ASSERT_THAT(ret, NotNull()); |
| 250 | allocs.push_back((uintptr_t)ret); |
Andrew Scull | 63d1f3f | 2018-12-06 13:29:10 +0000 | [diff] [blame] | 251 | next = ((uintptr_t)ret / entry_size) + 1; |
Wedson Almeida Filho | 11a9b0b | 2018-11-30 18:21:51 +0000 | [diff] [blame] | 252 | } while ((next % 4) != 2); |
| 253 | |
| 254 | /* Allocate 5 entries with an alignment of 4. So two must be skipped. */ |
| 255 | ret = mpool_alloc_contiguous(&p, 5, 4); |
| 256 | ASSERT_THAT(ret, NotNull()); |
| 257 | ASSERT_THAT((uintptr_t)ret, (next + 2) * entry_size); |
| 258 | for (i = 0; i < 5; i++) { |
| 259 | allocs.push_back((uintptr_t)ret + i * entry_size); |
| 260 | } |
| 261 | |
| 262 | /* Allocate a whole chunk. */ |
| 263 | ret = mpool_alloc_contiguous(&p, entries_per_chunk, 1); |
| 264 | ASSERT_THAT(ret, NotNull()); |
| 265 | for (i = 0; i < entries_per_chunk; i++) { |
| 266 | allocs.push_back((uintptr_t)ret + i * entry_size); |
| 267 | } |
| 268 | |
| 269 | /* Allocate 2 entries that are already aligned. */ |
| 270 | ret = mpool_alloc_contiguous(&p, 2, 1); |
| 271 | ASSERT_THAT(ret, NotNull()); |
| 272 | allocs.push_back((uintptr_t)ret); |
| 273 | allocs.push_back((uintptr_t)ret + entry_size); |
| 274 | |
| 275 | /* Allocate from p until we run out of memory. */ |
| 276 | while ((ret = mpool_alloc(&p))) { |
| 277 | allocs.push_back((uintptr_t)ret); |
| 278 | } |
| 279 | |
| 280 | /* Check that returned entries are within chunks that were added. */ |
| 281 | ASSERT_THAT(check_allocs(chunks, allocs, entries_per_chunk, entry_size), |
| 282 | true); |
| 283 | } |
| 284 | |
Andrew Scull | 63d1f3f | 2018-12-06 13:29:10 +0000 | [diff] [blame] | 285 | TEST(mpool, allocation_with_fallback) |
| 286 | { |
| 287 | struct mpool fallback; |
| 288 | struct mpool p; |
| 289 | constexpr size_t entry_size = 16; |
| 290 | constexpr size_t entries_per_chunk = 10; |
| 291 | constexpr size_t chunk_count = 10; |
| 292 | std::vector<std::unique_ptr<char[]>> chunks; |
| 293 | std::vector<uintptr_t> allocs; |
| 294 | void* ret; |
| 295 | |
| 296 | mpool_init(&fallback, entry_size); |
| 297 | mpool_init_with_fallback(&p, &fallback); |
| 298 | |
| 299 | /* Allocate from an empty pool. */ |
| 300 | EXPECT_THAT(mpool_alloc(&p), IsNull()); |
| 301 | |
| 302 | /* Allocate a number of chunks and add them to the fallback pool. */ |
| 303 | add_chunks(chunks, &fallback, chunk_count, |
| 304 | entries_per_chunk * entry_size); |
| 305 | |
| 306 | /* Allocate from the pool until we run out of memory. */ |
| 307 | while ((ret = mpool_alloc(&p))) { |
| 308 | allocs.push_back((uintptr_t)ret); |
| 309 | } |
| 310 | |
| 311 | /* Check that returned entries are within chunks that were added. */ |
| 312 | ASSERT_THAT(check_allocs(chunks, allocs, entries_per_chunk, entry_size), |
| 313 | true); |
| 314 | } |
| 315 | |
| 316 | TEST(mpool, free_with_fallback) |
| 317 | { |
| 318 | struct mpool fallback; |
| 319 | struct mpool p; |
| 320 | constexpr size_t entry_size = 16; |
| 321 | constexpr size_t entries_per_chunk = 1; |
| 322 | constexpr size_t chunk_count = 1; |
| 323 | std::vector<std::unique_ptr<char[]>> chunks; |
| 324 | std::vector<uintptr_t> allocs; |
| 325 | void* ret; |
| 326 | |
| 327 | mpool_init(&fallback, entry_size); |
| 328 | mpool_init_with_fallback(&p, &fallback); |
| 329 | |
| 330 | /* Allocate a number of chunks and add them to the fallback pool. */ |
| 331 | add_chunks(chunks, &fallback, chunk_count, |
| 332 | entries_per_chunk * entry_size); |
| 333 | |
| 334 | /* Allocate, making use of the fallback and free again. */ |
| 335 | ret = mpool_alloc(&p); |
| 336 | mpool_free(&p, ret); |
| 337 | |
| 338 | /* The entry is not available in the fallback. */ |
| 339 | EXPECT_THAT(mpool_alloc(&fallback), IsNull()); |
| 340 | |
| 341 | /* The entry will be allocated by the local pool. */ |
| 342 | EXPECT_THAT(mpool_alloc(&p), Eq(ret)); |
| 343 | |
| 344 | /* Return the memory to the local pool and then to the fallback. */ |
| 345 | mpool_free(&p, ret); |
| 346 | mpool_fini(&p); |
| 347 | |
| 348 | /* The fallback can now allocate the entry. */ |
| 349 | EXPECT_THAT(mpool_alloc(&fallback), Eq(ret)); |
| 350 | } |
| 351 | |
Wedson Almeida Filho | 11a9b0b | 2018-11-30 18:21:51 +0000 | [diff] [blame] | 352 | } /* namespace */ |