/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/mm.h"
#include "hf/mpool.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::SizeIs;
using ::testing::Truly;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const int TOP_LEVEL = arch_mm_max_level(0);
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
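
/*
 * Note: these values assume the usual AArch64 4KiB granule (PAGE_BITS == 12,
 * PAGE_LEVEL_BITS == 9, so MM_PTE_PER_PAGE == 512): each top level (level 2)
 * block then spans 1GiB, a 512-entry root table covers 512GiB, and the 2TiB
 * of VM memory below VM_MEM_END needs 4 concatenated root tables. This is why
 * the stage-2 tests below expect get_ptable() to have SizeIs(4), while a
 * stage-1 table has a single root, hence SizeIs(1).
 */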

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
size_t mm_entry_size(int level)
{
        return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
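
/*
 * As a worked example of the formula above (again assuming PAGE_BITS == 12
 * and PAGE_LEVEL_BITS == 9): mm_entry_size(0) is 4KiB, mm_entry_size(1) is
 * 2MiB and mm_entry_size(2) is 1GiB.
 */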

/**
 * Get an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
{
        auto table = reinterpret_cast<struct mm_page_table *>(
                ptr_from_va(va_from_pa(pa)));
        return std::span<pte_t, MM_PTE_PER_PAGE>(table->entries,
                                                 std::end(table->entries));
}

/**
 * Get an STL representation of the ptable.
 */
std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
        const struct mm_ptable &ptable, int mode)
{
        std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
        const uint8_t root_table_count = arch_mm_root_table_count(mode);
        for (uint8_t i = 0; i < root_table_count; ++i) {
                all.push_back(get_table(
                        pa_add(ptable.root, i * sizeof(struct mm_page_table))));
        }
        return all;
}
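
/*
 * Note that get_ptable() relies on the root tables of a concatenated page
 * table being laid out contiguously, one page-sized mm_page_table after
 * another starting at ptable.root; the pa_add() stride in the loop above
 * depends on that layout.
 */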

class mm : public ::testing::Test
{
        void SetUp() override
        {
                /*
                 * TODO: replace with direct use of stdlib allocator so
                 * sanitizers are more effective.
                 */
                test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
                mpool_init(&ppool, sizeof(struct mm_page_table));
                mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
        }

        std::unique_ptr<uint8_t[]> test_heap;

protected:
        struct mpool ppool;
};
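
/*
 * Each test allocates its page tables from ppool, which SetUp() seeds with
 * TEST_HEAP_SIZE bytes (16 pages) carved into mm_page_table sized entries;
 * the tables are handed back to the pool via mm_ptable_fini() at the end of
 * each test.
 */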

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
        constexpr int mode = MM_MODE_STAGE1;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(1), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Only the first page is mapped with all others left absent.
 */
TEST_F(mm, map_first_page)
{
        constexpr int mode = 0;
        const paddr_t page_begin = pa_init(0);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       nullptr, &ppool));

        auto tables = get_ptable(ptable, mode);
        EXPECT_THAT(tables, SizeIs(4));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check that the first page is mapped and nothing else. */
        EXPECT_THAT(std::span(tables).last(3),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        auto table_l2 = tables.front();
        EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

        auto table_l1 =
                get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
        EXPECT_THAT(table_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

        auto table_l0 =
                get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
                    Eq(pa_addr(page_begin)));

        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
        constexpr int mode = 0;
        const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
        const paddr_t map_end = pa_add(map_begin, 268);
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode, &ipa,
                                       &ppool));
        EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

        auto tables = get_ptable(ptable, mode);
        EXPECT_THAT(tables, SizeIs(4));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check that the last page is mapped, and nothing else. */
        EXPECT_THAT(std::span(tables).first(3),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        auto table_l2 = tables.back();
        EXPECT_THAT(table_l2.first(table_l2.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

        auto table_l1 = get_table(
                arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table_l1.first(table_l1.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

        auto table_l0 = get_table(
                arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0.first(table_l0.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
                                                   TOP_LEVEL - 2)),
                    Eq(0x200'0000'0000 - PAGE_SIZE));

        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Map a two page range over the boundary of two tables.
 */
TEST_F(mm, map_across_tables)
{
        constexpr int mode = 0;
        const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       nullptr, &ppool));

        auto tables = get_ptable(ptable, mode);
        EXPECT_THAT(tables, SizeIs(4));
        EXPECT_THAT(std::span(tables).last(2),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check only the last page of the first table is mapped. */
        auto table0_l2 = tables.front();
        EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

        auto table0_l1 = get_table(
                arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

        auto table0_l0 = get_table(
                arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
                                                   TOP_LEVEL - 2)),
                    Eq(pa_addr(map_begin)));

        /* Check only the first page of the second table is mapped. */
        auto table1_l2 = tables[1];
        EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

        auto table1_l1 =
                get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
        EXPECT_THAT(table1_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

        auto table1_l0 =
                get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table1_l0.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(
                pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
                Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));

        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        auto tables = get_ptable(ptable, mode);
        EXPECT_THAT(
                tables,
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        for (uint64_t i = 0; i < tables.size(); ++i) {
                for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
                        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
                                                                   TOP_LEVEL)),
                                    Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
                                       (j * mm_entry_size(TOP_LEVEL))))
                                << "i=" << i << " j=" << j;
                }
        }
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping all of memory and then mapping a single page again doesn't introduce
 * a special mapping for that particular page.
 */
TEST_F(mm, map_already_mapped)
{
        constexpr int mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
                                       mode, &ipa, &ppool));
        EXPECT_THAT(ipa_addr(ipa), Eq(0));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no mappings are made.
 */
TEST_F(mm, map_reverse_range)
{
        constexpr int mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
                                       pa_init(0x5000), mode, &ipa, &ppool));
        EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping a reverse range in the same page will map the page because the start
 * of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_reverse_range_quirk)
{
        constexpr int mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
                                       &ipa, &ppool));
        EXPECT_THAT(ipa_addr(ipa), Eq(20));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa, mode));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary, meaning no memory is mapped.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_last_address_quirk)
{
        constexpr int mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(
                &ptable, pa_init(0),
                pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ipa,
                &ppool));
        EXPECT_THAT(ipa_addr(ipa), Eq(0));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping a range that extends beyond the available memory is clamped to the
 * available range.
 */
TEST_F(mm, map_clamp_to_range)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
                                       pa_init(0xf32'0000'0000'0000), mode,
                                       nullptr, &ppool));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping a range outside of the available memory is ignored and doesn't alter
 * the page tables.
 */
TEST_F(mm, map_ignore_out_of_range)
{
        constexpr int mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
                                       pa_init(0xf0'0000'0000'0000), mode, &ipa,
                                       &ppool));
        EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Map a single page and then map all of memory which replaces the single page
 * mapping with a higher level block mapping.
 */
TEST_F(mm, map_block_replaces_table)
{
        constexpr int mode = 0;
        const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping all memory at the top level, then unmapping a page and remapping it
 * at a lower level, does not result in all memory being mapped at the top
 * level again.
 */
TEST_F(mm, map_does_not_defrag)
{
        constexpr int mode = 0;
        const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       nullptr, &ppool));
        EXPECT_THAT(get_ptable(ptable, mode),
                    AllOf(SizeIs(4),
                          Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
                                                    TOP_LEVEL)))),
                          Contains(Contains(Truly(std::bind(
                                  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
                          Contains(Contains(Truly(std::bind(
                                  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * If nothing is mapped, unmapping the hypervisor has no effect.
 */
TEST_F(mm, vm_unmap_hypervisor_not_mapped)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * If the range is not mapped, unmapping it has no effect.
 */
TEST_F(mm, unmap_not_mapped)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), mode,
                                &ppool));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST_F(mm, unmap_all)
{
        constexpr int mode = 0;
        const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
        const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
                                       &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
                                       &ppool));
        EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * The unmap range is rounded to the containing pages.
 */
TEST_F(mm, unmap_round_to_page)
{
        constexpr int mode = 0;
        const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
                                pa_add(map_begin, 99), mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
        constexpr int mode = 0;
        const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
                                mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated
 * as an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
                                mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmapping a reverse range in the same page will unmap the page because the
 * start of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_reverse_range_quirk)
{
        constexpr int mode = 0;
        const paddr_t page_begin = pa_init(0x180'0000'0000);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
                                pa_add(page_begin, 50), mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary, meaning no change is made.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_last_address_quirk)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(
                mm_vm_unmap(&ptable, pa_init(0),
                            pa_init(std::numeric_limits<uintpaddr_t>::max()),
                            mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping then unmapping a page does not defrag the table.
 */
TEST_F(mm, unmap_does_not_defrag)
{
        constexpr int mode = 0;
        const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
        const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
                                       &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
                                       &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Nothing is mapped in an empty table.
 */
TEST_F(mm, is_mapped_empty)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0), mode));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344), mode));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073), mode));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Everything is mapped in a full table.
 */
TEST_F(mm, is_mapped_all)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0), mode));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3), mode));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b), mode));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * A page is mapped for the range [begin, end).
 */
TEST_F(mm, is_mapped_page)
{
        constexpr int mode = 0;
        const paddr_t page_begin = pa_init(0x100'0000'0000);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       nullptr, &ppool));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin), mode));
        EXPECT_TRUE(mm_vm_is_mapped(
                &ptable, ipa_from_pa(pa_add(page_begin, 127)), mode));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end), mode));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Everything out of range is not mapped.
 */
TEST_F(mm, is_mapped_out_of_range)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END), mode));
        EXPECT_FALSE(
                mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123), mode));
        EXPECT_FALSE(mm_vm_is_mapped(
                &ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max()),
                mode));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Defragging an entirely empty table has no effect.
 */
TEST_F(mm, defrag_empty)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        mm_ptable_defrag(&ptable, mode, &ppool);
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Defragging a table with some empty subtables (even nested) results in
 * an empty table.
 */
TEST_F(mm, defrag_empty_subtables)
{
        constexpr int mode = 0;
        const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
        const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
                                       &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
                                       &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode, &ppool));
        mm_ptable_defrag(&ptable, mode, &ppool);
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Any subtable with all blocks with the same attributes should be replaced
 * with a single block.
 */
TEST_F(mm, defrag_block_subtables)
{
        constexpr int mode = 0;
        const paddr_t begin = pa_init(39456 * mm_entry_size(1));
        const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
        const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, nullptr,
                                       &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, nullptr,
                                       &ppool));
        mm_ptable_defrag(&ptable, mode, &ppool);
        EXPECT_THAT(
                get_ptable(ptable, mode),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        mm_ptable_fini(&ptable, mode, &ppool);
}

} /* namespace */