/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/mm.h"
#include "hf/mpool.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::Truly;

using ::mm_test::get_ptable;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const int TOP_LEVEL = arch_mm_stage2_max_level();
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
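
/*
 * Illustrative note, assuming the 4 KiB granule with 512 entries per table
 * that the assertions below imply (TOP_LEVEL == 2, SizeIs(4)): a level-2
 * entry covers 1 GiB, so one level-2 table covers 512 GiB, and the 2 TiB of
 * VM_MEM_END is therefore represented by four concatenated top-level tables.
 */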

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
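 *
 * For example, assuming PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9 (the 4 KiB
 * granule; stated for illustration rather than asserted here): level 0
 * entries map 4 KiB, level 1 entries map 2 MiB and level 2 entries map 1 GiB.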
 */
size_t mm_entry_size(int level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * Checks whether the address is mapped in the address space.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa)
{
	uint32_t mode;
	return mm_vm_get_mode(t, ipa, ipa_add(ipa, 1), &mode) &&
	       (mode & MM_MODE_INVALID) == 0;
}

/**
 * Gets an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
{
	auto table = reinterpret_cast<struct mm_page_table *>(
		ptr_from_va(va_from_pa(pa)));
	return std::span<pte_t, MM_PTE_PER_PAGE>(table->entries,
						 std::end(table->entries));
}

class mm : public ::testing::Test
{
	void SetUp() override
	{
		/*
		 * TODO: replace with direct use of stdlib allocator so
		 * sanitizers are more effective.
		 */
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
	}

	std::unique_ptr<uint8_t[]> test_heap;

       protected:
	struct mpool ppool;
};

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Only the first page is mapped with all others left absent.
 */
TEST_F(mm, map_first_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		    Eq(pa_addr(page_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
	const paddr_t map_end = pa_add(map_begin, 268);
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the last page is mapped, and nothing else. */
	EXPECT_THAT(std::span(tables).first(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.back();
	EXPECT_THAT(table_l2.first(table_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.first(table_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.first(table_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(0x200'0000'0000 - PAGE_SIZE));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Map a two page range over the boundary of two tables.
 */
TEST_F(mm, map_across_tables)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(map_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	auto tables = get_ptable(ptable);
	EXPECT_THAT(
		tables,
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
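	/*
	 * Each identity-mapped block should begin at the address its slot
	 * represents: table i starts at i * mm_entry_size(TOP_LEVEL + 1) and
	 * entry j adds a further j * mm_entry_size(TOP_LEVEL), which is the
	 * expression the loop below checks.
	 */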
	for (uint64_t i = 0; i < tables.size(); ++i) {
		for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
								   TOP_LEVEL)),
				    Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
				       (j * mm_entry_size(TOP_LEVEL))))
				<< "i=" << i << " j=" << j;
		}
	}
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory and then mapping a page again doesn't introduce a
 * special mapping for that particular page.
 */
TEST_F(mm, map_already_mapped)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
				       mode, &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no mappings are made.
 */
TEST_F(mm, map_reverse_range)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
				       pa_init(0x5000), mode, &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range in the same page will map the page because the start
 * of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_reverse_range_quirk)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
				       &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(20));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary, meaning no memory is mapped.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_last_address_quirk)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ppool,
		&ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range that goes beyond the available memory clamps to the
 * available range.
 */
TEST_F(mm, map_clamp_to_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
				       pa_init(0xf32'0000'0000'0000), mode,
				       &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range outside of the available memory is ignored and doesn't alter
 * the page tables.
 */
TEST_F(mm, map_ignore_out_of_range)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
				       pa_init(0xf0'0000'0000'0000), mode,
				       &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a single page and then mapping all of memory replaces the single
 * page mapping with a higher-level block mapping.
 */
TEST_F(mm, map_block_replaces_table)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * After mapping all memory at the top level, unmapping a page and then
 * remapping it at a lower level does not result in all memory being mapped at
 * the top level again.
 */
TEST_F(mm, map_does_not_defrag)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	EXPECT_THAT(get_ptable(ptable),
		    AllOf(SizeIs(4),
			  Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
						    TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping with a mode that indicates unmapping results in the addresses being
 * unmapped with absent entries.
 */
TEST_F(mm, map_to_unmap)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END,
				       MM_MODE_UNMAPPED_MASK, &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Preparing and committing an address range works the same as mapping it.
 */
TEST_F(mm, prepare_and_commit_first_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, page_begin, page_end, mode,
					   &ppool));
	mm_vm_identity_commit(&ptable, page_begin, page_end, mode, &ppool,
			      nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		    Eq(pa_addr(page_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Disjoint address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_disjoint_regions)
{
	constexpr uint32_t mode = 0;
	const paddr_t first_begin = pa_init(0);
	const paddr_t first_end = pa_add(first_begin, PAGE_SIZE);
	const paddr_t last_begin = pa_init(pa_addr(VM_MEM_END) - PAGE_SIZE);
	const paddr_t last_end = VM_MEM_END;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, first_begin, first_end,
					   mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, last_begin, last_end, mode,
					   &ppool));
	mm_vm_identity_commit(&ptable, first_begin, first_end, mode, &ppool,
			      nullptr);
	mm_vm_identity_commit(&ptable, last_begin, last_end, mode, &ppool,
			      nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first and last pages are mapped and nothing else. */
	EXPECT_THAT(std::span(tables).subspan(1, 2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Check the first page. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2[0], TOP_LEVEL));

	auto table0_l1 =
		get_table(arch_mm_table_from_pte(table0_l2[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1[0], TOP_LEVEL - 1));

	auto table0_l0 =
		get_table(arch_mm_table_from_pte(table0_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table0_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(first_begin)));

	/* Check the last page. */
	auto table3_l2 = tables.back();
	EXPECT_THAT(table3_l2.first(table3_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.last(1)[0], TOP_LEVEL));

	auto table3_l1 = get_table(
		arch_mm_table_from_pte(table3_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table3_l1.first(table3_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.last(1)[0], TOP_LEVEL - 1));

	auto table3_l0 = get_table(
		arch_mm_table_from_pte(table3_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l0.first(table3_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table3_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table3_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(last_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Overlapping address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_overlapping_regions)
{
	constexpr uint32_t mode = 0;
	const paddr_t low_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t high_begin = pa_add(low_begin, PAGE_SIZE);
	const paddr_t map_end = pa_add(high_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, high_begin, map_end, mode,
					   &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, low_begin, map_end, mode,
					   &ppool));
	mm_vm_identity_commit(&ptable, high_begin, map_end, mode, &ppool,
			      nullptr);
	mm_vm_identity_commit(&ptable, low_begin, map_end, mode, &ppool,
			      nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(low_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(high_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * If the range is not mapped, unmapping has no effect.
 */
TEST_F(mm, unmap_not_mapped)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_TRUE(
		mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST_F(mm, unmap_all)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * The unmap range is rounded to the containing pages.
 */
TEST_F(mm, unmap_round_to_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
	struct mm_ptable ptable;

	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
				pa_add(map_begin, 99), &ppool));

	auto tables = get_ptable(ptable);
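	/*
	 * Index derivation (illustrative; assumes the 4 KiB granule these
	 * tests imply): each concatenated top-level table covers 512 GiB, so
	 * 0x160'0000'0000 lands in table 2; each level-2 entry covers 1 GiB,
	 * so the remaining offset of 0x60'0000'0000 selects entry 384.
	 */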
	constexpr auto l3_index = 2;

	/* Check all other top level entries are empty... */
	EXPECT_THAT(std::span(tables).first(l3_index),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	EXPECT_THAT(std::span(tables).subspan(l3_index + 1),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* ...except the table containing the previously mapped page, which is
	 * now explicitly absent. */
	auto table_l2 = tables[l3_index];
	constexpr auto l2_index = 384;
	EXPECT_THAT(table_l2.first(l2_index),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[l2_index], TOP_LEVEL));
	EXPECT_THAT(table_l2.subspan(l2_index + 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2[l2_index], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
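	/* The level-0 subtable is retained but now holds only absent entries;
	 * unmapping does not prune or coalesce the table structure. */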
	EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;

	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, &ppool));

	auto tables = get_ptable(ptable);

	/* Check the untouched tables are empty. */
	EXPECT_THAT(std::span(tables).first(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Check the last page is explicitly marked as absent. */
	auto table2_l2 = tables[2];
	EXPECT_THAT(table2_l2.first(table2_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table2_l2.last(1)[0], TOP_LEVEL));

	auto table2_l1 = get_table(
		arch_mm_table_from_pte(table2_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table2_l1.first(table2_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table2_l1.last(1)[0], TOP_LEVEL - 1));

	auto table2_l0 = get_table(
		arch_mm_table_from_pte(table2_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table2_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	/* Check the first page is explicitly marked as absent. */
	auto table3_l2 = tables[3];
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.first(1)[0], TOP_LEVEL));
	EXPECT_THAT(table3_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table3_l1 = get_table(
		arch_mm_table_from_pte(table3_l2.first(1)[0], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table3_l0 = get_table(
		arch_mm_table_from_pte(table3_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range that lies within a single page unmaps the whole
 * page, because the start of the range is rounded down to a page boundary and
 * the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_reverse_range_quirk)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0x180'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
				pa_add(page_begin, 50), &ppool));

	auto tables = get_ptable(ptable);
	constexpr auto l3_index = 3;

	/* Check that all the other top-level entries are absent... */
	EXPECT_THAT(std::span(tables).first(l3_index),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/*
	 * ...except the entry for the previously mapped page, which still
	 * points to subtables whose entries are now all absent.
	 */
	auto table_l2 = tables[l3_index];
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.first(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.first(1)[0], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}
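
/*
 * Editor's sketch of the rounding behaviour documented by the quirk tests.
 * These helpers are an illustrative assumption about how mm_vm_unmap aligns
 * its arguments, not Hafnium API; they only restate the arithmetic that makes
 * a sub-page reverse range expand to cover the whole enclosing page.
 */
constexpr uintpaddr_t sketch_round_down_to_page(uintpaddr_t a)
{
	return a & ~(uintpaddr_t(PAGE_SIZE) - 1);
}

constexpr uintpaddr_t sketch_round_up_to_page(uintpaddr_t a)
{
	return sketch_round_down_to_page(a + PAGE_SIZE - 1);
}

/* begin = page + 100 rounds down to the page base... */
static_assert(sketch_round_down_to_page(0x180'0000'0000 + 100) ==
	      0x180'0000'0000);
/* ...and end = page + 50 rounds up past it, so the full page is covered. */
static_assert(sketch_round_up_to_page(0x180'0000'0000 + 50) ==
	      0x180'0000'0000 + PAGE_SIZE);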

/**
 * Unmapping a range that runs up to the maximum address makes no change,
 * because the range end is rounded up to a page boundary and so wraps to
 * zero, leaving an empty range.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_last_address_quirk)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}
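
/*
 * Editor's sketch (same assumption as above): rounding the maximum address up
 * to a page boundary wraps modulo 2^64 and lands on zero, which is why the
 * test above ends up unmapping an empty range and making no change.
 */
static_assert(sketch_round_up_to_page(
		      std::numeric_limits<uintpaddr_t>::max()) == 0);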

/**
 * Mapping then unmapping a page does not defrag the table.
 */
TEST_F(mm, unmap_does_not_defrag)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
	EXPECT_THAT(get_ptable(ptable),
		    AllOf(SizeIs(4),
			  Not(Each(Each(arch_mm_absent_pte(TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Nothing is mapped in an empty table.
 */
TEST_F(mm, is_mapped_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Everything is mapped in a full table.
 */
TEST_F(mm, is_mapped_all)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * A page is mapped for the range [begin, end).
 */
TEST_F(mm, is_mapped_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0x100'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
	EXPECT_TRUE(
		mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Nothing out of range is mapped.
 */
TEST_F(mm, is_mapped_out_of_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
	EXPECT_FALSE(mm_vm_is_mapped(
		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max())));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * The mode of unmapped addresses can be retrieved, and is set to invalid,
 * unowned and shared.
 */
TEST_F(mm, get_mode_empty)
{
	constexpr uint32_t default_mode =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));

	read_mode = 0;
	EXPECT_TRUE(
		mm_vm_get_mode(&ptable, ipa_init(0), ipa_init(20), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x3c97'654d),
				   ipa_init(0x3c97'e000), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x5f'ffff'ffff),
				   ipa_init(0x1ff'ffff'ffff), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	mm_vm_fini(&ptable, &ppool);
}
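
/*
 * Editor's sketch: the default mode above is a bitmask, so callers would
 * typically test individual bits rather than compare the whole value. The
 * helper below is illustrative only and is not part of the mm API.
 */
constexpr bool sketch_mode_is_owned_and_valid(uint32_t mode)
{
	return (mode & (MM_MODE_INVALID | MM_MODE_UNOWNED)) == 0;
}

static_assert(!sketch_mode_is_owned_and_valid(
	MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED));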

/**
 * Get the mode of a range composed of individual pages which lie on either
 * side of a root table boundary.
 */
TEST_F(mm, get_mode_pages_across_tables)
{
	constexpr uint32_t mode = MM_MODE_INVALID | MM_MODE_SHARED;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
				   ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
				   &read_mode));
	EXPECT_THAT(read_mode, Eq(mode));

	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
				    ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
				    &read_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
				   ipa_from_pa(map_end), &read_mode));
	EXPECT_THAT(read_mode, Eq(mode));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Getting the mode of any range that is out of bounds fails.
 */
TEST_F(mm, get_mode_out_of_range)
{
	constexpr uint32_t mode = MM_MODE_UNOWNED;
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
				    &read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(VM_MEM_END),
				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
				    &read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234),
				    ipa_init(0x2'0000'0000'0000), &read_mode));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging an entirely empty table has no effect.
 */
TEST_F(mm, defrag_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging a table with some empty subtables (even nested) results in
 * an empty table.
 */
TEST_F(mm, defrag_empty_subtables)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Any subtable whose blocks all have the same attributes should be replaced
 * with a single block.
 */
TEST_F(mm, defrag_block_subtables)
{
	constexpr uint32_t mode = 0;
	const paddr_t begin = pa_init(39456 * mm_entry_size(1));
	const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
	const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, &ppool,
				       nullptr));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}
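
/*
 * Editor's sketch of the collapse criterion the test above exercises. This is
 * an illustrative assumption about what makes a subtable foldable into one
 * block, not Hafnium's implementation; sketch_pte_attrs stands in for however
 * the implementation actually compares block attributes.
 */
template <typename AttrsFn>
bool sketch_subtable_is_mergeable(std::span<const pte_t, MM_PTE_PER_PAGE> table,
				  int level, AttrsFn sketch_pte_attrs)
{
	for (pte_t pte : table) {
		/* Every entry must be a block... */
		if (!arch_mm_pte_is_block(pte, level)) {
			return false;
		}
		/* ...carrying the same attributes as the first entry. */
		if (sketch_pte_attrs(pte) != sketch_pte_attrs(table[0])) {
			return false;
		}
	}
	return true;
}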

} /* namespace */

namespace mm_test
{
/**
 * Get an STL representation of the ptable.
 */
std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
	const struct mm_ptable &ptable)
{
	std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
	const uint8_t root_table_count = arch_mm_stage2_root_table_count();
	for (uint8_t i = 0; i < root_table_count; ++i) {
		all.push_back(get_table(
			pa_add(ptable.root, i * sizeof(struct mm_page_table))));
	}
	return all;
}
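
/*
 * Example use of get_ptable (an editor's sketch, not called by the tests):
 * count how many top-level entries across all root tables are absent. The
 * helper name is illustrative and is not declared in mm_test.hh.
 */
size_t count_absent_top_level(const struct mm_ptable &ptable)
{
	size_t absent = 0;
	for (const auto &root : get_ptable(ptable)) {
		for (pte_t pte : root) {
			if (pte == arch_mm_absent_pte(TOP_LEVEL)) {
				++absent;
			}
		}
	}
	return absent;
}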

} /* namespace mm_test */