/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/mm.h"
#include "hf/mpool.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::Truly;

using ::mm_test::get_ptable;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const int TOP_LEVEL = arch_mm_stage2_max_level();
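/*
 * The tests below assume the usual aarch64 stage-2 configuration of a 4 KiB
 * granule with TOP_LEVEL == 2, so each concatenated root table covers 512 GiB
 * and the 2 TiB VM_MEM_END below corresponds to 4 root tables, hence the
 * SizeIs(4) expectations.
 */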
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
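 *
 * For example, assuming the usual 4 KiB granule (PAGE_BITS == 12 and
 * PAGE_LEVEL_BITS == 9), a level 0 entry covers 4 KiB, a level 1 entry 2 MiB
 * and a level 2 entry 1 GiB.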
 */
size_t mm_entry_size(int level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * Checks whether the address is mapped in the address space.
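 *
 * A single-byte range is queried; mm_vm_get_mode is expected to succeed only
 * when the range has a uniform mode, and the address counts as mapped when
 * the MM_MODE_INVALID bit is clear.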
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa)
{
	uint32_t mode;
	return mm_vm_get_mode(t, ipa, ipa_add(ipa, 1), &mode) &&
	       (mode & MM_MODE_INVALID) == 0;
}

/**
 * Get an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
{
	auto table = reinterpret_cast<struct mm_page_table *>(
		ptr_from_va(va_from_pa(pa)));
	return std::span<pte_t>(table->entries, std::end(table->entries));
}

class mm : public ::testing::Test
{
	void SetUp() override
	{
		/*
		 * TODO: replace with direct use of stdlib allocator so
		 * sanitizers are more effective.
		 */
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
	}

	std::unique_ptr<uint8_t[]> test_heap;

       protected:
	struct mpool ppool;
};

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Only the first page is mapped with all others left absent.
 */
TEST_F(mm, map_first_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		    Eq(pa_addr(page_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
	constexpr uint32_t mode = 0;
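	/*
	 * Note: the deliberately unaligned range below lies entirely within
	 * the last page before VM_MEM_END, so after rounding exactly that one
	 * page should be mapped.
	 */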
	const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
	const paddr_t map_end = pa_add(map_begin, 268);
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the last page is mapped, and nothing else. */
	EXPECT_THAT(std::span(tables).first(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.back();
	EXPECT_THAT(table_l2.first(table_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.first(table_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.first(table_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(0x200'0000'0000 - PAGE_SIZE));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Map a two page range over the boundary of two tables.
 */
TEST_F(mm, map_across_tables)
{
	constexpr uint32_t mode = 0;
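	/*
	 * Note: 0x80'0000'0000 is assumed to be the boundary between the
	 * first and second concatenated root tables (512 GiB each), so this
	 * two-page range straddles root tables 0 and 1.
	 */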
	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(map_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	auto tables = get_ptable(ptable);
	EXPECT_THAT(
		tables,
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
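	/*
	 * For an identity mapping, root table i and entry j should hold a
	 * block whose address is i * mm_entry_size(TOP_LEVEL + 1) +
	 * j * mm_entry_size(TOP_LEVEL).
	 */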
	for (uint64_t i = 0; i < tables.size(); ++i) {
		for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
								   TOP_LEVEL)),
				    Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
				       (j * mm_entry_size(TOP_LEVEL))))
				<< "i=" << i << " j=" << j;
		}
	}
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all memory and then mapping a page again doesn't introduce a special
 * mapping for that particular page.
 */
TEST_F(mm, map_already_mapped)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
				       mode, &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no mappings are made.
 */
TEST_F(mm, map_reverse_range)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
				       pa_init(0x5000), mode, &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range in the same page will map the page because the start
 * of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_reverse_range_quirk)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
				       &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(20));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary, meaning no memory is mapped.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_last_address_quirk)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ppool,
		&ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * A mapping that goes beyond the available memory is clamped to the available
 * range.
 */
TEST_F(mm, map_clamp_to_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
				       pa_init(0xf32'0000'0000'0000), mode,
				       &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range outside of the available memory is ignored and doesn't alter
 * the page tables.
 */
TEST_F(mm, map_ignore_out_of_range)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
				       pa_init(0xf0'0000'0000'0000), mode,
				       &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a single page and then mapping all of memory replaces the single
 * page mapping with a higher level block mapping.
 */
TEST_F(mm, map_block_replaces_table)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all memory at the top level, then unmapping a page and remapping it
 * at a lower level, does not result in all memory being mapped at the top
 * level again.
 */
TEST_F(mm, map_does_not_defrag)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	EXPECT_THAT(get_ptable(ptable),
		    AllOf(SizeIs(4),
			  Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
						    TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping with a mode that indicates unmapping results in the addresses being
 * unmapped with absent entries.
 */
TEST_F(mm, map_to_unmap)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END,
				       MM_MODE_UNMAPPED_MASK, &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Preparing and committing an address range works the same as mapping it.
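 *
 * The prepare step is expected to allocate any page-table memory the mapping
 * needs, so the later commit cannot fail.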
 */
TEST_F(mm, prepare_and_commit_first_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, page_begin, page_end, mode,
					   &ppool));
	mm_vm_identity_commit(&ptable, page_begin, page_end, mode, &ppool,
			      nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		    Eq(pa_addr(page_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Disjoint address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_disjoint_regions)
{
	constexpr uint32_t mode = 0;
	const paddr_t first_begin = pa_init(0);
	const paddr_t first_end = pa_add(first_begin, PAGE_SIZE);
	const paddr_t last_begin = pa_init(pa_addr(VM_MEM_END) - PAGE_SIZE);
	const paddr_t last_end = VM_MEM_END;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, first_begin, first_end,
					   mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, last_begin, last_end, mode,
					   &ppool));
	mm_vm_identity_commit(&ptable, first_begin, first_end, mode, &ppool,
			      nullptr);
	mm_vm_identity_commit(&ptable, last_begin, last_end, mode, &ppool,
			      nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first and last pages are mapped and nothing else. */
	EXPECT_THAT(std::span(tables).subspan(1, 2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Check the first page. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2[0], TOP_LEVEL));

	auto table0_l1 =
		get_table(arch_mm_table_from_pte(table0_l2[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1[0], TOP_LEVEL - 1));

	auto table0_l0 =
		get_table(arch_mm_table_from_pte(table0_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table0_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(first_begin)));

	/* Check the last page. */
	auto table3_l2 = tables.back();
	EXPECT_THAT(table3_l2.first(table3_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.last(1)[0], TOP_LEVEL));

	auto table3_l1 = get_table(
		arch_mm_table_from_pte(table3_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table3_l1.first(table3_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.last(1)[0], TOP_LEVEL - 1));

	auto table3_l0 = get_table(
		arch_mm_table_from_pte(table3_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l0.first(table3_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table3_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table3_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(last_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Overlapping address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_overlapping_regions)
{
	constexpr uint32_t mode = 0;
	const paddr_t low_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t high_begin = pa_add(low_begin, PAGE_SIZE);
	const paddr_t map_end = pa_add(high_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, high_begin, map_end, mode,
					   &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, low_begin, map_end, mode,
					   &ppool));
	mm_vm_identity_commit(&ptable, high_begin, map_end, mode, &ppool,
			      nullptr);
	mm_vm_identity_commit(&ptable, low_begin, map_end, mode, &ppool,
			      nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(low_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(high_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * If a range is not mapped, unmapping it has no effect.
 */
TEST_F(mm, unmap_not_mapped)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	EXPECT_TRUE(
		mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST_F(mm, unmap_all)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * The unmapped range is rounded to the containing pages.
 */
TEST_F(mm, unmap_round_to_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
	struct mm_ptable ptable;

	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
				pa_add(map_begin, 99), &ppool));

	auto tables = get_ptable(ptable);
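	/*
	 * map_begin lies just above 0x160'0000'0000, which falls in the third
	 * concatenated root table (index 2), assuming each root table covers
	 * 512 GiB.
	 */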
	constexpr auto l3_index = 2;

	/* Check all other top level entries are empty... */
	EXPECT_THAT(std::span(tables).first(l3_index),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	EXPECT_THAT(std::span(tables).subspan(l3_index + 1),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Except the previously mapped page, which is now absent. */
	auto table_l2 = tables[l3_index];
	constexpr auto l2_index = 384;
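	/*
	 * Within that root table, the page lies at level-2 entry
	 * 0x60'0000'0000 / 1 GiB = 384, assuming 1 GiB entries at level 2.
	 */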
	EXPECT_THAT(table_l2.first(l2_index),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[l2_index], TOP_LEVEL));
	EXPECT_THAT(table_l2.subspan(l2_index + 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2[l2_index], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;

	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, &ppool));

	auto tables = get_ptable(ptable);

	/* Check the untouched tables are empty. */
	EXPECT_THAT(std::span(tables).first(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Check the last page is explicitly marked as absent. */
	auto table2_l2 = tables[2];
	EXPECT_THAT(table2_l2.first(table2_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table2_l2.last(1)[0], TOP_LEVEL));

	auto table2_l1 = get_table(
		arch_mm_table_from_pte(table2_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table2_l1.first(table2_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table2_l1.last(1)[0], TOP_LEVEL - 1));

	auto table2_l0 = get_table(
		arch_mm_table_from_pte(table2_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table2_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	/* Check the first page is explicitly marked as absent. */
	auto table3_l2 = tables[3];
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.first(1)[0], TOP_LEVEL));
	EXPECT_THAT(table3_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table3_l1 = get_table(
		arch_mm_table_from_pte(table3_l2.first(1)[0], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table3_l0 = get_table(
		arch_mm_table_from_pte(table3_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated
 * as an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

| 860 | /** |
| 861 | * Unmapping a reverse range in the same page will unmap the page because the |
| 862 | * start of the range is rounded down and the end is rounded up. |
| 863 | * |
| 864 | * This serves as a form of documentation of behaviour rather than a |
| 865 | * requirement. Check whether any code relies on this before changing it. |
| 866 | */ |
| 867 | TEST_F(mm, unmap_reverse_range_quirk) |
| 868 | { |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 869 | constexpr uint32_t mode = 0; |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 870 | const paddr_t page_begin = pa_init(0x180'0000'0000); |
| 871 | const paddr_t page_end = pa_add(page_begin, PAGE_SIZE); |
| 872 | struct mm_ptable ptable; |
Raghu Krishnamurthy | 0132b51 | 2021-02-03 14:13:26 -0800 | [diff] [blame^] | 873 | ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool)); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 874 | ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode, |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 875 | &ppool, nullptr)); |
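|  | /* The reversed range [begin + 100, begin + 50) rounds down to the page |
|  |  * start and up to the page end, so the whole page is unmapped. */ |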
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 876 | ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100), |
Andrew Scull | da24197 | 2019-01-05 18:17:48 +0000 | [diff] [blame] | 877 | pa_add(page_begin, 50), &ppool)); |
Andrew Scull | 164f815 | 2019-11-19 14:29:55 +0000 | [diff] [blame] | 878 | |
| 879 | auto tables = get_ptable(ptable); |
| 880 | constexpr auto l3_index = 3; |
| 881 | |
| 882 | /* Check all other top level entries are empty... */ |
| 883 | EXPECT_THAT(std::span(tables).first(l3_index), |
| 884 | Each(Each(arch_mm_absent_pte(TOP_LEVEL)))); |
| 885 | |
| 886 | /* Except the entry for the unmapped page, whose subtables now contain |
|  |  * only absent PTEs. */ |
| 887 | auto table_l2 = tables[l3_index]; |
| 888 | ASSERT_TRUE(arch_mm_pte_is_table(table_l2.first(1)[0], TOP_LEVEL)); |
| 889 | EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL))); |
| 890 | |
| 891 | auto table_l1 = get_table( |
| 892 | arch_mm_table_from_pte(table_l2.first(1)[0], TOP_LEVEL)); |
| 893 | ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1)); |
| 894 | EXPECT_THAT(table_l1.subspan(1), |
| 895 | Each(arch_mm_absent_pte(TOP_LEVEL - 1))); |
| 896 | |
| 897 | auto table_l0 = get_table( |
| 898 | arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1)); |
| 899 | EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2))); |
| 900 | |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 901 | mm_vm_fini(&ptable, &ppool); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 902 | } |
| 903 | |
| 904 | /** |
| 905 | * Unmapping a range up to the maximum address causes the range end to wrap to |
| 906 | * zero when it is rounded up to a page boundary, meaning no change is made. |
| 907 | * |
| 908 | * This serves as a form of documentation of behaviour rather than a |
| 909 | * requirement. Check whether any code relies on this before changing it. |
| 910 | */ |
| 911 | TEST_F(mm, unmap_last_address_quirk) |
| 912 | { |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 913 | constexpr uint32_t mode = 0; |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 914 | struct mm_ptable ptable; |
Raghu Krishnamurthy | 0132b51 | 2021-02-03 14:13:26 -0800 | [diff] [blame^] | 915 | ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool)); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 916 | ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode, |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 917 | &ppool, nullptr)); |
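|  | /* Rounding the maximum address up to a page boundary wraps the end to 0, |
|  |  * giving an empty range, so nothing is unmapped. */ |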
Andrew Scull | da24197 | 2019-01-05 18:17:48 +0000 | [diff] [blame] | 918 | ASSERT_TRUE(mm_vm_unmap( |
| 919 | &ptable, pa_init(0), |
| 920 | pa_init(std::numeric_limits<uintpaddr_t>::max()), &ppool)); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 921 | EXPECT_THAT( |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 922 | get_ptable(ptable), |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 923 | AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block, |
| 924 | _1, TOP_LEVEL)))))); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 925 | mm_vm_fini(&ptable, &ppool); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 926 | } |
| 927 | |
| 928 | /** |
| 929 | * Mapping and then unmapping ranges does not defrag the table. |
| 930 | */ |
| 931 | TEST_F(mm, unmap_does_not_defrag) |
| 932 | { |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 933 | constexpr uint32_t mode = 0; |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 934 | const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE); |
| 935 | const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE); |
| 936 | const paddr_t l1_begin = pa_init(666 * mm_entry_size(1)); |
| 937 | const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1)); |
| 938 | struct mm_ptable ptable; |
Raghu Krishnamurthy | 0132b51 | 2021-02-03 14:13:26 -0800 | [diff] [blame^] | 939 | ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool)); |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 940 | ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool, |
| 941 | nullptr)); |
| 942 | ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool, |
| 943 | nullptr)); |
Andrew Scull | da24197 | 2019-01-05 18:17:48 +0000 | [diff] [blame] | 944 | ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool)); |
| 945 | ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool)); |
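|  | /* The tables allocated for the mappings are not freed by unmapping, so |
|  |  * not every top-level entry is absent. */ |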
Andrew Scull | 164f815 | 2019-11-19 14:29:55 +0000 | [diff] [blame] | 946 | EXPECT_THAT(get_ptable(ptable), |
| 947 | AllOf(SizeIs(4), |
| 948 | Not(Each(Each(arch_mm_absent_pte(TOP_LEVEL)))))); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 949 | mm_vm_fini(&ptable, &ppool); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 950 | } |
| 951 | |
| 952 | /** |
| 953 | * Nothing is mapped in an empty table. |
| 954 | */ |
| 955 | TEST_F(mm, is_mapped_empty) |
| 956 | { |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 957 | struct mm_ptable ptable; |
Raghu Krishnamurthy | 0132b51 | 2021-02-03 14:13:26 -0800 | [diff] [blame^] | 958 | ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool)); |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 959 | EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0))); |
| 960 | EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344))); |
| 961 | EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073))); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 962 | mm_vm_fini(&ptable, &ppool); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 963 | } |
| 964 | |
| 965 | /** |
| 966 | * Everything is mapped in a full table. |
| 967 | */ |
| 968 | TEST_F(mm, is_mapped_all) |
| 969 | { |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 970 | constexpr uint32_t mode = 0; |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 971 | struct mm_ptable ptable; |
Raghu Krishnamurthy | 0132b51 | 2021-02-03 14:13:26 -0800 | [diff] [blame^] | 972 | ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool)); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 973 | ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode, |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 974 | &ppool, nullptr)); |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 975 | EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0))); |
| 976 | EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3))); |
| 977 | EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b))); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 978 | mm_vm_fini(&ptable, &ppool); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 979 | } |
| 980 | |
| 981 | /** |
| 982 | * A page is mapped for the range [begin, end). |
| 983 | */ |
| 984 | TEST_F(mm, is_mapped_page) |
| 985 | { |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 986 | constexpr uint32_t mode = 0; |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 987 | const paddr_t page_begin = pa_init(0x100'0000'0000); |
| 988 | const paddr_t page_end = pa_add(page_begin, PAGE_SIZE); |
| 989 | struct mm_ptable ptable; |
Raghu Krishnamurthy | 0132b51 | 2021-02-03 14:13:26 -0800 | [diff] [blame^] | 990 | ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool)); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 991 | ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode, |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 992 | &ppool, nullptr)); |
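|  | /* Addresses within [page_begin, page_end) are mapped; page_end itself |
|  |  * is not. */ |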
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 993 | EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin))); |
| 994 | EXPECT_TRUE( |
| 995 | mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127)))); |
| 996 | EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end))); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 997 | mm_vm_fini(&ptable, &ppool); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 998 | } |
| 999 | |
| 1000 | /** |
| 1001 | * Everything out of range is not mapped. |
| 1002 | */ |
| 1003 | TEST_F(mm, is_mapped_out_of_range) |
| 1004 | { |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 1005 | constexpr uint32_t mode = 0; |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 1006 | struct mm_ptable ptable; |
Raghu Krishnamurthy | 0132b51 | 2021-02-03 14:13:26 -0800 | [diff] [blame^] | 1007 | ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool)); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 1008 | ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode, |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 1009 | &ppool, nullptr)); |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1010 | EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END))); |
| 1011 | EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123))); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 1012 | EXPECT_FALSE(mm_vm_is_mapped( |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1013 | &ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max()))); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1014 | mm_vm_fini(&ptable, &ppool); |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1015 | } |
| 1016 | |
| 1017 | /** |
| 1018 | * The mode of unmapped addresses can be retrieved and is set to invalid, |
| 1019 | * unowned and shared. |
| 1020 | */ |
| 1021 | TEST_F(mm, get_mode_empty) |
| 1022 | { |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1023 | constexpr uint32_t default_mode = |
| 1024 | MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED; |
| 1025 | struct mm_ptable ptable; |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 1026 | uint32_t read_mode; |
Raghu Krishnamurthy | 0132b51 | 2021-02-03 14:13:26 -0800 | [diff] [blame^] | 1027 | ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool)); |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1028 | |
| 1029 | read_mode = 0; |
| 1030 | EXPECT_TRUE( |
| 1031 | mm_vm_get_mode(&ptable, ipa_init(0), ipa_init(20), &read_mode)); |
| 1032 | EXPECT_THAT(read_mode, Eq(default_mode)); |
| 1033 | |
| 1034 | read_mode = 0; |
| 1035 | EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x3c97'654d), |
| 1036 | ipa_init(0x3c97'e000), &read_mode)); |
| 1037 | EXPECT_THAT(read_mode, Eq(default_mode)); |
| 1038 | |
| 1039 | read_mode = 0; |
| 1040 | EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x5f'ffff'ffff), |
| 1041 | ipa_init(0x1ff'ffff'ffff), &read_mode)); |
| 1042 | EXPECT_THAT(read_mode, Eq(default_mode)); |
| 1043 | |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1044 | mm_vm_fini(&ptable, &ppool); |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1045 | } |
| 1046 | |
| 1047 | /** |
| 1048 | * Get the mode of a range made up of individual pages that lie on either side |
| 1049 | * of a root table boundary. |
| 1050 | */ |
| 1051 | TEST_F(mm, get_mode_pages_across_tables) |
| 1052 | { |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 1053 | constexpr uint32_t mode = MM_MODE_INVALID | MM_MODE_SHARED; |
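|  | /* Two pages straddling the boundary between two root tables at |
|  |  * 0x180'0000'0000. */ |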
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1054 | const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE); |
| 1055 | const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE); |
| 1056 | struct mm_ptable ptable; |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 1057 | uint32_t read_mode; |
Raghu Krishnamurthy | 0132b51 | 2021-02-03 14:13:26 -0800 | [diff] [blame^] | 1058 | ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool)); |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1059 | ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode, |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 1060 | &ppool, nullptr)); |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1061 | |
| 1062 | read_mode = 0; |
| 1063 | EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin), |
| 1064 | ipa_from_pa(pa_add(map_begin, PAGE_SIZE)), |
| 1065 | &read_mode)); |
| 1066 | EXPECT_THAT(read_mode, Eq(mode)); |
| 1067 | |
| 1068 | EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0), |
| 1069 | ipa_from_pa(pa_add(map_begin, PAGE_SIZE)), |
| 1070 | &read_mode)); |
| 1071 | |
| 1072 | read_mode = 0; |
| 1073 | EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin), |
| 1074 | ipa_from_pa(map_end), &read_mode)); |
| 1075 | EXPECT_THAT(read_mode, Eq(mode)); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1076 | mm_vm_fini(&ptable, &ppool); |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1077 | } |
| 1078 | |
| 1079 | /** |
| 1080 | * Retrieving the mode of anything out of range fails. |
| 1081 | */ |
| 1082 | TEST_F(mm, get_mode_out_of_range) |
| 1083 | { |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 1084 | constexpr uint32_t mode = MM_MODE_UNOWNED; |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1085 | struct mm_ptable ptable; |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 1086 | uint32_t read_mode; |
Raghu Krishnamurthy | 0132b51 | 2021-02-03 14:13:26 -0800 | [diff] [blame^] | 1087 | ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool)); |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1088 | ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode, |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 1089 | &ppool, nullptr)); |
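|  | /* A range that ends past the limit fails, whether it starts in bounds |
|  |  * or not. */ |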
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1090 | EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0), |
| 1091 | ipa_from_pa(pa_add(VM_MEM_END, 1)), |
| 1092 | &read_mode)); |
| 1093 | EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(VM_MEM_END), |
| 1094 | ipa_from_pa(pa_add(VM_MEM_END, 1)), |
| 1095 | &read_mode)); |
| 1096 | EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234), |
| 1097 | ipa_init(0x2'0000'0000'0000), &read_mode)); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1098 | mm_vm_fini(&ptable, &ppool); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 1099 | } |
| 1100 | |
| 1101 | /** |
| 1102 | * Defragging an entirely empty table has no effect. |
| 1103 | */ |
| 1104 | TEST_F(mm, defrag_empty) |
| 1105 | { |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 1106 | struct mm_ptable ptable; |
Raghu Krishnamurthy | 0132b51 | 2021-02-03 14:13:26 -0800 | [diff] [blame^] | 1107 | ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool)); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1108 | mm_vm_defrag(&ptable, &ppool); |
Andrew Scull | 3681b8d | 2018-12-12 14:22:59 +0000 | [diff] [blame] | 1109 | EXPECT_THAT( |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1110 | get_ptable(ptable), |
Andrew Scull | 3681b8d | 2018-12-12 14:22:59 +0000 | [diff] [blame] | 1111 | AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL))))); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1112 | mm_vm_fini(&ptable, &ppool); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 1113 | } |
| 1114 | |
| 1115 | /** |
| 1116 | * Defragging a table with some empty subtables (even nested) results in |
Andrew Walbran | 9fa106c | 2018-09-28 14:19:29 +0100 | [diff] [blame] | 1117 | * an empty table. |
| 1118 | */ |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 1119 | TEST_F(mm, defrag_empty_subtables) |
Andrew Walbran | 9fa106c | 2018-09-28 14:19:29 +0100 | [diff] [blame] | 1120 | { |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 1121 | constexpr uint32_t mode = 0; |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 1122 | const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE); |
| 1123 | const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE); |
| 1124 | const paddr_t l1_begin = pa_init(3 * mm_entry_size(1)); |
| 1125 | const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1)); |
Andrew Walbran | 9fa106c | 2018-09-28 14:19:29 +0100 | [diff] [blame] | 1126 | struct mm_ptable ptable; |
Raghu Krishnamurthy | 0132b51 | 2021-02-03 14:13:26 -0800 | [diff] [blame^] | 1127 | ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool)); |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 1128 | ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool, |
| 1129 | nullptr)); |
| 1130 | ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool, |
| 1131 | nullptr)); |
Andrew Scull | da24197 | 2019-01-05 18:17:48 +0000 | [diff] [blame] | 1132 | ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool)); |
| 1133 | ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool)); |
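|  | /* Both mappings have been removed, leaving only empty subtables for the |
|  |  * defrag to reclaim. */ |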
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1134 | mm_vm_defrag(&ptable, &ppool); |
Andrew Scull | 3681b8d | 2018-12-12 14:22:59 +0000 | [diff] [blame] | 1135 | EXPECT_THAT( |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1136 | get_ptable(ptable), |
Andrew Scull | 3681b8d | 2018-12-12 14:22:59 +0000 | [diff] [blame] | 1137 | AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL))))); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1138 | mm_vm_fini(&ptable, &ppool); |
Andrew Walbran | 9fa106c | 2018-09-28 14:19:29 +0100 | [diff] [blame] | 1139 | } |
| 1140 | |
| 1141 | /** |
| 1142 | * Any subtable whose blocks all have the same attributes should be replaced |
| 1143 | * with a single block. |
| 1144 | */ |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 1145 | TEST_F(mm, defrag_block_subtables) |
Andrew Walbran | 9fa106c | 2018-09-28 14:19:29 +0100 | [diff] [blame] | 1146 | { |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 1147 | constexpr uint32_t mode = 0; |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 1148 | const paddr_t begin = pa_init(39456 * mm_entry_size(1)); |
| 1149 | const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE); |
| 1150 | const paddr_t end = pa_add(begin, 4 * mm_entry_size(1)); |
Andrew Walbran | 9fa106c | 2018-09-28 14:19:29 +0100 | [diff] [blame] | 1151 | struct mm_ptable ptable; |
Raghu Krishnamurthy | 0132b51 | 2021-02-03 14:13:26 -0800 | [diff] [blame^] | 1152 | ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool)); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 1153 | ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode, |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 1154 | &ppool, nullptr)); |
Andrew Scull | da24197 | 2019-01-05 18:17:48 +0000 | [diff] [blame] | 1155 | ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, &ppool)); |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 1156 | ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, &ppool, |
| 1157 | nullptr)); |
| 1158 | ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, &ppool, |
| 1159 | nullptr)); |
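|  | /* The hole is now re-mapped via subtables whose blocks all share the same |
|  |  * attributes, so defrag should merge them back into top-level blocks. */ |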
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1160 | mm_vm_defrag(&ptable, &ppool); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 1161 | EXPECT_THAT( |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1162 | get_ptable(ptable), |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 1163 | AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block, |
| 1164 | _1, TOP_LEVEL)))))); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1165 | mm_vm_fini(&ptable, &ppool); |
Andrew Walbran | 6324fc9 | 2018-10-03 11:46:43 +0100 | [diff] [blame] | 1166 | } |
| 1167 | |
Andrew Scull | 232d560 | 2018-10-15 11:07:45 +0100 | [diff] [blame] | 1168 | } /* namespace */ |
Andrew Scull | 3c25745 | 2019-11-26 13:32:50 +0000 | [diff] [blame] | 1169 | |
| 1170 | namespace mm_test |
| 1171 | { |
| 1172 | /** |
| 1173 | * Get an STL representation of the ptable. |
| 1174 | */ |
| 1175 | std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable( |
| 1176 | const struct mm_ptable &ptable) |
| 1177 | { |
| 1178 | std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all; |
| 1179 | const uint8_t root_table_count = arch_mm_stage2_root_table_count(); |
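|  | /* The root tables are allocated contiguously, starting at ptable.root. */ |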
| 1180 | for (uint8_t i = 0; i < root_table_count; ++i) { |
| 1181 | all.push_back(get_table( |
| 1182 | pa_add(ptable.root, i * sizeof(struct mm_page_table)))); |
| 1183 | } |
| 1184 | return all; |
| 1185 | } |
| 1186 | |
| 1187 | } /* namespace mm_test */ |