// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![cfg_attr(not(test), no_std)]

extern crate alloc;

use core::arch::asm;
use core::iter::zip;
use core::{fmt, panic};

use alloc::boxed::Box;
use alloc::format;
use alloc::string::{String, ToString};
use alloc::vec::Vec;
use log::debug;

use bitflags::bitflags;
use packed_struct::prelude::*;

use self::descriptor::DescriptorType;

use self::descriptor::{Attributes, DataAccessPermissions, Descriptor, Shareability};
use self::kernel_space::KernelSpace;
use self::page_pool::{Page, PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};

mod descriptor;
pub mod kernel_space;
pub mod page_pool;
mod region;
mod region_pool;

/// The first level of the memory descriptor tables, i.e. the table that TTBR0_EL1 points to when
/// the mapping is activated.
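/// Its 64 descriptors are 8 bytes each, so the table is 512 bytes long and `repr(align(512))`
/// keeps it aligned to its own size.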
#[repr(C, align(512))]
pub struct BaseTable {
    pub descriptors: [Descriptor; 64],
}

impl BaseTable {
    pub fn new() -> Self {
        BaseTable {
            descriptors: unsafe { core::mem::transmute([0u64; 64]) },
        }
    }
}

/// Translation table error type
#[derive(Debug)]
pub enum XlatError {
    InvalidParameterError(String),
    AllocationError(String),
    AlignmentError(String),
    Overflow,
    InvalidOperation(String),
    Overlap,
    NotFound,
    RegionPoolError(RegionPoolError),
}

/// Memory attributes
///
/// MAIR_EL1 should be configured in the same way in startup.s
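///
/// For illustration only: a MAIR_EL1 value consistent with these indices would be
/// 0x0000_0000_0000_ff00, i.e. Attr0 = 0x00 (Device-nGnRnE) and Attr1 = 0xff (Normal memory,
/// Inner/Outer Write-Back Write-Allocate); the authoritative value is the one set in startup.s.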
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
    #[default]
    Device_nGnRnE = 0x00,
    Normal_IWBWA_OWBWA = 0x01,
}

bitflags! {
    #[derive(Debug, Clone, Copy)]
    pub struct MemoryAccessRights : u32 {
        const R = 0b00000001;
        const W = 0b00000010;
        const X = 0b00000100;
        const NS = 0b00001000;

        const RW = Self::R.bits() | Self::W.bits();
        const RX = Self::R.bits() | Self::X.bits();
        const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();

        const USER = 0b00010000;
        const DEVICE = 0b00100000;
    }
}

impl From<MemoryAccessRights> for Attributes {
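    /// Worked example of this conversion: `MemoryAccessRights::RX` (no `USER`, no `W`) yields
    /// `uxn = true`, `pxn = false` and `DataAccessPermissions::ReadOnly_None`, i.e. a mapping
    /// that is executable and readable at the privileged level only and is never writable.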
    fn from(access_rights: MemoryAccessRights) -> Self {
        let data_access_permissions = match (
            access_rights.contains(MemoryAccessRights::USER),
            access_rights.contains(MemoryAccessRights::W),
        ) {
            (false, false) => DataAccessPermissions::ReadOnly_None,
            (false, true) => DataAccessPermissions::ReadWrite_None,
            (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
            (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
        };

        let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
            MemoryAttributesIndex::Device_nGnRnE
        } else {
            MemoryAttributesIndex::Normal_IWBWA_OWBWA
        };

        Attributes {
            uxn: !access_rights.contains(MemoryAccessRights::X)
                || !access_rights.contains(MemoryAccessRights::USER),
            pxn: !access_rights.contains(MemoryAccessRights::X)
                || access_rights.contains(MemoryAccessRights::USER),
            contiguous: false,
            not_global: true,
            access_flag: true,
            shareability: Shareability::NonShareable,
            data_access_permissions,
            non_secure: access_rights.contains(MemoryAccessRights::NS),
            mem_attr_index,
        }
    }
}

#[derive(PartialEq)]
struct Block {
    pa: usize,
    va: usize,
    granule: usize,
}

impl Block {
    fn new(pa: usize, va: usize, granule: usize) -> Self {
        assert!(Xlat::GRANULE_SIZES.contains(&granule));
        Self { pa, va, granule }
    }
}

impl fmt::Debug for Block {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Block")
            .field("pa", &format_args!("{:#010x}", self.pa))
            .field("va", &format_args!("{:#010x}", self.va))
            .field("granule", &format_args!("{:#010x}", self.granule))
            .finish()
    }
}

pub struct Xlat {
    base_table: Box<BaseTable>,
    page_pool: PagePool,
    regions: RegionPool<VirtualRegion>,
}

/// Memory translation table handling
/// # High level interface
/// * allocate and map zero initialized region (with or without VA)
/// * allocate and map memory region and load contents (with or without VA)
/// * map memory region by PA (with or without VA)
/// * unmap memory region by VA
/// * query PA by VA
/// * set access rights of mapped memory areas
/// * activate mapping
///
/// # Debug features
/// * print translation table details
///
/// # Region level interface
/// * map regions
/// * unmap region
/// * find a mapped region which contains an address range
/// * find empty area for region
/// * set access rights for a region
/// * create blocks by region
///
/// # Block level interface
/// * map block
/// * unmap block
/// * set access rights of block
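///
/// # Example
///
/// A minimal usage sketch (not compiled as a doctest). It assumes a `PagePool` instance called
/// `page_pool` has been constructed elsewhere and that MAIR_EL1 has been set up by startup.s;
/// the device address below is an arbitrary example value.
///
/// ```ignore
/// let mut xlat = Xlat::new(page_pool);
///
/// // Map a 4 KB device region at a VA chosen by the translation table.
/// let va = xlat
///     .map_physical_address_range(
///         None,
///         0x0900_0000,
///         0x1000,
///         MemoryAccessRights::RW | MemoryAccessRights::DEVICE,
///     )
///     .unwrap();
/// assert_eq!(0x0900_0000, xlat.get_pa_by_va(va, 0x1000).unwrap());
///
/// // Load the translation tables into TTBR0_EL1 with ASID 0.
/// xlat.activate(0);
/// ```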
impl Xlat {
    const BASE_VA: usize = 0x4000_0000;
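    // Block sizes that a single descriptor can map at translation levels 1, 2 and 3 with a 4 KB
    // translation granule (1 GB, 2 MB and 4 KB); index 0 is a placeholder for level 0, which
    // this code never maps directly.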
    pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];

    pub fn new(page_pool: PagePool) -> Self {
        let mut regions = RegionPool::new();
        regions
            .add(VirtualRegion::new(
                Self::BASE_VA,
                0x1_0000_0000 - Self::BASE_VA,
            ))
            .unwrap();
        Self {
            base_table: Box::new(BaseTable::new()),
            page_pool,
            regions,
        }
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with
    /// the initial data
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * data: Data to be loaded to the memory area
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
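    /// # Example
    /// A minimal sketch (not compiled as a doctest), assuming an `xlat` instance and a `data`
    /// byte slice defined elsewhere:
    /// ```ignore
    /// let va = xlat.allocate_initalized_range(None, &data, MemoryAccessRights::RX)?;
    /// ```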
    pub fn allocate_initalized_range(
        &mut self,
        va: Option<usize>,
        data: &[u8],
        access_rights: MemoryAccessRights,
    ) -> Result<usize, XlatError> {
        let mut pages = self.page_pool.allocate_pages(data.len()).map_err(|e| {
            XlatError::AllocationError(format!(
                "Cannot allocate pages for {} bytes ({:?})",
                data.len(),
                e
            ))
        })?;

        pages.copy_data_to_page(data);

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with zeros
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_zero_init_range(
        &mut self,
        va: Option<usize>,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<usize, XlatError> {
        let mut pages = self.page_pool.allocate_pages(length).map_err(|e| {
            XlatError::AllocationError(format!("Cannot allocate pages for {length} bytes ({e:?})"))
        })?;

        pages.zero_init();

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Map memory area by physical address
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * pa: Physical address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn map_physical_address_range(
        &mut self,
        va: Option<usize>,
        pa: usize,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<usize, XlatError> {
        let resource = PhysicalRegion::PhysicalAddress(pa);
        let region = if let Some(required_va) = va {
            self.regions.acquire(required_va, length, resource)
        } else {
            self.regions.allocate(length, resource)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Unmap memory area by virtual address
    /// # Arguments
    /// * va: Virtual address
    /// * length: Length of the memory area in bytes
    pub fn unmap_virtual_address_range(
        &mut self,
        va: usize,
        length: usize,
    ) -> Result<(), XlatError> {
        let pa = self.get_pa_by_va(va, length)?;

        let region_to_release = VirtualRegion::new_with_pa(pa, va, length);

        self.unmap_region(&region_to_release)?;

        self.regions
            .release(region_to_release)
            .map_err(XlatError::RegionPoolError)
    }

    /// Query physical address by virtual address range. Only returns a value if the memory area
    /// is mapped as a contiguous area.
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// # Return value
    /// * Physical address of the mapped memory
    pub fn get_pa_by_va(&self, va: usize, length: usize) -> Result<usize, XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        Ok(containing_region.get_pa_for_va(va))
    }

    /// Sets the memory access rights of a memory area
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: New memory access rights of the area
    pub fn set_access_rights(
        &mut self,
        va: usize,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<(), XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
        self.map_region(region, access_rights.into())?;

        Ok(())
    }

    /// Activate memory mapping represented by the object
    /// # Arguments
    /// * asid: ASID of the table base address
    pub fn activate(&self, asid: u8) {
        let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.descriptors.as_ptr() as u64);
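        // TTBR0_EL1 layout: the ASID lives in bits [63:48], the translation table base address
        // in the lower bits.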
        let ttbr = ((asid as u64) << 48) | base_table_pa;
        unsafe {
            #[cfg(target_arch = "aarch64")]
            asm!(
                "msr ttbr0_el1, {0}
                isb",
                in(reg) ttbr)
        };
    }

    /// Prints the translation tables to debug console recursively
    pub fn print(&self) {
        debug!(
            "Xlat table -> {:#010x}",
            self.base_table.descriptors.as_ptr() as u64
        );
        Self::print_table(1, 0, &self.base_table.descriptors);
    }

    /// Prints a single translation table to the debug console
    /// # Arguments
    /// * level: Level of the translation table
    /// * va: Base virtual address of the table
    /// * table: Table entries
    pub fn print_table(level: usize, va: usize, table: &[Descriptor]) {
        let level_prefix = match level {
            0 | 1 => "|-",
            2 => "| |-",
            _ => "| | |-",
        };

        for (descriptor, va) in zip(table, (va..).step_by(Self::GRANULE_SIZES[level])) {
            match descriptor.get_descriptor_type(level) {
                DescriptorType::Block => debug!(
                    "{} {:#010x} Block -> {:#010x}",
                    level_prefix,
                    va,
                    descriptor.get_block_output_address(level)
                ),
                DescriptorType::Table => {
                    let next_level_table = unsafe { descriptor.get_next_level_table(level) };
                    debug!(
                        "{} {:#010x} Table -> {:#010x}",
                        level_prefix,
                        va,
                        next_level_table.as_ptr() as usize
                    );
                    Self::print_table(level + 1, va, next_level_table);
                }
                _ => {}
            }
        }
    }

    /// Adds a memory region to the translation table. The function splits the region into blocks
    /// and uses the block level functions to do the mapping.
    /// # Arguments
    /// * region: Memory region object
    /// # Return value
    /// * Virtual address of the mapped memory
    fn map_region(
        &mut self,
        region: VirtualRegion,
        attributes: Attributes,
    ) -> Result<usize, XlatError> {
        let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
        for block in blocks {
            self.map_block(block, attributes.clone());
        }

        Ok(region.base())
    }

    /// Removes a memory region from the translation table. The function splits the region into
    /// blocks and uses the block level functions to do the unmapping.
    /// # Arguments
    /// * region: Memory region object
    fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
        let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
        for block in blocks {
            self.unmap_block(block);
        }

        Ok(())
    }

    /// Find the mapped region that contains the whole given area
    /// # Arguments
    /// * va: Virtual address of the area to look for
    /// * length: Length of the area in bytes
    /// # Return value
    /// * Reference to the virtual region if found
    fn find_containing_region(&self, va: usize, length: usize) -> Option<&VirtualRegion> {
        self.regions.find_containing_region(va, length).ok()
    }

    /// Splits a memory region into blocks that match the granule sizes of the translation table.
    /// # Arguments
    /// * pa: Physical address
    /// * va: Virtual address
    /// * length: Region size in bytes
    /// # Return value
    /// * Vector of granule sized blocks
    fn split_region_to_blocks(
        mut pa: usize,
        mut va: usize,
        mut length: usize,
    ) -> Result<Vec<Block>, XlatError> {
        let min_granule_mask = Self::GRANULE_SIZES.last().unwrap() - 1;

        if length == 0 {
            return Err(XlatError::InvalidParameterError(
                "Length cannot be 0".to_string(),
            ));
        }

        if pa & min_granule_mask != 0
            || va & min_granule_mask != 0
            || length & min_granule_mask != 0
        {
            return Err(XlatError::InvalidParameterError(format!(
                "Addresses and length must be aligned {pa:#010x} {va:#010x} {length:#x}"
            )));
        }

        let mut pages = Vec::new();

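        // Greedy split: in every iteration emit the largest granule to which both the current PA
        // and VA are aligned and which still fits into the remaining length.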
        while length > 0 {
            for granule in &Self::GRANULE_SIZES {
                // Skip the unused level 0 placeholder; `*granule - 1` would underflow for it.
                if *granule == 0 {
                    continue;
                }

                if (pa | va) & (*granule - 1) == 0 && length >= *granule {
                    pages.push(Block::new(pa, va, *granule));
                    pa += *granule;
                    va = va.checked_add(*granule).ok_or(XlatError::Overflow)?;

                    length -= *granule;
                    break;
                }
            }
        }

        Ok(pages)
    }

    /// Add block to memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation table entry
    /// * attributes: Memory block's permissions, flags
    fn map_block(&mut self, block: Block, attributes: Attributes) {
        Self::set_block_descriptor_recursively(
            attributes,
            block.pa,
            block.va,
            block.granule,
            1,
            self.base_table.descriptors.as_mut_slice(),
            &self.page_pool,
        );
    }

    /// Adds the block descriptor to the translation table along with all the intermediate tables
    /// needed to reach the required granule.
    /// # Arguments
    /// * attributes: Memory block's permissions, flags
    /// * pa: Physical address
    /// * va: Virtual address
    /// * granule: Translation granule in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can allocate pages for the translation tables
    fn set_block_descriptor_recursively(
        attributes: Attributes,
        pa: usize,
        va: usize,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];

        // We reached the required granule level
        if Self::GRANULE_SIZES[level] == granule {
            descriptor.set_block_descriptor(level, pa, attributes);
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
                unsafe {
                    let next_table = page.get_as_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }
                Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va & (Self::GRANULE_SIZES[level] - 1),
                    granule,
                    level + 1,
                    unsafe { descriptor.get_next_level_table_mut(level) },
                    page_pool,
                )
            }
            DescriptorType::Block => {
                // Saving current descriptor details
                let current_va = va & !(Self::GRANULE_SIZES[level] - 1);
                let current_pa = descriptor.get_block_output_address(level);
                let current_attributes = descriptor.get_block_attributes(level);

                // Replace block descriptor by table descriptor
                let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
                unsafe {
                    let next_table = page.get_as_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }

                // Explode block descriptor to table entries
                for exploded_va in (current_va..(current_va + Self::GRANULE_SIZES[level]))
                    .step_by(Self::GRANULE_SIZES[level + 1])
                {
                    let offset = exploded_va - current_va;
                    Self::set_block_descriptor_recursively(
                        current_attributes.clone(),
                        current_pa + offset,
                        exploded_va & (Self::GRANULE_SIZES[level] - 1),
                        Self::GRANULE_SIZES[level + 1],
                        level + 1,
                        unsafe { descriptor.get_next_level_table_mut(level) },
                        page_pool,
                    )
                }

                // Invoke self to continue recursion on the newly created level
                Self::set_block_descriptor_recursively(
                    attributes, pa, va, granule, level, table, page_pool,
                );
            }
            DescriptorType::Table => Self::set_block_descriptor_recursively(
                attributes,
                pa,
                va & (Self::GRANULE_SIZES[level] - 1),
                granule,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(level) },
                page_pool,
            ),
        }
    }

    /// Remove block from memory mapping
    /// # Arguments
    /// * block: memory block that can be represented by a single translation entry
    fn unmap_block(&mut self, block: Block) {
        Self::remove_block_descriptor_recursively(
            block.va,
            block.granule,
            1,
            self.base_table.descriptors.as_mut_slice(),
            &self.page_pool,
        );
    }

    /// Removes the block descriptor from the translation table along with all the intermediate
    /// tables which become empty during the removal process.
    /// # Arguments
    /// * va: Virtual address
    /// * granule: Translation granule in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can release the pages of empty tables
    fn remove_block_descriptor_recursively(
        va: usize,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];

        // We reached the required granule level
        if Self::GRANULE_SIZES[level] == granule {
            descriptor.set_block_descriptor_to_invalid(level);
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Cannot remove block from non-existing table");
            }
            DescriptorType::Block => {
                panic!("Cannot remove block with different granule");
            }
            DescriptorType::Table => {
                let next_level_table = unsafe { descriptor.get_next_level_table_mut(level) };
                Self::remove_block_descriptor_recursively(
                    va & (Self::GRANULE_SIZES[level] - 1),
                    granule,
                    level + 1,
                    next_level_table,
                    page_pool,
                );

                if next_level_table.iter().all(|d| !d.is_valid()) {
                    // Empty table
                    let mut page = unsafe {
                        Pages::from_slice(descriptor.set_table_descriptor_to_invalid(level))
                    };
                    page.zero_init();
                    page_pool.release_pages(page).unwrap();
                }
            }
        }
    }

    fn get_descriptor(&mut self, va: usize, granule: usize) -> &mut Descriptor {
        Self::walk_descriptors(va, granule, 0, &mut self.base_table.descriptors)
    }

    fn walk_descriptors(
        va: usize,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
    ) -> &mut Descriptor {
        // Get descriptor of the current level
        let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];

        if Self::GRANULE_SIZES[level] == granule {
            return descriptor;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Invalid descriptor");
            }
            DescriptorType::Block => {
                panic!("Cannot split existing block descriptor to table");
            }
            DescriptorType::Table => Self::walk_descriptors(
                va & (Self::GRANULE_SIZES[level] - 1),
                granule,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(level) },
            ),
        }
    }
}

#[test]
fn test_split_to_pages() {
    let pages = Xlat::split_region_to_blocks(0x3fff_c000, 0x3fff_c000, 0x4020_5000).unwrap();
    assert_eq!(Block::new(0x3fff_c000, 0x3fff_c000, 0x1000), pages[0]);
    assert_eq!(Block::new(0x3fff_d000, 0x3fff_d000, 0x1000), pages[1]);
    assert_eq!(Block::new(0x3fff_e000, 0x3fff_e000, 0x1000), pages[2]);
    assert_eq!(Block::new(0x3fff_f000, 0x3fff_f000, 0x1000), pages[3]);
    assert_eq!(Block::new(0x4000_0000, 0x4000_0000, 0x4000_0000), pages[4]);
    assert_eq!(Block::new(0x8000_0000, 0x8000_0000, 0x0020_0000), pages[5]);
    assert_eq!(Block::new(0x8020_0000, 0x8020_0000, 0x1000), pages[6]);
}

#[test]
fn test_split_to_pages_unaligned() {
    let pages = Xlat::split_region_to_blocks(0x3fff_c000, 0x3f20_0000, 0x200000).unwrap();
    for (i, block) in pages.iter().enumerate().take(512) {
        assert_eq!(
            Block::new(0x3fff_c000 + (i << 12), 0x3f20_0000 + (i << 12), 0x1000),
            *block
        );
    }
}