/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include <stdalign.h>
#include <stdbool.h>
#include <stdint.h>

#include "hf/arch/mm.h"

#include "hf/addr.h"
#include "hf/mpool.h"
#include "hf/static_assert.h"

/* Keep macro alignment */
/* clang-format off */

#define PAGE_SIZE (1 << PAGE_BITS)
#define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))

/* The following are arch-independent page mapping modes. */
#define MM_MODE_R UINT32_C(0x0001) /* read */
#define MM_MODE_W UINT32_C(0x0002) /* write */
#define MM_MODE_X UINT32_C(0x0004) /* execute */
#define MM_MODE_D UINT32_C(0x0008) /* device */
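
/*
 * These bits combine freely; for instance, an illustrative mode for a
 * read-write device mapping would be (MM_MODE_R | MM_MODE_W | MM_MODE_D).
 */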

/*
 * Memory in stage-1 is either valid (present) or invalid (absent).
 *
 * Memory in stage-2 has more states to track sharing, borrowing and giving of
 * memory. The states are made up of three parts:
 *
 *  1. V = valid/invalid    : Whether the memory is part of the VM's address
 *                            space. A fault will be generated if accessed when
 *                            invalid.
 *  2. O = owned/unowned    : Whether the memory is owned by the VM.
 *  3. X = exclusive/shared : Whether access is exclusive to the VM or shared
 *                            with at most one other.
 *
 * These parts compose to form the following states:
 *
 *  -  V  O  X : Owner of memory with exclusive access.
 *  -  V  O !X : Owner of memory with access shared with at most one other VM.
 *  -  V !O  X : Borrower of memory with exclusive access.
 *  -  V !O !X : Borrower of memory where access is shared with the owner.
 *  - !V  O  X : Owner of memory lent to a VM that has exclusive access.
 *
 *  - !V  O !X : Unused. The owner of shared memory always has access.
 *  - !V !O  X : Unused. The next entry is used for invalid memory.
 *
 *  - !V !O !X : Invalid memory. The memory is unrelated to the VM.
 *
 * The modes are selected so that the owner of memory with exclusive access is
 * the default, i.e. all of the state bits below are clear.
 */
#define MM_MODE_INVALID UINT32_C(0x0010)
#define MM_MODE_UNOWNED UINT32_C(0x0020)
#define MM_MODE_SHARED  UINT32_C(0x0040)

/* The mask for a mode that is considered unmapped. */
#define MM_MODE_UNMAPPED_MASK (MM_MODE_INVALID | MM_MODE_UNOWNED)
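
/*
 * Illustrative composition of the state bits (derived from the table above,
 * not additional API): lending memory with exclusive access leaves the
 * owner's entry in state !V O X, i.e. MM_MODE_INVALID together with whatever
 * access bits apply, while the borrower maps it as V !O X, i.e.
 * MM_MODE_UNOWNED. A borrower of shared memory (V !O !X) would instead use
 * (MM_MODE_UNOWNED | MM_MODE_SHARED).
 */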

#define MM_FLAG_COMMIT 0x01
#define MM_FLAG_UNMAP  0x02
#define MM_FLAG_STAGE1 0x04

/* clang-format on */

#define MM_PPOOL_ENTRY_SIZE sizeof(struct mm_page_table)
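
/*
 * A minimal sketch of setting up a page pool sized for page tables, assuming
 * a hypothetical, suitably aligned buffer `buf` donated by the caller:
 *
 *   static alignas(PAGE_SIZE) uint8_t buf[16 * PAGE_SIZE];
 *   struct mpool ppool;
 *
 *   mpool_init(&ppool, MM_PPOOL_ENTRY_SIZE);
 *   mpool_add_chunk(&ppool, buf, sizeof(buf));
 */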

struct mm_page_table {
	alignas(PAGE_SIZE) pte_t entries[MM_PTE_PER_PAGE];
};
static_assert(sizeof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must take exactly one page.");
static_assert(alignof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must be page aligned.");

struct mm_ptable {
	/** Address of the root of the page table. */
	paddr_t root;
};

/** The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/** Represents the currently locked stage-1 page table of the hypervisor. */
struct mm_stage1_locked {
	struct mm_ptable *ptable;
};

void mm_vm_enable_invalidation(void);

bool mm_ptable_init(struct mm_ptable *t, int flags, struct mpool *ppool);
ptable_addr_t mm_ptable_addr_space_end(int flags);

bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool);
void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool);

bool mm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool);
void *mm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool);

bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool mm_vm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
			    uint32_t mode, struct mpool *ppool);
void mm_vm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
			   uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
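
/*
 * The prepare/commit pairs split a mapping into a fallible allocation step
 * and a non-failing application step. A sketch of the intended usage,
 * assuming `t` and `ppool` are already initialised and `ipa` is an ipaddr_t
 * out-parameter:
 *
 *   if (mm_vm_identity_prepare(t, begin, end, mode, ppool)) {
 *           mm_vm_identity_commit(t, begin, end, mode, ppool, &ipa);
 *   }
 */
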
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
		 struct mpool *ppool);
void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool);
void mm_vm_dump(struct mm_ptable *t);
bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
		    uint32_t *mode);
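
/*
 * mm_vm_get_mode reports a single mode only when the whole range is mapped
 * uniformly, so a caller can validate a range in one call. A sketch,
 * assuming `t`, `begin` and `end` as above and that an entry counts as
 * unmapped when both bits of MM_MODE_UNMAPPED_MASK are set:
 *
 *   uint32_t mode;
 *   if (mm_vm_get_mode(t, begin, end, &mode) &&
 *       (mode & MM_MODE_UNMAPPED_MASK) != MM_MODE_UNMAPPED_MASK) {
 *           ... the range is mapped with a uniform mode ...
 *   }
 */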

struct mm_stage1_locked mm_lock_stage1(void);
void mm_unlock_stage1(struct mm_stage1_locked *lock);
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
		      paddr_t end, uint32_t mode, struct mpool *ppool);
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool);
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool);
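
/*
 * Stage-1 updates are made while holding the stage-1 lock. A minimal sketch
 * of mapping a device region into the hypervisor's address space
 * (illustrative only; `begin`, `end` and `ppool` are assumed to come from
 * the caller):
 *
 *   struct mm_stage1_locked locked = mm_lock_stage1();
 *   void *va = mm_identity_map(locked, begin, end,
 *                              MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool);
 *   mm_unlock_stage1(&locked);
 *
 * A NULL result would indicate that the mapping could not be made.
 */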

bool mm_init(struct mpool *ppool);