blob: 44b51b84771f7659a9507fd904a6ed866c6cb8c1 [file] [log] [blame]
/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#pragma once

#include <stdalign.h>
#include <stdbool.h>
#include <stdint.h>

#include "hf/arch/mm.h"

#include "hf/addr.h"
#include "hf/mpool.h"
#include "hf/static_assert.h"
/* Keep macro alignment */
/* clang-format off */

/* Size of one page in bytes; PAGE_BITS is provided by hf/arch/mm.h. */
#define PAGE_SIZE (1 << PAGE_BITS)
/* Number of page table entries that fit in a single page. */
#define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))

/* The following are arch-independent page mapping modes. */
#define MM_MODE_R 0x0001 /* read */
#define MM_MODE_W 0x0002 /* write */
#define MM_MODE_X 0x0004 /* execute */
#define MM_MODE_D 0x0008 /* device */

/*
 * Memory in stage-1 is either valid (present) or invalid (absent).
 *
 * Memory in stage-2 has more states to track sharing, borrowing and giving of
 * memory. The states are made up of three parts:
 *
 *  1. V = valid/invalid    : Whether the memory is part of the VM's address
 *                            space. A fault will be generated if accessed when
 *                            invalid.
 *  2. O = owned/unowned    : Whether the memory is owned by the VM.
 *  3. X = exclusive/shared : Whether access is exclusive to the VM or shared
 *                            with at most one other.
 *
 * These parts compose to form the following state:
 *
 *  -  V  O  X : Owner of memory with exclusive access.
 *  -  V  O !X : Owner of memory with access shared with at most one other VM.
 *  -  V !O  X : Borrower of memory with exclusive access.
 *  -  V !O !X : Borrower of memory where access is shared with the owner.
 *  - !V  O  X : Owner of memory lent to a VM that has exclusive access.
 *
 *  - !V  O !X : Unused. Owner of shared memory always has access.
 *
 *  - !V !O  X : Invalid memory. Memory is unrelated to the VM.
 *  - !V !O !X : Invalid memory. Memory is unrelated to the VM.
 *
 * Modes are selected so that owner of exclusive memory is the default.
 */
#define MM_MODE_INVALID 0x0010
#define MM_MODE_UNOWNED 0x0020
#define MM_MODE_SHARED  0x0040

/*
 * Flags passed to page table operations (see mm_ptable_init below).
 * NOTE(review): names suggest COMMIT = apply (vs. prepare) changes,
 * UNMAP = remove mappings, STAGE1 = operate on a stage-1 table — the
 * authoritative semantics live in mm.c; confirm there before relying on this.
 */
#define MM_FLAG_COMMIT 0x01
#define MM_FLAG_UNMAP  0x02
#define MM_FLAG_STAGE1 0x04

/* clang-format on */
78
/**
 * One level of a page table: a page-sized, page-aligned array of PTEs.
 * The asserts below guarantee it occupies exactly one page and is page
 * aligned, so tables can be allocated directly from a page pool.
 */
struct mm_page_table {
	alignas(PAGE_SIZE) pte_t entries[MM_PTE_PER_PAGE];
};
static_assert(sizeof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must take exactly one page.");
static_assert(alignof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must be page aligned.");
86
/** A page table, referenced by the physical address of its root. */
struct mm_ptable {
	/** Address of the root of the page table. */
	paddr_t root;
};
91
/**
 * The type of addresses stored in the page table.
 * Wide enough for both stage-1 virtual and stage-2 intermediate physical
 * addresses (it aliases uintvaddr_t from hf/addr.h).
 */
typedef uintvaddr_t ptable_addr_t;
94
/**
 * Represents the currently locked stage-1 page table of the hypervisor.
 * Obtained from mm_lock_stage1() and released with mm_unlock_stage1();
 * holding one by value is the token proving the lock is held.
 */
struct mm_stage1_locked {
	struct mm_ptable *ptable;
};
99
/**
 * Enables invalidation for VM page table updates.
 * NOTE(review): the precise TLB-invalidation behaviour is defined in mm.c;
 * confirm there.
 */
void mm_vm_enable_invalidation(void);

/**
 * Initialises the given page table, allocating pages from `ppool`.
 * `flags` takes MM_FLAG_* values (presumably MM_FLAG_STAGE1 selects a
 * stage-1 layout — verify against mm.c). Returns true on success.
 */
bool mm_ptable_init(struct mm_ptable *t, int flags, struct mpool *ppool);
/** Returns the end of the address space covered for the given MM_FLAG_*. */
ptable_addr_t mm_ptable_addr_space_end(int flags);

/** Initialises a VM (stage-2) page table; returns true on success. */
bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool);
/** Frees the VM page table's pages back into `ppool`. */
void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool);
/**
 * Identity-maps the physical range [begin, end) into the VM table with the
 * given MM_MODE_* mode. On success, if `ipa` is non-NULL it presumably
 * receives the IPA of the start of the mapping — confirm against callers.
 * Returns true on success (false e.g. if `ppool` is exhausted).
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			int mode, ipaddr_t *ipa, struct mpool *ppool);
/** Unmaps the physical range [begin, end) from the VM table. */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
		 struct mpool *ppool);
/** Unmaps the hypervisor's own memory from the VM table. */
bool mm_vm_unmap_hypervisor(struct mm_ptable *t, struct mpool *ppool);
/** Defragments the VM table, returning freed pages to `ppool`. */
void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool);
/** Dumps the VM table contents (debugging aid). */
void mm_vm_dump(struct mm_ptable *t);
/**
 * Reads the mapping mode of the IPA range [begin, end) into `*mode`.
 * Returns true when a mode could be determined (presumably only when the
 * whole range shares one mode — verify against mm.c).
 */
bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
		    int *mode);

/** Locks the hypervisor's stage-1 table; pair with mm_unlock_stage1(). */
struct mm_stage1_locked mm_lock_stage1(void);
/** Releases the lock obtained from mm_lock_stage1(). */
void mm_unlock_stage1(struct mm_stage1_locked *lock);
/**
 * Identity-maps [begin, end) into the locked stage-1 table with `mode`.
 * Returns a usable pointer to the mapped region, or NULL-like failure —
 * NOTE(review): exact return-on-failure semantics are in mm.c.
 */
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
		      paddr_t end, int mode, struct mpool *ppool);
/** Unmaps [begin, end) from the locked stage-1 table. */
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool);
/** Defragments the locked stage-1 table. */
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool);

/** One-time initialisation of the memory-management subsystem. */
bool mm_init(struct mpool *ppool);