/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include <stdalign.h>
#include <stdbool.h>
#include <stdint.h>

#include "hf/arch/mm.h"

#include "hf/addr.h"
#include "hf/mpool.h"
#include "hf/static_assert.h"

/* Keep macro alignment */
/* clang-format off */

#define PAGE_SIZE (1 << PAGE_BITS)
#define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))
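
/*
 * Worked example (illustrative): assuming PAGE_BITS == 12 (4 KiB pages) and
 * an 8-byte pte_t, PAGE_SIZE is 1 << 12 = 4096 bytes and MM_PTE_PER_PAGE is
 * 4096 / 8 = 512 entries per table.
 */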

/* The following are arch-independent page mapping modes. */
#define MM_MODE_R UINT32_C(0x0001) /* read */
#define MM_MODE_W UINT32_C(0x0002) /* write */
#define MM_MODE_X UINT32_C(0x0004) /* execute */
#define MM_MODE_D UINT32_C(0x0008) /* device */

/*
 * Memory in stage-1 is either valid (present) or invalid (absent).
 *
 * Memory in stage-2 has more states to track sharing, borrowing and giving of
 * memory. The states are made up of three parts:
 *
 *  1. V = valid/invalid    : Whether the memory is part of the VM's address
 *                            space. A fault will be generated if accessed when
 *                            invalid.
 *  2. O = owned/unowned    : Whether the memory is owned by the VM.
 *  3. X = exclusive/shared : Whether access is exclusive to the VM or shared
 *                            with at most one other.
 *
 * These parts compose to form the following states:
 *
 *  -  V  O  X : Owner of memory with exclusive access.
 *  -  V  O !X : Owner of memory with access shared with at most one other VM.
 *  -  V !O  X : Borrower of memory with exclusive access.
 *  -  V !O !X : Borrower of memory where access is shared with the owner.
 *  - !V  O  X : Owner of memory lent to a VM that has exclusive access.
 *
 *  - !V  O !X : Unused. Owner of shared memory always has access.
 *  - !V !O  X : Unused. Next entry is used for invalid memory.
 *
 *  - !V !O !X : Invalid memory. Memory is unrelated to the VM.
 *
 * Modes are selected so that the owner of exclusive memory is the default;
 * a worked example follows the mode definitions below.
 */
#define MM_MODE_INVALID UINT32_C(0x0010)
#define MM_MODE_UNOWNED UINT32_C(0x0020)
#define MM_MODE_SHARED UINT32_C(0x0040)

/* The mask for a mode that is considered unmapped. */
#define MM_MODE_UNMAPPED_MASK (MM_MODE_INVALID | MM_MODE_UNOWNED)
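
/*
 * Worked example (illustrative, following the state table above): a page of
 * normal memory that its owner has lent to another VM with exclusive access
 * would be mapped with mode (MM_MODE_R | MM_MODE_W | MM_MODE_INVALID) in the
 * owner's stage-2 table (!V O X), and with (MM_MODE_R | MM_MODE_W |
 * MM_MODE_UNOWNED) in the borrower's (V !O X); the access bits shown are an
 * assumption for the example.
 */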

#define MM_FLAG_COMMIT 0x01
#define MM_FLAG_UNMAP 0x02
#define MM_FLAG_STAGE1 0x04

/* clang-format on */

#define MM_PPOOL_ENTRY_SIZE sizeof(struct mm_page_table)

struct mm_page_table {
	alignas(PAGE_SIZE) pte_t entries[MM_PTE_PER_PAGE];
};
static_assert(sizeof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must take exactly one page.");
static_assert(alignof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must be page aligned.");

struct mm_ptable {
	/** Address of the root of the page table. */
	paddr_t root;
};

/** The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/** Represents the currently locked stage-1 page table of the hypervisor. */
struct mm_stage1_locked {
	struct mm_ptable *ptable;
};

void mm_vm_enable_invalidation(void);

bool mm_ptable_init(struct mm_ptable *t, int flags, struct mpool *ppool);
ptable_addr_t mm_ptable_addr_space_end(int flags);

bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool);
void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool);
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool mm_vm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
			    uint32_t mode, struct mpool *ppool);
void mm_vm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
			   uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
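
/*
 * Illustrative two-phase usage sketch (hypothetical caller): the prepare step
 * does all of the allocation, so that once it succeeds the commit step cannot
 * fail part-way through an update, e.g.:
 *
 *	if (!mm_vm_identity_prepare(t, begin, end, mode, ppool)) {
 *		return false;	// Out of memory; the table is unchanged.
 *	}
 *	mm_vm_identity_commit(t, begin, end, mode, ppool, &ipa);
 */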
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
		 struct mpool *ppool);
void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool);
void mm_vm_dump(struct mm_ptable *t);
bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
		    uint32_t *mode);

struct mm_stage1_locked mm_lock_stage1(void);
void mm_unlock_stage1(struct mm_stage1_locked *lock);
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
		      paddr_t end, uint32_t mode, struct mpool *ppool);
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool);
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool);
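
/*
 * Illustrative usage sketch (hypothetical caller): hypervisor stage-1 mappings
 * are updated while the stage-1 lock is held, e.g.:
 *
 *	struct mm_stage1_locked locked = mm_lock_stage1();
 *	void *va = mm_identity_map(locked, begin, end, MM_MODE_R | MM_MODE_W,
 *				   ppool);
 *	mm_unlock_stage1(&locked);
 */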

bool mm_init(struct mpool *ppool);