blob: 11a7753333e48ac91bf0399e2fd2226574550706 [file] [log] [blame]
Andrew Scull18834872018-10-12 11:48:09 +01001/*
2 * Copyright 2018 Google LLC
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * https://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Andrew Scullfbc938a2018-08-20 14:09:28 +010017#pragma once
Wedson Almeida Filhofed69022018-07-11 15:39:12 +010018
Andrew Scull4e5f8142018-10-12 14:37:19 +010019#include <assert.h>
20#include <stdalign.h>
Wedson Almeida Filhofed69022018-07-11 15:39:12 +010021#include <stdbool.h>
Wedson Almeida Filho84a30a02018-07-23 20:05:05 +010022#include <stdint.h>
Wedson Almeida Filhofed69022018-07-11 15:39:12 +010023
Andrew Scull18c78fc2018-08-20 12:57:41 +010024#include "hf/arch/mm.h"
25
26#include "hf/addr.h"
Wedson Almeida Filhofed69022018-07-11 15:39:12 +010027
/* Keep macro alignment */
/* clang-format off */

/* PAGE_BITS is provided by the architecture header (hf/arch/mm.h). */
#define PAGE_SIZE (1 << PAGE_BITS)
#define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))


/* The following are arch-independent page mapping modes. */
#define MM_MODE_R 0x0001 /* read */
#define MM_MODE_W 0x0002 /* write */
#define MM_MODE_X 0x0004 /* execute */
#define MM_MODE_D 0x0008 /* device */

/*
 * Memory in stage-1 is either valid (present) or invalid (absent).
 *
 * Memory in stage-2 has more states to track sharing, borrowing and giving of
 * memory. The states are made up of three parts:
 *
 *  1. V = valid/invalid    : Whether the memory is part of the VM's address
 *                            space. A fault will be generated if accessed when
 *                            invalid.
 *  2. O = owned/unowned    : Whether the memory is owned by the VM.
 *  3. X = exclusive/shared : Whether access is exclusive to the VM or shared
 *                            with at most one other.
 *
 * These parts compose to form the following state:
 *
 *  -  V  O  X : Owner of memory with exclusive access.
 *  -  V  O !X : Owner of memory with access shared with at most one other VM.
 *  -  V !O  X : Borrower of memory with exclusive access.
 *  -  V !O !X : Borrower of memory where access is shared with the owner.
 *  - !V  O  X : Owner of memory lent to a VM that has exclusive access.
 *
 *  - !V  O !X : Unused. Owner of shared memory always has access.
 *
 *  - !V !O  X : Invalid memory. Memory is unrelated to the VM.
 *  - !V !O !X : Invalid memory. Memory is unrelated to the VM.
 *
 * Modes are selected so that owner of exclusive memory is the default; i.e.
 * the absence of all three flags below encodes the V/O/X state.
 */
#define MM_MODE_INVALID 0x0010
#define MM_MODE_UNOWNED 0x0020
#define MM_MODE_SHARED  0x0040

/**
 * This flag indicates that memory allocation must not use locks. This is
 * relevant in systems where interlocked operations are only available after
 * virtual memory is enabled.
 */
#define MM_MODE_NOSYNC 0x0080

/**
 * This flag indicates that the mapping is intended to be used in a first
 * stage translation table, which might have different encodings for the
 * attribute bits than the second stage table.
 */
#define MM_MODE_STAGE1 0x0100

/**
 * This flag indicates that no TLB invalidations should be issued for the
 * changes in the page table.
 */
#define MM_MODE_NOINVALIDATE 0x0200

/* clang-format on */
94
/**
 * A table of page table entries (PTEs) that is exactly one page in size and
 * page-aligned, as enforced by the static_asserts below. The alignment lets a
 * table's physical address double as its identity in the page table hierarchy.
 */
struct mm_page_table {
	alignas(PAGE_SIZE) pte_t entries[MM_PTE_PER_PAGE];
};
static_assert(sizeof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must take exactly one page.");
static_assert(alignof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must be page aligned.");
102
Wedson Almeida Filhofed69022018-07-11 15:39:12 +0100103struct mm_ptable {
Andrew Scull1ba470e2018-10-31 15:14:31 +0000104 /** Address of the root of the page table. */
105 paddr_t root;
Wedson Almeida Filhofed69022018-07-11 15:39:12 +0100106};
107
/*
 * Operations on an explicit page table. `mode` is a bitwise combination of
 * the MM_MODE_* flags above (e.g. MM_MODE_STAGE1 selects stage-1 encodings).
 * NOTE(review): the bool results presumably report success/failure — confirm
 * against the definitions in mm.c.
 */
bool mm_ptable_init(struct mm_ptable *t, int mode);
void mm_ptable_fini(struct mm_ptable *t, int mode);
void mm_ptable_dump(struct mm_ptable *t, int mode);
void mm_ptable_defrag(struct mm_ptable *t, int mode);

/*
 * Stage-2 (VM address space) operations: ranges are given as physical
 * addresses and results as intermediate physical addresses (ipaddr_t).
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			int mode, ipaddr_t *ipa);
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode);
bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode);
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode);
/* Translates an intermediate physical address to a physical address via *pa. */
bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa);

/*
 * Operations on the hypervisor's own (stage-1) address space, which use an
 * implicit global page table rather than taking a struct mm_ptable.
 */
bool mm_init(void);
bool mm_cpu_init(void);
/*
 * Identity-maps [begin, end) and returns a pointer through which the range
 * can be accessed. NOTE(review): presumably returns NULL on failure — verify
 * in the implementation before relying on it.
 */
void *mm_identity_map(paddr_t begin, paddr_t end, int mode);
bool mm_unmap(paddr_t begin, paddr_t end, int mode);
void mm_defrag(void);