/*
 * Copyright (C) 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_PROT_H
#define __ASM_PGTABLE_PROT_H

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <linux/const.h>

/*
 * Software defined PTE bits definition.
 *
 * These bits are interpreted by Linux, not by the MMU hardware.
 * PTE_WRITE aliases the hardware DBM bit (bit 51), per the comment
 * below; the remaining bits sit in positions the kernel reserves for
 * software use (bits 55-58).
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)
#define PTE_WRITE		(PTE_DBM)		 /* same as DBM (51) */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
| 32 | |
#ifndef __ASSEMBLY__

#include <asm/pgtable-types.h>

/*
 * Baseline attributes shared by every valid mapping: page/section
 * descriptor type, Access Flag set, and shareability.
 */
#define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

/*
 * Set the not-global (nG) bit only when the kernel runs unmapped at
 * EL0 (runtime decision via arm64_kernel_unmapped_at_el0()).
 * NOTE(review): presumably this is the KPTI case, where kernel and
 * user translations must not share global TLB entries — confirm.
 */
#define PTE_MAYBE_NG		(arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
#define PMD_MAYBE_NG		(arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)

#define PROT_DEFAULT		(_PROT_DEFAULT | PTE_MAYBE_NG)
#define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_MAYBE_NG)

/*
 * Kernel page protections, one per memory type (MAIR attribute index).
 * All are writable, pre-dirtied and non-executable at both ELs
 * (PTE_PXN | PTE_UXN).
 */
#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))

/* Section (block) mapping equivalents of the above. */
#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

/*
 * Note: built on _PROT_DEFAULT (without the MAYBE_NG runtime check),
 * so no nG bit here.
 */
#define _PAGE_DEFAULT		(_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
#define _HYP_PAGE_DEFAULT	_PAGE_DEFAULT

/* Kernel mappings: default is RW, non-executable. */
#define PAGE_KERNEL		__pgprot(PROT_NORMAL)
#define PAGE_KERNEL_RO		__pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
#define PAGE_KERNEL_ROX		__pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
#define PAGE_KERNEL_EXEC	__pgprot(PROT_NORMAL & ~PTE_PXN)
#define PAGE_KERNEL_EXEC_CONT	__pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)

/* EL2 (hypervisor) mappings; PAGE_HYP_EXEC is read-only executable. */
#define PAGE_HYP		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
#define PAGE_HYP_EXEC		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
#define PAGE_HYP_RO		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
#define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)

/*
 * Stage-2 memory attribute encoding for @attr: use the MT_S2_FWB_*
 * encodings when the CPUs implement Stage-2 Force Write-Back
 * (ARM64_HAS_STAGE2_FWB), otherwise the legacy MT_S2_* ones.
 * This is a GNU statement expression evaluated at runtime via
 * cpus_have_const_cap() — it is NOT a compile-time constant.
 */
#define PAGE_S2_MEMATTR(attr)						\
	({								\
		u64 __val;						\
		if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))		\
			__val = PTE_S2_MEMATTR(MT_S2_FWB_ ## attr);	\
		else							\
			__val = PTE_S2_MEMATTR(MT_S2_ ## attr);		\
		__val;							\
	})

/*
 * Stage-2 execute-never bit, dropped when the CPUs have DIC
 * (ARM64_HAS_CACHE_DIC). NOTE(review): presumably XN trapping is only
 * needed for icache maintenance, which DIC makes unnecessary — confirm.
 */
#define PAGE_S2_XN							\
	({								\
		u64 __val;						\
		if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))		\
			__val = 0;					\
		else							\
			__val = PTE_S2_XN;				\
		__val;							\
	})

/* Stage-2 (guest) mappings: read-only by default. */
#define PAGE_S2			__pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(NORMAL) | PTE_S2_RDONLY | PAGE_S2_XN)
#define PAGE_S2_DEVICE		__pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(DEVICE_nGnRE) | PTE_S2_RDONLY | PAGE_S2_XN)

/*
 * Userspace page protections. PAGE_NONE clears PTE_VALID and marks the
 * entry with the software PTE_PROT_NONE bit instead.
 */
#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
/* Execute-only: no PTE_USER, so not readable from EL0. */
#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)

/*
 * mmap() protection maps, indexed by the xwr bits: __Pxwr for private
 * (MAP_PRIVATE) and __Sxwr for shared (MAP_SHARED) mappings.
 * Private writable combinations map as READONLY here — NOTE(review):
 * presumably so the first write faults and triggers copy-on-write;
 * confirm against the fault-handling path.
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_READONLY
#define __P011  PAGE_READONLY
#define __P100  PAGE_EXECONLY
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_READONLY_EXEC
#define __P111  PAGE_READONLY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_EXECONLY
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC

#endif /* __ASSEMBLY__ */

#endif /* __ASM_PGTABLE_PROT_H */