/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#include <asm/cputype.h>

#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK	(UL(0xffff) << 48)
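/*
 * The hardware ASID lives in bits 63:48 of the TTBR, which is what
 * TTBR_ASID_MASK covers. When the kernel is unmapped at EL0 (kpti), ASIDs
 * are handed out in pairs: the kernel runs with TTBR bit 48 clear and
 * userspace runs with USER_ASID_FLAG set, so TLB entries created for the
 * two halves of the address space are tagged with different ASIDs.
 */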

#define BP_HARDEN_EL2_SLOTS	4
#define __BP_HARDEN_HYP_VECS_SZ	(BP_HARDEN_EL2_SLOTS * SZ_2K)
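/*
 * Each slot above is a full 2KB EL2 exception vector table, so the hyp
 * vectors region is BP_HARDEN_EL2_SLOTS * SZ_2K bytes in total.
 */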

#ifndef __ASSEMBLY__

#include <linux/refcount.h>

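/*
 * Per-mm architecture context:
 *
 * @id:		ASID allocator cookie: the rollover generation lives in the
 *		upper bits, the 16-bit hardware ASID in the lower bits.
 * @sigpage:	AArch32 signal return trampoline page (compat tasks only).
 * @pinned:	number of users (e.g. arm64_mm_context_get()) that have
 *		pinned the ASID so that it survives rollover.
 * @vdso:	base address of this mm's vDSO mapping.
 * @flags:	MMCF_* bits, e.g. MMCF_AARCH32 for AArch32 executables.
 */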
typedef struct {
	atomic64_t	id;
#ifdef CONFIG_COMPAT
	void		*sigpage;
#endif
	refcount_t	pinned;
	void		*vdso;
	unsigned long	flags;
} mm_context_t;

/*
 * We use atomic64_read() here because the ASID for an 'mm_struct' can
 * be reallocated when scheduling one of its threads following a
 * rollover event (see new_context() and flush_context()). In this case,
 * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
 * may use a stale ASID. This is fine in principle as the new ASID is
 * guaranteed to be clean in the TLB, but the TLBI routines have to take
 * care to handle the following race:
 *
 *    CPU 0                      CPU 1                          CPU 2
 *
 *    // ptep_clear_flush(mm)
 *    xchg_relaxed(pte, 0)
 *    DSB ISHST
 *    old = ASID(mm)
 *         |                      <rollover>
 *         |                      new = new_context(mm)
 *         \-----------------> atomic_set(mm->context.id, new)
 *                              cpu_switch_mm(mm)
 *                                                              // Hardware walk of pte using new ASID
 *    TLBI(old)
 *
 * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
 * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
 * written by CPU 0.
 */
#define ASID(mm)	(atomic64_read(&(mm)->context.id) & 0xffff)
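/*
 * TLB invalidation helpers sample the ASID exactly once through this macro
 * when building their TLBI argument; see for example flush_tlb_page_nosync()
 * in <asm/tlbflush.h>, which does roughly:
 *
 *	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 *
 *	dsb(ishst);
 *	__tlbi(vale1is, addr);
 *
 * If a rollover races with this, the invalidation may target the old ASID,
 * which the comment above explains is safe because the new ASID is
 * guaranteed to be clean in the TLB.
 */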

static inline bool arm64_kernel_unmapped_at_el0(void)
{
	return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}

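/*
 * Spectre-v2 branch predictor hardening: each CPU records a callback that
 * invalidates its branch predictor state, together with the index of the
 * replicated EL2 vector slot (one of BP_HARDEN_EL2_SLOTS) that KVM should
 * use on that CPU.
 */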
typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	int			hyp_vectors_slot;
	bp_hardening_cb_t	fn;

	/*
	 * template_start is only used by the BHB mitigation to identify the
	 * hyp_vectors_slot sequence.
	 */
	const char *template_start;
};

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return this_cpu_ptr(&bp_hardening_data);
}

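/*
 * Invoke this CPU's branch predictor hardening callback, if the Spectre-v2
 * mitigation is enabled and a callback has been installed.
 */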
static inline void arm64_apply_bp_hardening(void)
{
	struct bp_hardening_data *d;

	if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
		return;

	d = arm64_get_bp_hardening_data();
	if (d->fn)
		d->fn();
}

extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
extern bool kaslr_requires_kpti(void);

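/*
 * init_mm starts out on the statically allocated init_pg_dir; paging_init()
 * later switches it over to swapper_pg_dir.
 */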
#define INIT_MM_CONTEXT(name)	\
	.pgd = init_pg_dir,

#endif	/* !__ASSEMBLY__ */
#endif