/* SPDX-License-Identifier: GPL-2.0 */
| 2 | #ifndef __SPARC_MMAN_H__ |
| 3 | #define __SPARC_MMAN_H__ |
| 4 | |
| 5 | #include <uapi/asm/mman.h> |
| 6 | |
| 7 | #ifndef __ASSEMBLY__ |
/*
 * Arch hook called by the generic mmap path to validate a requested
 * address/length pair before a mapping is established.  The flags
 * argument is accepted for interface compatibility but ignored here.
 * Returns 0 if the range is acceptable, nonzero otherwise (see the
 * sparc_mmap_check() definition elsewhere in the tree).
 */
#define arch_mmap_check(addr,len,flags) sparc_mmap_check(addr,len)
int sparc_mmap_check(unsigned long addr, unsigned long len);
| 10 | |
| 11 | #ifdef CONFIG_SPARC64 |
| 12 | #include <asm/adi_64.h> |
| 13 | |
| 14 | static inline void ipi_set_tstate_mcde(void *arg) |
| 15 | { |
| 16 | struct mm_struct *mm = arg; |
| 17 | |
| 18 | /* Set TSTATE_MCDE for the task using address map that ADI has been |
| 19 | * enabled on if the task is running. If not, it will be set |
| 20 | * automatically at the next context switch |
| 21 | */ |
| 22 | if (current->mm == mm) { |
| 23 | struct pt_regs *regs; |
| 24 | |
| 25 | regs = task_pt_regs(current); |
| 26 | regs->tstate |= TSTATE_MCDE; |
| 27 | } |
| 28 | } |
| 29 | |
#define arch_calc_vm_prot_bits(prot, pkey) sparc_calc_vm_prot_bits(prot)
/*
 * Translate PROT_ADI in an mmap/mprotect protection argument into the
 * VM_SPARC_ADI vma flag (protection keys are unused on sparc).
 *
 * Side effect: the first time ADI is requested for this address space
 * (context.adi not yet set), MCD tag checking is switched on for the
 * whole mm - TSTATE_MCDE is set in the caller's own saved registers,
 * context.adi is marked, and every other CPU running this mm is told
 * via IPI to do the same for its current task.
 *
 * NOTE(review): context.adi stays set even if the enclosing mmap or
 * mprotect call subsequently fails - confirm that is intended.
 */
static inline unsigned long sparc_calc_vm_prot_bits(unsigned long prot)
{
	if (adi_capable() && (prot & PROT_ADI)) {
		struct pt_regs *regs;

		if (!current->mm->context.adi) {
			regs = task_pt_regs(current);
			regs->tstate |= TSTATE_MCDE;
			current->mm->context.adi = true;
			/* Final arg 0: do not wait for the IPIs to finish. */
			on_each_cpu_mask(mm_cpumask(current->mm),
					 ipi_set_tstate_mcde, current->mm, 0);
		}
		return VM_SPARC_ADI;
	} else {
		return 0;
	}
}
| 48 | |
| 49 | #define arch_vm_get_page_prot(vm_flags) sparc_vm_get_page_prot(vm_flags) |
| 50 | static inline pgprot_t sparc_vm_get_page_prot(unsigned long vm_flags) |
| 51 | { |
| 52 | return (vm_flags & VM_SPARC_ADI) ? __pgprot(_PAGE_MCD_4V) : __pgprot(0); |
| 53 | } |
| 54 | |
#define arch_validate_prot(prot, addr) sparc_validate_prot(prot, addr)
/*
 * Validate an mprotect() protection argument.  Returns 1 if @prot is
 * acceptable, 0 to make the caller fail with EINVAL.
 *
 * Beyond the generic PROT_* bits, sparc accepts PROT_ADI, but only
 * when the CPU supports ADI and, for a nonzero @addr, only when the
 * vma containing @addr is neither PFN/mixed-mapped nor mergeable.
 *
 * NOTE(review): find_vma() is only safe with the mmap lock held for
 * read - verify that every arch_validate_prot() call site holds it
 * when @addr != 0.  Also note the vma lookup covers only the vma at
 * @addr, not the whole [addr, addr+len) range being mprotect()ed.
 */
static inline int sparc_validate_prot(unsigned long prot, unsigned long addr)
{
	/* Reject any bits outside the set sparc understands. */
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_ADI))
		return 0;
	if (prot & PROT_ADI) {
		if (!adi_capable())
			return 0;

		if (addr) {
			struct vm_area_struct *vma;

			vma = find_vma(current->mm, addr);
			if (vma) {
				/* ADI can not be enabled on PFN
				 * mapped pages
				 */
				if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
					return 0;

				/* Mergeable pages can become unmergeable
				 * if ADI is enabled on them even if they
				 * have identical data on them. This can be
				 * because ADI enabled pages with identical
				 * data may still not have identical ADI
				 * tags on them. Disallow ADI on mergeable
				 * pages.
				 */
				if (vma->vm_flags & VM_MERGEABLE)
					return 0;
			}
		}
	}
	return 1;
}
| 90 | #endif /* CONFIG_SPARC64 */ |
| 91 | |
| 92 | #endif /* __ASSEMBLY__ */ |
| 93 | #endif /* __SPARC_MMAN_H__ */ |