// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/elf.h>

#if 0 /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address >> PAGE_SHIFT;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* Index into the huge page by the page-sized sub-offset. */
	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned)
 * hugetlb entry.  Otherwise, it returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

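/*
 * Worked example for the check above (illustrative): a present 2MB
 * mapping has both _PAGE_PRESENT and _PAGE_PSE set, and a non-present
 * migration/hwpoison entry has _PAGE_PRESENT clear, so in either case
 * the masked value differs from plain _PAGE_PRESENT and pmd_huge()
 * returns 1.  A pmd that points to a page table has _PAGE_PRESENT set
 * but _PAGE_PSE clear, which yields exactly _PAGE_PRESENT, so
 * pmd_huge() returns 0.
 */

/* On x86, a PUD-level leaf (1GB) mapping is marked by _PAGE_PSE. */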
int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
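	/*
	 * get_mmap_base(1) is the bottom-up (legacy) mmap base, typically
	 * TASK_UNMAPPED_BASE plus any randomization applied at exec time.
	 */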
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_32bit_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

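	/*
	 * Ask for huge-page alignment: the mask selects the low bits
	 * (between PAGE_SHIFT and the huge page shift) that must be zero
	 * in the returned address.
	 */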
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
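	/*
	 * Search below the top-down mmap base, but keep low_limit at
	 * PAGE_SIZE so address zero is never handed out.
	 */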
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.  On failure vm_unmapped_area() returns a negative
	 * errno, which is never page-aligned, hence the check below.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

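	/*
	 * Honour a hint address if, once aligned down to the huge page
	 * boundary, it is valid and the range is not already occupied
	 * (vm_start_gap() accounts for a stack guard gap).
	 */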
	if (addr) {
		addr &= huge_page_mask(h);
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}

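	/*
	 * Dispatch on the mm's unmapped-area callback: processes using the
	 * legacy layout search bottom-up, all others search top-down.
	 */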
get_unmapped_area:
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (size == PMD_SIZE)
		return true;
	else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))
		return true;
	else
		return false;
}
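
/*
 * arch_hugetlb_valid_size() backs the "hugepagesz=" boot parameter: for
 * example, "hugepagesz=2M" is always accepted on x86-64, while
 * "hugepagesz=1G" is accepted only when the CPU advertises gigantic
 * pages (X86_FEATURE_GBPAGES, the "pdpe1gb" CPUID flag).
 */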

#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
	/*
	 * With compaction or CMA we can allocate gigantic pages at
	 * runtime, so register the 1GB hstate (order PUD_SHIFT -
	 * PAGE_SHIFT, i.e. 2^18 base pages) when the CPU supports it.
	 */
	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif /* CONFIG_CONTIG_ALLOC */
#endif /* CONFIG_X86_64 */