#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/swapfile.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <linux/sched/task.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */
#include <asm/microcode.h>
#include <asm/kaslr.h>
#include <asm/hypervisor.h>
#include <asm/cpufeature.h>
#include <asm/pti.h>
#include <asm/text-patching.h>
#include <asm/memtype.h>

/*
 * We need to define the tracepoints somewhere, and tlb.c
 * is only compiled when SMP=y.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>

#include "mm_internal.h"

/*
 * Tables translating between page_cache_type_t and pte encoding.
 *
 * The default values are defined statically as the minimal supported mode;
 * WC and WT fall back to UC-.  pat_init() updates these values to support
 * more cache modes, WC and WT, when it is safe to do so.  See pat_init()
 * for the details.  Note, __early_ioremap() used during early boot
 * takes pgprot_t (pte encoding) and does not use these tables.
 *
 * The index into __cachemode2pte_tbl[] is the cachemode.
 *
 * The index into __pte2cachemode_tbl[] is formed from the caching attribute
 * bits of the pte (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at bit positions 0, 1, 2.
 */
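/*
 * Worked example (illustration only, using the default tables below):
 * a pte with _PAGE_PWT and _PAGE_PCD set but _PAGE_PAT clear indexes
 * __pte2cachemode_tbl[] at 0b011 == 3, which maps to _PAGE_CACHE_MODE_UC.
 */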
static uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB      ]	= 0         | 0        ,
	[_PAGE_CACHE_MODE_WC      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC_MINUS]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC      ]	= _PAGE_PWT | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WT      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WP      ]	= 0         | _PAGE_PCD,
};

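/*
 * Translate a page_cache_mode to a pte protection value via the table
 * above; e.g. with the default table, cachemode2protval(_PAGE_CACHE_MODE_UC)
 * yields _PAGE_PWT | _PAGE_PCD (pat_init() may have updated the table by
 * the time this runs).
 */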
unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	if (likely(pcm == 0))
		return 0;
	return __cachemode2pte_tbl[pcm];
}
EXPORT_SYMBOL(cachemode2protval);

static uint8_t __pte2cachemode_tbl[8] = {
	[__pte2cm_idx( 0        | 0         | 0        )] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC,
	[__pte2cm_idx( 0        | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};

/* Check that the write-protect PAT entry is set for write-protect */
bool x86_has_pat_wp(void)
{
	return __pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] == _PAGE_CACHE_MODE_WP;
}

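/*
 * Inverse of cachemode2protval(): mask out the caching attribute bits of
 * a pgprot and look up the cache mode; e.g. a pgprot whose caching bits
 * are all clear translates to _PAGE_CACHE_MODE_WB.
 */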
enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
	unsigned long masked;

	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
	if (likely(masked == 0))
		return 0;
	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}

static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
static unsigned long __initdata pgt_buf_top;

static unsigned long min_pfn_mapped;

static bool __initdata can_use_brk_pgt = true;

/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen, see commit:
 *
 *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
 *
 * for detailed information.
 */
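/*
 * A hypothetical early caller (sketch only) that needs one zeroed,
 * already-mapped page table page would simply do:
 *
 *	pte_t *pte = alloc_low_pages(1);
 *
 * No clearing or mapping is needed afterwards.
 */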
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
	}

	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret = 0;

		if (min_pfn_mapped < max_pfn_mapped) {
			ret = memblock_find_in_range(
					min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE * num, PAGE_SIZE);
		}
		if (ret)
			memblock_reserve(ret, PAGE_SIZE * num);
		else if (can_use_brk_pgt)
			ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));

		if (!ret)
			panic("alloc_low_pages: can not alloc memory");

		pfn = ret >> PAGE_SHIFT;
	} else {
		pfn = pgt_buf_end;
		pgt_buf_end += num;
	}

	for (i = 0; i < num; i++) {
		void *adr;

		adr = __va((pfn + i) << PAGE_SHIFT);
		clear_page(adr);
	}

	return __va(pfn << PAGE_SHIFT);
}

/*
 * By default we need 3 4K pages for the initial PMD_SIZE mapping and 3 more
 * for the 0..ISA_END_ADDRESS range.  With KASLR memory randomization we may
 * need twice as many pages, depending on the machine's e820 memory map and
 * the PUD alignment.
 */
#ifndef CONFIG_RANDOMIZE_MEMORY
#define INIT_PGD_PAGE_COUNT	6
#else
#define INIT_PGD_PAGE_COUNT	12
#endif
#define INIT_PGT_BUF_SIZE	(INIT_PGD_PAGE_COUNT * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
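/*
 * Carve the initial page-table buffer out of the brk area reserved above:
 * with 4K pages that is 24K without KASLR and 48K with it (6 or 12 pages,
 * per INIT_PGD_PAGE_COUNT).
 */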
void __init early_alloc_pgt_buf(void)
{
	unsigned long tables = INIT_PGT_BUF_SIZE;
	phys_addr_t base;

	base = __pa(extend_brk(tables, PAGE_SIZE));

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}

int after_bootmem;

early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

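/*
 * page_size_mask is a bitmap of the page sizes usable for the direct map:
 * bit PG_LEVEL_2M set means 2M (4M on 32-bit non-PAE) pages may be used,
 * bit PG_LEVEL_1G likewise for 1G pages; 4K mappings are always allowed.
 */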
static int page_size_mask;

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. Invoked on the boot CPU.
 */
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

static void __init probe_page_size_mask(void)
{
	/*
	 * For pagealloc debugging, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
		page_size_mask |= 1 << PG_LEVEL_2M;
	else
		direct_gbpages = 0;

	/* Enable PSE if available */
	if (boot_cpu_has(X86_FEATURE_PSE))
		cr4_set_bits_and_update_boot(X86_CR4_PSE);

	/* Enable PGE if available */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	/* By default everything is supported: */
	__default_kernel_pte_mask = __supported_pte_mask;
	/* Except with PTI, where the kernel is mostly non-Global: */
	if (cpu_feature_enabled(X86_FEATURE_PTI))
		__default_kernel_pte_mask &= ~_PAGE_GLOBAL;

	/* Enable 1 GB linear kernel mappings if available: */
	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		printk(KERN_INFO "Using GB pages for direct mapping\n");
		page_size_mask |= 1 << PG_LEVEL_1G;
	} else {
		direct_gbpages = 0;
	}
}

static void setup_pcid(void)
{
	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	if (!boot_cpu_has(X86_FEATURE_PCID))
		return;

	if (boot_cpu_has(X86_FEATURE_PGE)) {
		/*
		 * This can't be cr4_set_bits_and_update_boot() -- the
		 * trampoline code can't handle CR4.PCIDE and it wouldn't
		 * do any good anyway.  Despite the name,
		 * cr4_set_bits_and_update_boot() doesn't actually cause
		 * the bits in question to remain set all the way through
		 * the secondary boot asm.
		 *
		 * Instead, we brute-force it and set CR4.PCIDE manually in
		 * start_secondary().
		 */
		cr4_set_bits(X86_CR4_PCIDE);

		/*
		 * INVPCID's single-context modes (2/3) only work if we set
		 * X86_CR4_PCIDE *and* we have INVPCID support.  It's unusable
		 * on systems that have X86_CR4_PCIDE clear, or that have
		 * no INVPCID support at all.
		 */
		if (boot_cpu_has(X86_FEATURE_INVPCID))
			setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE);
	} else {
		/*
		 * flush_tlb_all(), as currently implemented, won't work if
		 * PCID is on but PGE is not.  Since that combination
		 * doesn't exist on real hardware, there's no reason to try
		 * to fully support it, but it's polite to avoid corrupting
		 * data if we're on an improperly configured VM.
		 */
		setup_clear_cpu_cap(X86_FEATURE_PCID);
	}
}

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

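/*
 * Record one [start_pfn, end_pfn) range with its page_size_mask in mr[]
 * and return the updated range count.  Empty ranges are skipped; running
 * out of slots in the fixed-size mr[] array is fatal.
 */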
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

/*
 * Adjust the page_size_mask for small ranges to use big pages
 * instead of small ones if nearby memory is RAM too.
 */
static void __ref adjust_range_page_size_mask(struct map_range *mr,
					      int nr_range)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
			unsigned long start = round_down(mr[i].start, PMD_SIZE);
			unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
			if ((end >> PAGE_SHIFT) > max_low_pfn)
				continue;
#endif

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
		}
		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
			unsigned long start = round_down(mr[i].start, PUD_SIZE);
			unsigned long end = round_up(mr[i].end, PUD_SIZE);

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
		}
	}
}

static const char *page_size_string(struct map_range *mr)
{
	static const char str_1g[] = "1G";
	static const char str_2m[] = "2M";
	static const char str_4m[] = "4M";
	static const char str_4k[] = "4k";

	if (mr->page_size_mask & (1<<PG_LEVEL_1G))
		return str_1g;
	/*
	 * 32-bit without PAE has a 4M large page size.
	 * PG_LEVEL_2M is misnamed, but we can at least
	 * print out the right size in the string.
	 */
	if (IS_ENABLED(CONFIG_X86_32) &&
	    !IS_ENABLED(CONFIG_X86_PAE) &&
	    mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_4m;

	if (mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_2m;

	return str_4k;
}

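/*
 * Worked example (illustrative numbers only): on 64-bit, splitting
 * [0x1000, 0x80000000) yields a 4K head up to the first 2M boundary,
 * 2M pages up to the first 1G boundary, then 1G pages for [1G, 2G);
 * the tails are empty here since the end is 1G-aligned.  Adjacent
 * ranges with the same page size are merged afterwards.
 */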
static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn, limit_pfn;
	unsigned long pfn;
	int i;

	limit_pfn = PFN_DOWN(end);

	/* head if not big page aligned? */
	pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pfn == 0)
		end_pfn = PFN_DOWN(PMD_SIZE);
	else
		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
	if (end_pfn > limit_pfn)
		end_pfn = limit_pfn;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pfn = end_pfn;
	}

	/* big page (2M) range */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
		end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pfn = end_pfn;
	}

	/* tail that is not big page (1G) aligned */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}
#endif

	/* tail that is not big page (2M) aligned */
	start_pfn = pfn;
	end_pfn = limit_pfn;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	if (!after_bootmem)
		adjust_range_page_size_mask(mr, nr_range);

	/* try to merge contiguous ranges with the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		pr_debug(" [mem %#010lx-%#010lx] page %s\n",
				mr[i].start, mr[i].end - 1,
				page_size_string(&mr[i]));

	return nr_range;
}

struct range pfn_mapped[E820_MAX_ENTRIES];
int nr_pfn_mapped;

static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_MAX_ENTRIES,
					     nr_pfn_mapped, start_pfn, end_pfn);
	nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_MAX_ENTRIES);

	max_pfn_mapped = max(max_pfn_mapped, end_pfn);

	if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
		max_low_pfn_mapped = max(max_low_pfn_mapped,
					 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
}

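/*
 * Check whether [start_pfn, end_pfn) lies entirely within a single
 * already-mapped range; spanning several pfn_mapped[] entries does not
 * count, though entries are merged and sorted as they are added above.
 */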
bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_pfn_mapped; i++)
		if ((start_pfn >= pfn_mapped[i].start) &&
		    (end_pfn <= pfn_mapped[i].end))
			return true;

	return false;
}

/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __ref init_memory_mapping(unsigned long start,
					unsigned long end, pgprot_t prot)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
		 start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask,
						   prot);

	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

	return ret >> PAGE_SHIFT;
}

/*
 * We need to iterate through the E820 memory map and create direct mappings
 * for only E820_TYPE_RAM and E820_KERN_RESERVED regions. We cannot simply
 * create direct mappings for all pfns from [0 to max_low_pfn) and
 * [4GB to max_pfn) because of possible memory holes in high addresses
 * that cannot be marked as UC by fixed/variable range MTRRs.
 * Depending on the alignment of E820 ranges, this may possibly result
 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
 *
 * init_mem_mapping() calls init_range_memory_mapping() with a big range.
 * That range may have holes in the middle or at the ends, and only the
 * RAM parts will be mapped by init_range_memory_mapping().
 */
static unsigned long __init init_range_memory_mapping(
					   unsigned long r_start,
					   unsigned long r_end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long mapped_ram_size = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
		u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
		if (start >= end)
			continue;

		/*
		 * If it overlaps with the brk pgt, we need to
		 * allocate the pgt buf from memblock instead.
		 */
		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
		init_memory_mapping(start, end, PAGE_KERNEL);
		mapped_ram_size += end - start;
		can_use_brk_pgt = true;
	}

	return mapped_ram_size;
}

static unsigned long __init get_new_step_size(unsigned long step_size)
{
	/*
	 * The initial mapped size is PMD_SIZE (2M).
	 * We can not set step_size to be PUD_SIZE (1G) yet.
	 * In the worst case, when we cross the 1G boundary and
	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
	 * to map a 1G range with PTEs. Hence we use one less than the
	 * difference of page table level shifts.
	 *
	 * Don't need to worry about overflow in the top-down case, on 32bit,
	 * when step_size is 0, round_down() returns 0 for start, and that
	 * turns it into 0x100000000ULL.
	 * In the bottom-up case, round_up(x, 0) returns 0 though too, which
	 * needs to be taken into consideration by the code below.
	 */
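	/*
	 * Concretely (on x86-64): PMD_SHIFT - PAGE_SHIFT - 1 == 8, so each
	 * step grows 256x, e.g. 2M -> 512M -> 128G.
	 */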
	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}

/**
 * memory_map_top_down - Map [map_start, map_end) top down
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will set up the direct mapping for the memory range
 * [map_start, map_end) in top-down fashion. That is, the page tables
 * will be allocated at the end of the memory, and we map the
 * memory top-down.
 */
static void __init memory_map_top_down(unsigned long map_start,
				       unsigned long map_end)
{
	unsigned long real_end, start, last_start;
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;

	/* Xen has a big reserved range near the end of RAM; skip it at first. */
	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
	real_end = addr + PMD_SIZE;

	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	step_size = PMD_SIZE;
	max_pfn_mapped = 0; /* will get exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = start = real_end;

	/*
	 * We start from the top (end of memory) and go to the bottom.
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) to be used as
	 * new pages for page tables.
	 */
	while (last_start > map_start) {
		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		mapped_ram_size += init_range_memory_mapping(start,
							last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}

	if (real_end < map_end)
		init_range_memory_mapping(real_end, map_end);
}

/**
 * memory_map_bottom_up - Map [map_start, map_end) bottom up
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will set up the direct mapping for the memory range
 * [map_start, map_end) in bottom-up fashion. Since we have limited the
 * bottom-up allocation above the kernel, the page tables will
 * be allocated just above the kernel and we map the memory
 * in [map_start, map_end) bottom-up.
 */
static void __init memory_map_bottom_up(unsigned long map_start,
					unsigned long map_end)
{
	unsigned long next, start;
	unsigned long mapped_ram_size = 0;
	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	unsigned long step_size = PMD_SIZE;

	start = map_start;
	min_pfn_mapped = start >> PAGE_SHIFT;

	/*
	 * We start from the bottom (@map_start) and go to the top (@map_end).
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) to be used as
	 * new pages for page tables.
	 */
	while (start < map_end) {
		if (step_size && map_end - start > step_size) {
			next = round_up(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else {
			next = map_end;
		}

		mapped_ram_size += init_range_memory_mapping(start, next);
		start = next;

		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}
}

/*
 * The real mode trampoline, which is required for bootstrapping CPUs,
 * occupies only a small area under the low 1MB. See reserve_real_mode()
 * for details.
 *
 * If KASLR is disabled the first PGD entry of the direct mapping is copied
 * to map the real mode trampoline.
 *
 * If KASLR is enabled, copy only the PUD which covers the low 1MB
 * area. This limits the randomization granularity to 1GB for both 4-level
 * and 5-level paging.
 */
static void __init init_trampoline(void)
{
#ifdef CONFIG_X86_64
	if (!kaslr_memory_enabled())
		trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
	else
		init_trampoline_kaslr();
#endif
}

void __init init_mem_mapping(void)
{
	unsigned long end;

	pti_check_boottime_disable();
	probe_page_size_mask();
	setup_pcid();

#ifdef CONFIG_X86_64
	end = max_pfn << PAGE_SHIFT;
#else
	end = max_low_pfn << PAGE_SHIFT;
#endif

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);

	/* Init the trampoline, possibly with KASLR memory offset */
	init_trampoline();

	/*
	 * If the allocation is in bottom-up direction, we set up the direct
	 * mapping bottom-up, otherwise top-down.
	 */
	if (memblock_bottom_up()) {
		unsigned long kernel_end = __pa_symbol(_end);

		/*
		 * We need two separate calls here because we want to allocate
		 * page tables above the kernel: first map [kernel_end, end)
		 * so that memory above the kernel gets mapped as soon as
		 * possible, then use page tables allocated above the kernel
		 * to map [ISA_END_ADDRESS, kernel_end).
		 */
		memory_map_bottom_up(kernel_end, end);
		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
	} else {
		memory_map_top_down(ISA_END_ADDRESS, end);
	}

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		/* can we preserve max_low_pfn? */
		max_low_pfn = max_pfn;
	}
#else
	early_ioremap_page_table_range_init();
#endif

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();

	x86_init.hyper.init_mem_mapping();

	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}

/*
 * Initialize an mm_struct to be used during poking and a pointer to be used
 * during patching.
 */
void __init poking_init(void)
{
	spinlock_t *ptl;
	pte_t *ptep;

	poking_mm = copy_init_mm();
	BUG_ON(!poking_mm);

	/*
	 * Randomize the poking address, but make sure that the following page
	 * will be mapped at the same PMD. We need 2 pages, so find space for 3,
	 * and adjust the address if the PMD ends after the first one.
	 */
	poking_addr = TASK_UNMAPPED_BASE;
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
			(TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);

	if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
		poking_addr += PAGE_SIZE;

	/*
	 * We need to trigger the allocation of the page-tables that will be
	 * needed for poking now. Later, poking may be performed in an atomic
	 * section, which might cause allocation to fail.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
	BUG_ON(!ptep);
	pte_unmap_unlock(ptep, ptl);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area traditionally contains BIOS code and data regions used by X, dosemu,
 * and similar apps. Since they map the entire memory range, the whole range
 * must be allowed (for mapping), but any areas that would otherwise be
 * disallowed are flagged as being "zero filled" instead of rejected.
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
				IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
			!= REGION_DISJOINT) {
		/*
		 * For disallowed memory regions in the low 1MB range,
		 * request that the page be shown as all zeros.
		 */
		if (pagenr < 256)
			return 2;

		return 0;
	}

	/*
	 * This must follow RAM test, since System RAM is considered a
	 * restricted resource under CONFIG_STRICT_IOMEM.
	 */
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
		/* Low 1MB bypasses iomem restrictions. */
		if (pagenr < 256)
			return 1;

		return 0;
	}

	return 1;
}

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	if (debug_pagealloc_enabled()) {
		pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
			begin, end - 1);
		/*
		 * Inform kmemleak about the hole in the memory since the
		 * corresponding pages will be unmapped.
		 */
		kmemleak_free_part((void *)begin, end - begin);
		set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
	} else {
		/*
		 * We just marked the kernel text read only above; now that
		 * we are going to free part of it, we need to make that
		 * part writeable and non-executable first.
		 */
		set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
		set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

		free_reserved_area((void *)begin, (void *)end,
				   POISON_FREE_INITMEM, what);
	}
}

/*
 * begin/end can be in the direct map or the "high kernel mapping"
 * used for the kernel image only.  free_init_pages() will do the
 * right thing for either kind of address.
 */
void free_kernel_image_pages(const char *what, void *begin, void *end)
{
	unsigned long begin_ul = (unsigned long)begin;
	unsigned long end_ul = (unsigned long)end;
	unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;

	free_init_pages(what, begin_ul, end_ul);

	/*
	 * PTI maps some of the kernel into userspace.  For performance,
	 * this includes some kernel areas that do not contain secrets.
	 * Those areas might be adjacent to the parts of the kernel image
	 * being freed, which may contain secrets.  Remove the "high kernel
	 * image mapping" for these freed areas, ensuring they are not even
	 * potentially vulnerable to Meltdown regardless of the specific
	 * optimizations PTI is currently using.
	 *
	 * The "noalias" prevents unmapping the direct map alias which is
	 * needed to access the freed pages.
	 *
	 * This is only valid for 64bit kernels. 32bit has only one mapping
	 * which can't be treated in this way for obvious reasons.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
		set_memory_np_noalias(begin_ul, len_pages);
}

void __ref free_initmem(void)
{
	e820__reallocate_tables();

	mem_encrypt_free_decrypted_mem();

	free_kernel_image_pages("unused kernel image (initmem)",
				&__init_begin, &__init_end);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * end may not be page aligned, and we cannot align it: the
	 * decompressor could be confused by an aligned initrd_end.
	 * We already reserved the trailing partial page in
	 * - i386_start_kernel()
	 * - x86_64_start_kernel()
	 * - relocate_initrd()
	 * so here we can safely PAGE_ALIGN() to free the partial page.
	 */
	free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif

/*
 * Calculate the precise size of the DMA zone (first 16 MB of RAM),
 * and pass it to the MM layer - to help it set zone watermarks more
 * accurately.
 *
 * Done on 64-bit systems only for the time being, although 32-bit systems
 * might benefit from this as well.
 */
void __init memblock_find_dma_reserve(void)
{
#ifdef CONFIG_X86_64
	u64 nr_pages = 0, nr_free_pages = 0;
	unsigned long start_pfn, end_pfn;
	phys_addr_t start_addr, end_addr;
	int i;
	u64 u;

	/*
	 * Iterate over all memory ranges (free and reserved ones alike),
	 * to calculate the total number of pages in the first 16 MB of RAM:
	 */
	nr_pages = 0;
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		start_pfn = min(start_pfn, MAX_DMA_PFN);
		end_pfn   = min(end_pfn,   MAX_DMA_PFN);

		nr_pages += end_pfn - start_pfn;
	}

	/*
	 * Iterate over free memory ranges to calculate the number of free
	 * pages in the DMA zone, while not counting potential partial
	 * pages at the beginning or the end of the range:
	 */
	nr_free_pages = 0;
	for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
		start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN);
		end_pfn   = min_t(unsigned long, PFN_DOWN(end_addr), MAX_DMA_PFN);

		if (start_pfn < end_pfn)
			nr_free_pages += end_pfn - start_pfn;
	}

	set_dma_reserve(nr_pages - nr_free_pages);
#endif
}

void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= min(MAX_DMA_PFN, max_low_pfn);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= min(MAX_DMA32_PFN, max_low_pfn);
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init(max_zone_pfns);
}

__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
	.loaded_mm = &init_mm,
	.next_asid = 1,
	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
};

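/*
 * Called by the PAT setup code to (re)populate one slot of the cache-mode
 * translation tables at the top of this file once the final PAT layout is
 * known.
 */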
void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
	/* entry 0 MUST be WB (hardwired to speed up translations) */
	BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);

	__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
	__pte2cachemode_tbl[entry] = cache;
}

#ifdef CONFIG_SWAP
unsigned long max_swapfile_size(void)
{
	unsigned long pages;

	pages = generic_max_swapfile_size();

	if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
		unsigned long long l1tf_limit = l1tf_pfn_limit();
		/*
		 * We encode swap offsets also with 3 bits below those for pfn
		 * which makes the usable limit higher.
		 */
#if CONFIG_PGTABLE_LEVELS > 2
		l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
#endif
		pages = min_t(unsigned long long, l1tf_limit, pages);
	}
	return pages;
}
#endif