/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>

#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mca.h>

#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long max_gap;
#endif

/* physical address where the bootmem map is located */
unsigned long bootmap_start;
#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - setup per-cpu variables
 *
 * Allocate and setup per-cpu data areas.
 */
void *per_cpu_init(void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;
	first_time = false;

	/*
	 * get_free_pages() cannot be used until cpu_init() is done.
	 * The BSP allocates PERCPU_PAGE_SIZE bytes for every possible CPU
	 * so that the APs never have to call get_zeroed_page().
	 */
	for_each_possible_cpu(cpu) {
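		/*
		 * cpu0's area has been live since head.S initialized it in
		 * __cpu0_per_cpu, so copy from there; every other CPU starts
		 * from the pristine template at __phys_per_cpu_start.
		 */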
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

		/*
		 * percpu area for cpu0 is moved from the __init area
		 * which is setup by head.S and used till this point.
		 * Update ar.k3.  This move ensures that the percpu
		 * area for cpu0 is on the correct node and that its
		 * virtual address isn't insanely far from the other
		 * percpu areas, which is important for the congruent
		 * percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
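	/* Both paths return the calling CPU's own per-cpu base address. */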
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}

static inline void
alloc_per_cpu_data(void)
{
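	/*
	 * Reserve one PERCPU_PAGE_SIZE chunk per possible CPU in a single
	 * contiguous memblock allocation, placed at or above
	 * MAX_DMA_ADDRESS so DMA-able memory is left untouched.
	 */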
	size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();

	cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE,
				       __pa(MAX_DMA_ADDRESS));
	if (!cpu_data)
		panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
		      __func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}

/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;

	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

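	/*
	 * Each unit is one PERCPU_PAGE_SIZE page split three ways: the
	 * static area sized at link time, a reserved area for module
	 * percpu variables, and whatever remains for dynamic allocation.
	 */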
	/* set parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;

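	/*
	 * Point the first chunk at cpu0's unit so it describes the areas
	 * that per_cpu_init() already populated instead of allocating
	 * new ones.
	 */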
	pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	pcpu_free_alloc_info(ai);
}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

/**
 * find_memory - setup memory map
 *
 * Walk the EFI memory map and find usable memory for the system,
 * taking into account reserved areas.
 */
void __init
find_memory (void)
{
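	/*
	 * Record the ranges (kernel image, initrd, boot data) that must
	 * never be handed out as free memory.
	 */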
	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;

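	/*
	 * With a virtual mem_map, register only the ranges that actually
	 * contain memory; otherwise add everything up to max_low_pfn to
	 * node 0 as one contiguous block.
	 */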
#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(filter_memory, register_active_ranges);
#else
	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
#endif

	find_initrd();

	alloc_per_cpu_data();
}

/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA32
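	/*
	 * Memory below MAX_DMA_ADDRESS (the first 4GB) forms ZONE_DMA32;
	 * convert that virtual boundary into a page frame number.
	 */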
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA32] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
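	/*
	 * A virtually mapped mem_map only pays off when the physical
	 * address space has a large hole; for anything smaller than
	 * LARGE_GAP a flat map wastes little memory, so keep it.
	 */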
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;
	} else {
		unsigned long map_size;

		/* allocate virtual_mem_map */

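		/*
		 * Size the array for max_low_pfn pages, rounded up to
		 * MAX_ORDER_NR_PAGES so the buddy allocator's alignment
		 * assumptions about mem_map hold, and carve it out of the
		 * top of the vmalloc area.
		 */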
		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
			sizeof(struct page));
		VMALLOC_END -= map_size;
		vmem_map = (struct page *) VMALLOC_END;
		efi_memmap_walk(create_mem_map_page_table, NULL);

		/*
		 * alloc_node_mem_map makes an adjustment for mem_map
		 * which isn't compatible with vmem_map.
		 */
		NODE_DATA(0)->node_mem_map = vmem_map +
			find_min_pfn_with_active_regions();

		printk("Virtual mem_map starts at 0x%p\n", mem_map);
	}
#endif /* CONFIG_VIRTUAL_MEM_MAP */
	free_area_init_nodes(max_zone_pfns);
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}