// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  This file contains pgtable related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>


#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
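/*
 * On Book3S-64 the page table geometry is not fixed at compile time: the
 * index sizes, table sizes and layout constants below are set up during
 * early boot depending on whether the hash or radix MMU is in use, and
 * are exported so that page table accessors inlined into modules see the
 * geometry chosen for this kernel.
 */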
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
unsigned long __kernel_io_end;
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
#endif

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
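/*
 * Return the struct page backing a PGD entry: for a leaf (huge) entry the
 * page is derived from the pfn encoded in the entry, otherwise it is the
 * page holding the PUD table the entry points to.
 */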
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_is_leaf(pgd)) {
		VM_WARN_ON(!pgd_huge(pgd));
		return pte_page(pgd_pte(pgd));
	}
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

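/*
 * Return the struct page backing a PUD entry: the huge page itself for a
 * leaf entry, otherwise the page holding the PMD table it points to.
 */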
struct page *pud_page(pud_t pud)
{
	if (pud_is_leaf(pud)) {
		VM_WARN_ON(!pud_huge(pud));
		return pte_page(pud_pte(pud));
	}
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd and use the bits below
 * PTE_RPN_SHIFT for flags. For a PTE page we have a PTE_FRAG_SIZE (4K)
 * aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_is_leaf(pmd)) {
		VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
		return pte_page(pmd_pte(pmd));
	}
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_STRICT_KERNEL_RWX
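/*
 * Called once the kernel no longer needs write access to its text and
 * rodata: switch those mappings to read-only via the radix or hash
 * specific helper, then let the page table dumper check that no
 * writable+executable mappings remain.
 */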
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}

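/*
 * Called when init memory is freed: remove execute permission from the
 * init sections via the radix or hash specific implementation.
 */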
void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif