/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Page management definitions for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <linux/const.h>

/* This is probably not the most graceful way to handle this. */

#ifdef CONFIG_PAGE_SIZE_4KB
#define PAGE_SHIFT 12
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_4KB
#endif

#ifdef CONFIG_PAGE_SIZE_16KB
#define PAGE_SHIFT 14
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_16KB
#endif

#ifdef CONFIG_PAGE_SIZE_64KB
#define PAGE_SHIFT 16
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_64KB
#endif

#ifdef CONFIG_PAGE_SIZE_256KB
#define PAGE_SHIFT 18
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_256KB
#endif

#ifdef CONFIG_PAGE_SIZE_1MB
#define PAGE_SHIFT 20
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_1MB
#endif

/*
 * These would more naturally be defined in hugetlb.h, but apparently
 * they are not.
 * "Huge" for us should be 4MB or 16MB, which are both represented
 * in L1 PTEs. Right now, it's set up for 4MB.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT 22
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
#define HVM_HUGEPAGE_SIZE 0x5
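/*
 * For example, with CONFIG_PAGE_SIZE_4KB (PAGE_SHIFT == 12):
 * HPAGE_SIZE = 1UL << 22 = 4MB and HUGETLB_PAGE_ORDER = 22 - 12 = 10,
 * i.e. one huge page spans 1024 base pages.
 */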
#endif

#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
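/*
 * For example, with CONFIG_PAGE_SIZE_4KB: PAGE_SIZE == 0x1000 and
 * PAGE_MASK == 0xfffff000, so (addr & PAGE_MASK) is the base address of
 * the page containing addr and (addr & ~PAGE_MASK) is the offset into it.
 */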

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

/*
 * This is for PFN_DOWN, which mm.h needs. Seems the right place to pull it in.
 */
#include <linux/pfn.h>

/*
 * We implement a two-level architecture-specific page table structure.
 * Null intermediate page table level (pmd, pud) definitions will come from
 * asm-generic/pagetable-nopmd.h and asm-generic/pagetable-nopud.h
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
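/*
 * The struct wrappers above exist purely for type checking: pte_t, pgd_t
 * and pgprot_t values can't be mixed up silently, and __pte(pte_val(x))
 * simply reconstructs x.
 */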

/*
 * We need a __pa and a __va routine for kernel space.
 * MIPS says they're only used during mem_init.
 * Also, check whether we need a PHYS_OFFSET.
 */
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) - PHYS_OFFSET + PAGE_OFFSET))
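/*
 * These assume the usual linear kernel mapping, with PAGE_OFFSET and
 * PHYS_OFFSET presumably provided by <asm/mem-layout.h> (included below):
 * for a lowmem kernel address kva, __va(__pa(kva)) == kva, while
 * vmalloc/ioremap addresses must not be passed to __pa().
 */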

/* The "page frame" descriptor is defined in linux/mm.h */
struct page;

/* Returns page frame descriptor for virtual address. */
#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))

/* Default vm area behavior is non-executable. */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
                               VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

/* Need to avoid using a define for linesize; may move this to another file. */
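/*
 * The loop below zeroes the page one 32-byte L1 cache line per iteration:
 * loop0()/endloop0 form a Hexagon hardware loop (hence the lc0/sa0
 * clobbers) executing PAGE_SIZE/32 times, and dczeroa() allocates and
 * zero-fills the line at the current address without reading memory.
 */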
static inline void clear_page(void *page)
{
        /* This can only be done on pages with L1 WB cache */
        asm volatile(
                " loop0(1f,%1);\n"
                "1: { dczeroa(%0);\n"
                " %0 = add(%0,#32); }:endloop0\n"
                : "+r" (page)
                : "r" (PAGE_SIZE/32)
                : "lc0", "sa0", "memory"
        );
}

#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)

/*
 * Under assumption that kernel always "sees" user map...
 */
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

/*
 * page_to_phys - convert page to physical address
 * @page: pointer to page entry in mem_map
 */
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)

#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)

#define page_to_virt(page) __va(page_to_phys(page))
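/*
 * Round-trip example: for a lowmem kernel address kva,
 * pfn_to_virt(virt_to_pfn(kva)) yields kva rounded down to its page base
 * (assuming PAGE_OFFSET and PHYS_OFFSET are page-aligned), and
 * page_to_virt(virt_to_page(kva)) gives the same result.
 */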

/*
 * For port to Hexagon Virtual Machine, MAYBE we check for attempts
 * to reference reserved HVM space, but in any case, the VM will be
 * protected.
 */
#define kern_addr_valid(addr) (1)

#include <asm/mem-layout.h>
#include <asm-generic/memory_model.h>
/* XXX Todo: implement assembly-optimized version of getorder. */
#include <asm-generic/getorder.h>

#endif /* ifndef __ASSEMBLY__ */
#endif /* ifdef __KERNEL__ */

#endif /* _ASM_PAGE_H */