Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ee943ac..3e30019 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -17,7 +17,6 @@
#include <asm/cp15.h>
#include <asm/cputype.h>
-#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/fixmap.h>
#include <asm/sections.h>
@@ -29,6 +28,7 @@
#include <asm/traps.h>
#include <asm/procinfo.h>
#include <asm/memory.h>
+#include <asm/pgalloc.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -65,9 +65,6 @@
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;
-pgprot_t pgprot_hyp_device;
-pgprot_t pgprot_s2;
-pgprot_t pgprot_s2_device;
EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);
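For context: pgprot_hyp_device, pgprot_s2 and pgprot_s2_device carried the HYP-mode and stage-2 (guest) page attributes for the 32-bit KVM host, which upstream removed in v5.7. The pte_s2 field, the s2_policy() macro and the rest of the stage-2 protection plumbing disappear in the hunks below for the same reason.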
@@ -77,48 +74,34 @@
unsigned int cr_mask;
pmdval_t pmd;
pteval_t pte;
- pteval_t pte_s2;
};
-#ifdef CONFIG_ARM_LPAE
-#define s2_policy(policy) policy
-#else
-#define s2_policy(policy) 0
-#endif
-
-unsigned long kimage_voffset __ro_after_init;
-
static struct cachepolicy cache_policies[] __initdata = {
{
.policy = "uncached",
.cr_mask = CR_W|CR_C,
.pmd = PMD_SECT_UNCACHED,
.pte = L_PTE_MT_UNCACHED,
- .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
}, {
.policy = "buffered",
.cr_mask = CR_C,
.pmd = PMD_SECT_BUFFERED,
.pte = L_PTE_MT_BUFFERABLE,
- .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
}, {
.policy = "writethrough",
.cr_mask = 0,
.pmd = PMD_SECT_WT,
.pte = L_PTE_MT_WRITETHROUGH,
- .pte_s2 = s2_policy(L_PTE_S2_MT_WRITETHROUGH),
}, {
.policy = "writeback",
.cr_mask = 0,
.pmd = PMD_SECT_WB,
.pte = L_PTE_MT_WRITEBACK,
- .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
}, {
.policy = "writealloc",
.cr_mask = 0,
.pmd = PMD_SECT_WBWA,
.pte = L_PTE_MT_WRITEALLOC,
- .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
}
};
@@ -229,12 +212,14 @@
static int __init early_cachepolicy(char *p)
{
pr_warn("cachepolicy kernel parameter not supported without cp15\n");
+ return 0;
}
early_param("cachepolicy", early_cachepolicy);
static int __init noalign_setup(char *__unused)
{
pr_warn("noalign kernel parameter not supported without cp15\n");
+ return 1;
}
__setup("noalign", noalign_setup);
@@ -248,9 +233,6 @@
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
L_PTE_SHARED,
- .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) |
- s2_policy(L_PTE_S2_MT_DEV_SHARED) |
- L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
.domain = DOMAIN_IO,
@@ -261,7 +243,7 @@
.prot_sect = PROT_SECT_DEVICE,
.domain = DOMAIN_IO,
},
- [MT_DEVICE_CACHED] = { /* ioremap_cached */
+ [MT_DEVICE_CACHED] = { /* ioremap_cache */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
@@ -376,11 +358,7 @@
static inline pmd_t * __init fixmap_pmd(unsigned long addr)
{
- pgd_t *pgd = pgd_offset_k(addr);
- pud_t *pud = pud_offset(pgd, addr);
- pmd_t *pmd = pmd_offset(pud, addr);
-
- return pmd;
+ return pmd_off_k(addr);
}
void __init early_fixmap_init(void)
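The open-coded pgd/pud/pmd walk is replaced by the generic pmd_off_k() helper. Roughly, from include/linux/pgtable.h (quoted for reference, not part of this diff) — on 32-bit ARM the p4d and pud levels are folded, so this compiles down to the same walk as before:

static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
}

static inline pmd_t *pmd_off_k(unsigned long va)
{
	return pmd_off(&init_mm, va);
}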
@@ -415,9 +393,9 @@
FIXADDR_END);
BUG_ON(idx >= __end_of_fixed_addresses);
- /* we only support device mappings until pgprot_kernel has been set */
+ /* We support only device mappings before pgprot_kernel is set. */
if (WARN_ON(pgprot_val(prot) != pgprot_val(FIXMAP_PAGE_IO) &&
- pgprot_val(pgprot_kernel) == 0))
+ pgprot_val(prot) && pgprot_val(pgprot_kernel) == 0))
return;
if (pgprot_val(prot))
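The reworded comment comes with a behavioural tweak: the extra pgprot_val(prot) term means a call that clears a fixmap slot (prot == __pgprot(0)) no longer trips the WARN before pgprot_kernel has been initialised; only attempts to install a real, non-device mapping that early are still flagged.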
@@ -436,7 +414,6 @@
struct cachepolicy *cp;
unsigned int cr = get_cr();
pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
- pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ -560,9 +537,6 @@
*/
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
- s2_pgprot = cp->pte_s2;
- hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
- s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
#ifndef CONFIG_ARM_LPAE
/*
@@ -606,7 +580,6 @@
user_pgprot |= L_PTE_SHARED;
kern_pgprot |= L_PTE_SHARED;
vecs_pgprot |= L_PTE_SHARED;
- s2_pgprot |= L_PTE_SHARED;
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -668,9 +641,6 @@
pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
L_PTE_DIRTY | kern_pgprot);
- pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
- pgprot_s2_device = __pgprot(s2_device_pgprot);
- pgprot_hyp_device = __pgprot(hyp_device_pgprot);
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
@@ -829,12 +799,12 @@
} while (pmd++, addr = next, addr != end);
}
-static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
+static void __init alloc_init_pud(p4d_t *p4d, unsigned long addr,
unsigned long end, phys_addr_t phys,
const struct mem_type *type,
void *(*alloc)(unsigned long sz), bool ng)
{
- pud_t *pud = pud_offset(pgd, addr);
+ pud_t *pud = pud_offset(p4d, addr);
unsigned long next;
do {
@@ -844,6 +814,21 @@
} while (pud++, addr = next, addr != end);
}
+static void __init alloc_init_p4d(pgd_t *pgd, unsigned long addr,
+ unsigned long end, phys_addr_t phys,
+ const struct mem_type *type,
+ void *(*alloc)(unsigned long sz), bool ng)
+{
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ unsigned long next;
+
+ do {
+ next = p4d_addr_end(addr, end);
+ alloc_init_pud(p4d, addr, next, phys, type, alloc, ng);
+ phys += next - addr;
+ } while (p4d++, addr = next, addr != end);
+}
+
#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct mm_struct *mm,
struct map_desc *md,
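A note on the new level: 32-bit ARM uses at most three page-table levels, so the p4d level threaded through here is folded away by the generic no-p4d code, costing a type change but no extra memory access. Roughly, from include/asm-generic/pgtable-nop4d.h (quoted for reference, not part of this diff):

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd;	/* the p4d shares the pgd's storage */
}

And because P4D_SIZE equals PGDIR_SIZE in the folded case, p4d_addr_end() already clamps to the range end, so the do/while body in alloc_init_p4d() runs exactly once per pgd entry.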
@@ -891,7 +876,8 @@
pgd = pgd_offset(mm, addr);
end = addr + length;
do {
- pud_t *pud = pud_offset(pgd, addr);
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ pud_t *pud = pud_offset(p4d, addr);
pmd_t *pmd = pmd_offset(pud, addr);
int i;
@@ -942,7 +928,7 @@
do {
unsigned long next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys, type, alloc, ng);
+ alloc_init_p4d(pgd, addr, next, phys, type, alloc, ng);
phys += next - addr;
addr = next;
@@ -978,7 +964,13 @@
bool ng)
{
#ifdef CONFIG_ARM_LPAE
- pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
+ p4d_t *p4d;
+ pud_t *pud;
+
+ p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
+ if (WARN_ON(!p4d))
+ return;
+ pud = pud_alloc(mm, p4d, md->virtual);
if (WARN_ON(!pud))
return;
pmd_alloc(mm, pud, 0);
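For reference: p4d_alloc() parallels the existing pud_alloc() call. With a folded p4d it cannot allocate anything and simply resolves to the pgd slot, so the new WARN_ON matters only for a genuine allocation failure on LPAE configurations. Its shape in include/linux/mm.h is approximately (quoted for context, not part of this diff):

static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
			       unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}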
@@ -1165,9 +1157,8 @@
void __init adjust_lowmem_bounds(void)
{
- phys_addr_t memblock_limit = 0;
- u64 vmalloc_limit;
- struct memblock_region *reg;
+ phys_addr_t block_start, block_end, memblock_limit = 0;
+ u64 vmalloc_limit, i;
phys_addr_t lowmem_limit = 0;
/*
@@ -1183,26 +1174,18 @@
* The first usable region must be PMD aligned. Mark its start
* as MEMBLOCK_NOMAP if it isn't
*/
- for_each_memblock(memory, reg) {
- if (!memblock_is_nomap(reg)) {
- if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
- phys_addr_t len;
+ for_each_mem_range(i, &block_start, &block_end) {
+ if (!IS_ALIGNED(block_start, PMD_SIZE)) {
+ phys_addr_t len;
- len = round_up(reg->base, PMD_SIZE) - reg->base;
- memblock_mark_nomap(reg->base, len);
- }
- break;
+ len = round_up(block_start, PMD_SIZE) - block_start;
+ memblock_mark_nomap(block_start, len);
}
+ break;
}
- for_each_memblock(memory, reg) {
- phys_addr_t block_start = reg->base;
- phys_addr_t block_end = reg->base + reg->size;
-
- if (memblock_is_nomap(reg))
- continue;
-
- if (reg->base < vmalloc_limit) {
+ for_each_mem_range(i, &block_start, &block_end) {
+ if (block_start < vmalloc_limit) {
if (block_end > lowmem_limit)
/*
* Compare as u64 to ensure vmalloc_limit does
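The conversion from for_each_memblock() to for_each_mem_range() explains the dropped memblock_is_nomap() tests: the new iterator hands back only usable ranges and skips MEMBLOCK_NOMAP regions itself. The map_lowmem() hunk below is the same conversion. Usage sketch (illustrative, not part of this diff):

phys_addr_t start, end;
u64 i;

for_each_mem_range(i, &start, &end) {
	/* [start, end) is usable memory; NOMAP regions never show up */
}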
@@ -1460,19 +1443,15 @@
static void __init map_lowmem(void)
{
- struct memblock_region *reg;
phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+ phys_addr_t start, end;
+ u64 i;
/* Map all the lowmem memory banks. */
- for_each_memblock(memory, reg) {
- phys_addr_t start = reg->base;
- phys_addr_t end = start + reg->size;
+ for_each_mem_range(i, &start, &end) {
struct map_desc map;
- if (memblock_is_nomap(reg))
- continue;
-
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
if (start >= end)
@@ -1670,9 +1649,6 @@
empty_zero_page = virt_to_page(zero_page);
__flush_dcache_page(NULL, empty_zero_page);
-
- /* Compute the virt/idmap offset, mostly for the sake of KVM */
- kimage_voffset = (unsigned long)&kimage_voffset - virt_to_idmap(&kimage_voffset);
}
void __init early_mm_init(const struct machine_desc *mdesc)
@@ -1680,3 +1656,17 @@
build_mem_type_table();
early_paging_init(mdesc);
}
+
+void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+{
+ unsigned long ext = 0;
+
+ if (addr < TASK_SIZE && pte_valid_user(pteval)) {
+ if (!pte_special(pteval))
+ __sync_icache_dcache(pteval);
+ ext |= PTE_EXT_NG;
+ }
+
+ set_pte_ext(ptep, pteval, ext);
+}
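A closing note on the added function: this moves set_pte_at() out of line from arch/arm/include/asm/pgtable.h. For user-space addresses it keeps the I-cache and D-cache coherent for non-special pages and sets PTE_EXT_NG, so the TLB entry is tagged non-global and scoped to the current ASID. The gate is pte_valid_user(), which is approximately (quoted for context, not part of this diff):

#define pte_valid_user(pte)	\
	(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))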