Update Linux to v5.4.2

Sync arch/ia64/mm with upstream v5.4.2. The notable changes in this
directory:

- convert boot-time allocations from the removed bootmem interface
  (<linux/bootmem.h>, __alloc_bootmem*(), alloc_bootmem*()) to
  memblock, adding the NULL checks the new API requires
- drop the return-value handling around pcpu_setup_first_chunk(),
  which no longer reports errors, and use NUMA_NO_NODE instead of
  bare -1 node sentinels
- replace the open-coded notify_page_fault() with the generic
  kprobe_page_fault() and the hand-built siginfo with
  force_sig_fault()
- turn dma_mark_clean() into the generic arch_sync_dma_for_cpu()
  callback and drop the machvec indirection: mem_init() now calls
  detect_intel_iommu()/swiotlb_init() directly, and flush_tlb_range()
  calls ia64_global_tlb_purge() directly
- adapt memory hotplug to the mhp_restrictions API and the void
  arch_remove_memory()
- add a flush_tlb_range() wrapper that falls back to flush_tlb_all()
  for ranges over 1 TiB or ranges spanning regions
- add SPDX license identifiers, export __node_distance() and
  paddr_to_nid(), and use page_size() when syncing the i-cache

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index e2e40bb..5b00dc3 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -14,7 +14,6 @@
* Routines used by ia64 machines with contiguous (or virtually contiguous)
* memory.
*/
-#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
@@ -85,8 +84,13 @@
static inline void
alloc_per_cpu_data(void)
{
- cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
- PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+ size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();
+
+ cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS));
+ if (!cpu_data)
+ panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
+ __func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
/**
@@ -104,7 +108,6 @@
struct pcpu_group_info *gi;
unsigned int cpu;
ssize_t static_size, reserved_size, dyn_size;
- int rc;
ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
if (!ai)
@@ -130,10 +133,7 @@
ai->atom_size = PAGE_SIZE;
ai->alloc_size = PERCPU_PAGE_SIZE;
- rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
- if (rc)
- panic("failed to setup percpu area (err=%d)", rc);
-
+ pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
pcpu_free_alloc_info(ai);
}
#else
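The contig.c hunks above show the conversion pattern used throughout this
update: the old bootmem allocators panicked internally on failure, whereas
the memblock API returns NULL and leaves error handling to the caller, so
every converted call site gains an explicit check. A minimal sketch of the
pattern, assuming an ia64 kernel context (the helper name
early_alloc_example is illustrative, not part of the patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <asm/dma.h>		/* MAX_DMA_ADDRESS */

/* Illustrative only: allocate a boot-time buffer at or above __pa(MAX_DMA_ADDRESS). */
static void * __init early_alloc_example(size_t size, size_t align)
{
	/*
	 * memblock_alloc_from() replaces __alloc_bootmem(): same size and
	 * alignment arguments, with the third giving the minimum physical
	 * address for the allocation.
	 */
	void *buf = memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));

	/* Unlike __alloc_bootmem(), memblock returns NULL on failure. */
	if (!buf)
		panic("%s: Failed to allocate %zu bytes\n", __func__, size);
	return buf;
}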
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 1928d57..4f33f6e 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -19,7 +19,6 @@
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/efi.h>
@@ -187,7 +186,7 @@
unsigned long base_offset;
unsigned int cpu;
ssize_t static_size, reserved_size, dyn_size;
- int node, prev_node, unit, nr_units, rc;
+ int node, prev_node, unit, nr_units;
ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
if (!ai)
@@ -228,7 +227,7 @@
* CPUs are put into groups according to node. Walk cpu_map
* and create new groups at node boundaries.
*/
- prev_node = -1;
+ prev_node = NUMA_NO_NODE;
ai->nr_groups = 0;
for (unit = 0; unit < nr_units; unit++) {
cpu = cpu_map[unit];
@@ -246,10 +245,7 @@
gi->cpu_map = &cpu_map[unit];
}
- rc = pcpu_setup_first_chunk(ai, base);
- if (rc)
- panic("failed to setup percpu area (err=%d)", rc);
-
+ pcpu_setup_first_chunk(ai, base);
pcpu_free_alloc_info(ai);
}
#endif
@@ -397,8 +393,7 @@
*
* Each node's per-node area has a copy of the global pg_data_t list, so
* we copy that to each node here, as well as setting the per-cpu pointer
- * to the local node data structure. The active_cpus field of the per-node
- * structure gets setup by the platform_cpu_init() function later.
+ * to the local node data structure.
*/
static void __init initialize_pernode_data(void)
{
@@ -436,7 +431,7 @@
{
void *ptr = NULL;
u8 best = 0xff;
- int bestnode = -1, node, anynode = 0;
+ int bestnode = NUMA_NO_NODE, node, anynode = 0;
for_each_online_node(node) {
if (node_isset(node, memory_less_mask))
@@ -448,11 +443,17 @@
anynode = node;
}
- if (bestnode == -1)
+ if (bestnode == NUMA_NO_NODE)
bestnode = anynode;
- ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
- PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+ ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS),
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ bestnode);
+ if (!ptr)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%lx\n",
+ __func__, pernodesize, PERCPU_PAGE_SIZE, bestnode,
+ __pa(MAX_DMA_ADDRESS));
return ptr;
}
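The discontig.c version is the node-aware variant of the same conversion:
memblock_alloc_try_nid() takes a node id directly instead of the pgdat
pointer that __alloc_bootmem_node() wanted, tries that node first, and
falls back to any node. The sentinel change matters here too, since
memblock itself understands NUMA_NO_NODE as "no node preference". A sketch
of the call shape, with an illustrative wrapper name:

#include <linux/cache.h>	/* SMP_CACHE_BYTES */
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/numa.h>		/* NUMA_NO_NODE */

/* Illustrative only: prefer node 'nid', falling back to any node. */
static void * __init node_alloc_example(size_t size, int nid)
{
	/*
	 * Arguments: size, align, min_addr, max_addr, nid. A min_addr of 0
	 * places no lower bound, MEMBLOCK_ALLOC_ACCESSIBLE places no upper
	 * bound, and passing NUMA_NO_NODE as nid would mean "any node".
	 */
	return memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 0,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}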
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index a9d55ad..c2f299f 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -21,28 +21,6 @@
extern int die(char *, struct pt_regs *, long);
-#ifdef CONFIG_KPROBES
-static inline int notify_page_fault(struct pt_regs *regs, int trap)
-{
- int ret = 0;
-
- if (!user_mode(regs)) {
- /* kprobe_running() needs smp_processor_id() */
- preempt_disable();
- if (kprobe_running() && kprobe_fault_handler(regs, trap))
- ret = 1;
- preempt_enable();
- }
-
- return ret;
-}
-#else
-static inline int notify_page_fault(struct pt_regs *regs, int trap)
-{
- return 0;
-}
-#endif
-
/*
* Return TRUE if ADDRESS points at a page in the kernel's mapped segment
* (inside region 5, on ia64) and that page is present.
@@ -116,7 +94,7 @@
/*
* This is to handle the kprobes on user space access instructions
*/
- if (notify_page_fault(regs, TRAP_BRKPT))
+ if (kprobe_page_fault(regs, TRAP_BRKPT))
return;
if (user_mode(regs))
@@ -248,16 +226,8 @@
return;
}
if (user_mode(regs)) {
- struct siginfo si;
-
- clear_siginfo(&si);
- si.si_signo = signal;
- si.si_errno = 0;
- si.si_code = code;
- si.si_addr = (void __user *) address;
- si.si_isr = isr;
- si.si_flags = __ISR_VALID;
- force_sig_info(signal, &si, current);
+ force_sig_fault(signal, code, (void __user *) address,
+ 0, __ISR_VALID, isr);
return;
}
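Both fault.c hunks replace hand-rolled boilerplate with generic helpers:
kprobe_page_fault() centralizes the preempt-safe "did a kprobe claim this
fault?" check that each architecture used to open-code, and
force_sig_fault() builds and delivers the siginfo in one call. A sketch of
the resulting shape of a fault handler (the function name is illustrative;
the trailing imm/flags/isr arguments to force_sig_fault() are the
ia64-specific extras seen in the hunk above):

#include <linux/kprobes.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>

static void fault_path_example(struct pt_regs *regs, unsigned long address,
			       int signal, int code, unsigned long isr)
{
	/* True when a registered kprobe handled the fault; nothing to do. */
	if (kprobe_page_fault(regs, TRAP_BRKPT))
		return;

	/*
	 * Zeroes a siginfo, fills si_signo/si_code/si_addr (plus, on ia64,
	 * si_imm, si_flags and si_isr) and sends it to current - replacing
	 * roughly ten lines of manual setup per call site.
	 */
	force_sig_fault(signal, code, (void __user *)address,
			0, __ISR_VALID, isr);
}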
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 3b85c3e..bf9df26 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -8,7 +8,8 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/dmar.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
@@ -23,10 +24,10 @@
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
+#include <linux/swiotlb.h>
#include <asm/dma.h>
#include <asm/io.h>
-#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
@@ -63,7 +64,7 @@
if (test_bit(PG_arch_1, &page->flags))
return; /* i-cache is already coherent with d-cache */
- flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+ flush_icache_range(addr, addr + page_size(page));
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
@@ -72,18 +73,14 @@
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
* flush them when they get mapped into an executable vm-area.
*/
-void
-dma_mark_clean(void *addr, size_t size)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- unsigned long pg_addr, end;
+ unsigned long pfn = PHYS_PFN(paddr);
- pg_addr = PAGE_ALIGN((unsigned long) addr);
- end = (unsigned long) addr + size;
- while (pg_addr + PAGE_SIZE <= end) {
- struct page *page = virt_to_page(pg_addr);
- set_bit(PG_arch_1, &page->flags);
- pg_addr += PAGE_SIZE;
- }
+ do {
+ set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
+ } while (++pfn <= PHYS_PFN(paddr + size - 1));
}
inline void
@@ -446,23 +443,45 @@
for (address = start_page; address < end_page; address += PAGE_SIZE) {
pgd = pgd_offset_k(address);
- if (pgd_none(*pgd))
- pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+ if (pgd_none(*pgd)) {
+ pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+ if (!pud)
+ goto err_alloc;
+ pgd_populate(&init_mm, pgd, pud);
+ }
pud = pud_offset(pgd, address);
- if (pud_none(*pud))
- pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+ if (pud_none(*pud)) {
+ pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+ if (!pmd)
+ goto err_alloc;
+ pud_populate(&init_mm, pud, pmd);
+ }
pmd = pmd_offset(pud, address);
- if (pmd_none(*pmd))
- pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+ if (pmd_none(*pmd)) {
+ pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+ if (!pte)
+ goto err_alloc;
+ pmd_populate_kernel(&init_mm, pmd, pte);
+ }
pte = pte_offset_kernel(pmd, address);
- if (pte_none(*pte))
- set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
+ if (pte_none(*pte)) {
+ void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
+ node);
+ if (!page)
+ goto err_alloc;
+ set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
PAGE_KERNEL));
+ }
}
return 0;
+
+err_alloc:
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
+ __func__, PAGE_SIZE, PAGE_SIZE, node);
+ return -ENOMEM;
}
struct memmap_init_callback_data {
@@ -612,13 +631,17 @@
BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);
-#ifdef CONFIG_PCI
/*
- * This needs to be called _after_ the command line has been parsed but _before_
- * any drivers that may need the PCI DMA interface are initialized or bootmem has
- * been freed.
+ * This needs to be called _after_ the command line has been parsed but
+ * _before_ any drivers that may need the PCI DMA interface are
+ * initialized or bootmem has been freed.
*/
- platform_dma_init();
+#ifdef CONFIG_INTEL_IOMMU
+ detect_intel_iommu();
+ if (!iommu_detected)
+#endif
+#ifdef CONFIG_SWIOTLB
+ swiotlb_init(1);
#endif
#ifdef CONFIG_FLATMEM
@@ -627,7 +650,7 @@
set_max_mapnr(max_low_pfn);
high_memory = __va(max_low_pfn * PAGE_SIZE);
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
/*
@@ -646,14 +669,14 @@
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
- bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_restrictions *restrictions)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
- ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+ ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
if (ret)
printk("%s: Problem encountered in __add_pages() as ret=%d\n",
__func__, ret);
@@ -661,21 +684,14 @@
return ret;
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+void arch_remove_memory(int nid, u64 start, u64 size,
+ struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct zone *zone;
- int ret;
zone = page_zone(pfn_to_page(start_pfn));
- ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
- if (ret)
- pr_warn("%s: Problem encountered in __remove_pages() as"
- " ret=%d\n", __func__, ret);
-
- return ret;
+ __remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif
-#endif
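The init.c changes fold the machvec indirection into generic code:
mem_init() now probes the Intel IOMMU itself and falls back to
swiotlb_init(1) (note the dangling `if (!iommu_detected)`, which guards
the swiotlb call only when CONFIG_INTEL_IOMMU is configured in), and
dma_mark_clean() becomes the generic arch_sync_dma_for_cpu() hook. Where
the old routine page-aligned the start and only marked fully covered
pages, the new hook marks every page frame the synced range touches. A
sketch of that pfn walk in isolation (the function name is illustrative):

#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/pfn.h>

/* Illustrative only: set PG_arch_1 on each page covered by [paddr, paddr + size). */
static void mark_range_clean_example(phys_addr_t paddr, size_t size)
{
	unsigned long pfn = PHYS_PFN(paddr);
	unsigned long last = PHYS_PFN(paddr + size - 1);	/* inclusive */

	for (; pfn <= last; pfn++)
		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
}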
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 43964cd..0c0de2c 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/compiler.h>
@@ -45,7 +42,7 @@
/*
* For things in kern_memmap, we must use the same attribute
* as the rest of the kernel. For more details, see
- * Documentation/ia64/aliasing.txt.
+ * Documentation/ia64/aliasing.rst.
*/
attr = kern_mem_attribute(phys_addr, size);
if (attr & EFI_MEMORY_WB)
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index aa19b7a..5e1015e 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -15,7 +15,7 @@
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/module.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
@@ -36,6 +36,12 @@
*/
u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
+int __node_distance(int from, int to)
+{
+ return slit_distance(from, to);
+}
+EXPORT_SYMBOL(__node_distance);
+
/* Identify which cnode a physical address resides on */
int
paddr_to_nid(unsigned long paddr)
@@ -49,6 +55,7 @@
return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
}
+EXPORT_SYMBOL(paddr_to_nid);
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
/*
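Exporting __node_distance() and paddr_to_nid() lets modular code reach the
SLIT table: presumably paired with an asm/numa.h change (not shown in this
section) that maps the generic node_distance() macro onto __node_distance(),
which in turn indexes numa_slit[] via slit_distance(). A sketch of a
consumer, with an illustrative function name:

#include <linux/topology.h>

/* Illustrative only: pick whichever candidate node is closer to 'from'. */
static int closer_node_example(int from, int a, int b)
{
	return node_distance(from, a) <= node_distance(from, b) ? a : b;
}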
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index acf10eb..72cc568 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TLB support routines.
*
@@ -21,7 +22,7 @@
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
#include <asm/delay.h>
@@ -59,8 +60,16 @@
void __init
mmu_context_init (void)
{
- ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
- ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
+ ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
+ SMP_CACHE_BYTES);
+ if (!ia64_ctx.bitmap)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ (ia64_ctx.max_ctx + 1) >> 3);
+ ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
+ SMP_CACHE_BYTES);
+ if (!ia64_ctx.flushmap)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ (ia64_ctx.max_ctx + 1) >> 3);
}
/*
@@ -236,7 +245,8 @@
spinaphore_init(&ptcg_sem, max_purges);
}
-void
+#ifdef CONFIG_SMP
+static void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long nbits)
{
@@ -273,6 +283,7 @@
activate_context(active_mm);
}
}
+#endif /* CONFIG_SMP */
void
local_flush_tlb_all (void)
@@ -297,8 +308,8 @@
ia64_srlz_i(); /* srlz.i implies srlz.d */
}
-void
-flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
+static void
+__flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
@@ -323,7 +334,7 @@
preempt_disable();
#ifdef CONFIG_SMP
if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
- platform_global_tlb_purge(mm, start, end, nbits);
+ ia64_global_tlb_purge(mm, start, end, nbits);
preempt_enable();
return;
}
@@ -335,6 +346,25 @@
preempt_enable();
ia64_srlz_i(); /* srlz.i implies srlz.d */
}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (unlikely(end - start >= 1024*1024*1024*1024UL
+ || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
+ /*
+ * If we flush more than a tera-byte or across regions, we're
+ * probably better off just flushing the entire TLB(s). This
+ * should be very rare and is not worth optimizing for.
+ */
+ flush_tlb_all();
+ } else {
+ /* flush the address range from the tlb */
+ __flush_tlb_range(vma, start, end);
+ /* flush the virt. page-table area mapping the addr range */
+ __flush_tlb_range(vma, ia64_thash(start), ia64_thash(end));
+ }
+}
EXPORT_SYMBOL(flush_tlb_range);
void ia64_tlb_init(void)
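The new flush_tlb_range() wrapper makes two cheap checks before choosing
between a ranged purge and a full flush, then repeats the ranged purge over
ia64_thash(start)..ia64_thash(end) to also flush the virtually-mapped
page-table area, as its comments describe. The checks reduce to simple
arithmetic; a sketch, assuming ia64's 3-bit region field in the top of the
virtual address (the function name is illustrative):

#include <linux/types.h>

/* Illustrative only: the two tests that force a full flush_tlb_all(). */
static bool wants_full_flush_example(unsigned long start, unsigned long end)
{
	/* 1024*1024*1024*1024UL == 1UL << 40: one terabyte */
	bool huge = (end - start) >= (1UL << 40);

	/* On ia64, REGION_NUMBER() extracts the top 3 address bits. */
	bool crosses_region = (start >> 61) != ((end - 1) >> 61);

	return huge || crosses_region;
}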