Update Linux to v5.4.2
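
In arch/ia64/mm/tlb.c this picks up the v5.4 conversion from bootmem
to memblock, with explicit panic() checks on allocation failure, makes
ia64_global_tlb_purge() static and SMP-only following the machine-vector
removal, and splits flush_tlb_range() so that very large or cross-region
flushes fall back to flush_tlb_all().
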
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index acf10eb..72cc568 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TLB support routines.
*
@@ -21,7 +22,7 @@
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/delay.h>
@@ -59,8 +60,16 @@
void __init
mmu_context_init (void)
{
- ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
- ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
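+ /*
+ * The bootmem allocator is gone in v5.4. memblock_alloc() returns
+ * zeroed, SMP_CACHE_BYTES-aligned memory, or NULL on failure, so
+ * check the result and panic() explicitly.
+ */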
+ ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
+ SMP_CACHE_BYTES);
+ if (!ia64_ctx.bitmap)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ (ia64_ctx.max_ctx + 1) >> 3);
+ ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
+ SMP_CACHE_BYTES);
+ if (!ia64_ctx.flushmap)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ (ia64_ctx.max_ctx + 1) >> 3);
}

/*
@@ -236,7 +245,8 @@
spinaphore_init(&ptcg_sem, max_purges);
}

-void
+#ifdef CONFIG_SMP
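+/*
+ * This was the default implementation behind the old
+ * platform_global_tlb_purge() machvec hook; with the machine-vector
+ * layer removed in v5.4 it is only called from this file, so it can
+ * be static and built only for CONFIG_SMP.
+ */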
+static void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long nbits)
{
@@ -273,6 +283,7 @@
activate_context(active_mm);
}
}
+#endif /* CONFIG_SMP */

void
local_flush_tlb_all (void)
@@ -297,8 +308,8 @@
ia64_srlz_i(); /* srlz.i implies srlz.d */
}

-void
-flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
+static void
+__flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
@@ -323,7 +334,7 @@
preempt_disable();
#ifdef CONFIG_SMP
if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
- platform_global_tlb_purge(mm, start, end, nbits);
+ ia64_global_tlb_purge(mm, start, end, nbits);
preempt_enable();
return;
}
@@ -335,6 +346,25 @@
preempt_enable();
ia64_srlz_i(); /* srlz.i implies srlz.d */
}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
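+ /*
+ * REGION_NUMBER() is the top three bits of the address, selecting
+ * one of ia64's eight 2^61-byte regions. The ia64_thash() range
+ * used below is only contiguous within a single region.
+ */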
+ if (unlikely(end - start >= 1024*1024*1024*1024UL
+ || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
+ /*
+ * If we flush more than a tera-byte or across regions, we're
+ * probably better off just flushing the entire TLB(s). This
+ * should be very rare and is not worth optimizing for.
+ */
+ flush_tlb_all();
+ } else {
+ /* flush the address range from the tlb */
+ __flush_tlb_range(vma, start, end);
+ /* flush the virt. page-table area mapping the addr range */
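+ /*
+ * ia64_thash() returns the VHPT entry address for a given VA, so
+ * this pass purges the TLB entries that map the virtually-mapped
+ * page table covering the range.
+ */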
+ __flush_tlb_range(vma, ia64_thash(start), ia64_thash(end));
+ }
+}
EXPORT_SYMBOL(flush_tlb_range);

void ia64_tlb_init(void)