Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
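
Notable upstream API migrations visible in the arch/ia64/mm hunks
below:

 - mm->mmap_sem is now mm->mmap_lock, taken through the
   mmap_read_lock()/mmap_write_lock() wrapper API.
 - free_area_init_nodes() has been folded into free_area_init().
 - Page-table walks gain an explicit p4d_t level between pgd and pud
   (folded five-level page tables).
 - handle_mm_fault() now takes struct pt_regs and does the
   major/minor fault accounting itself, so the arch fault handler
   only emits PERF_COUNT_SW_PAGE_FAULTS.
 - uninitialized_var() has been removed.
 - ioremap_nocache() is renamed ioremap_uc().

As a rough sketch only (not part of this patch), the mmap_sem
conversion replaces open-coded rwsem calls with the wrappers from
include/linux/mmap_lock.h:

	/* Before: direct rwsem operations on the mm     */
	down_read(&mm->mmap_sem);    /* take for reading */
	/* ... look up the VMA, handle the fault ...     */
	up_read(&mm->mmap_sem);      /* release          */

	/* After: wrapper API, as used throughout below  */
	mmap_read_lock(mm);          /* take for reading */
	/* ... look up the VMA, handle the fault ...     */
	mmap_read_unlock(mm);        /* release          */
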
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 5b00dc3..e30e360 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -21,8 +21,6 @@
#include <linux/swap.h>
#include <asm/meminit.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mca.h>
@@ -210,6 +208,6 @@
printk("Virtual mem_map starts at 0x%p\n", mem_map);
}
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
- free_area_init_nodes(max_zone_pfns);
+ free_area_init(max_zone_pfns);
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 41d243c..4d08134 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -24,7 +24,6 @@
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/slab.h>
-#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
@@ -180,7 +179,7 @@
void __init setup_per_cpu_areas(void)
{
struct pcpu_alloc_info *ai;
- struct pcpu_group_info *uninitialized_var(gi);
+ struct pcpu_group_info *gi;
unsigned int *cpu_map;
void *base;
unsigned long base_offset;
@@ -601,7 +600,6 @@
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
- sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
#ifdef CONFIG_VIRTUAL_MEM_MAP
@@ -627,7 +625,7 @@
max_zone_pfns[ZONE_DMA32] = max_dma;
#endif
max_zone_pfns[ZONE_NORMAL] = max_pfn;
- free_area_init_nodes(max_zone_pfns);
+ free_area_init(max_zone_pfns);
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
@@ -656,7 +654,7 @@
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
- return vmemmap_populate_basepages(start, end, node);
+ return vmemmap_populate_basepages(start, end, node, NULL);
}
void vmemmap_free(unsigned long start, unsigned long end,
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index c2f299f..cd9766d 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -14,8 +14,8 @@
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>
+#include <linux/perf_event.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/exception.h>
@@ -29,6 +29,7 @@
mapped_kernel_page_is_present (unsigned long address)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *ptep, pte;
@@ -37,7 +38,11 @@
if (pgd_none(*pgd) || pgd_bad(*pgd))
return 0;
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ if (p4d_none(*p4d) || p4d_bad(*p4d))
+ return 0;
+
+ pud = pud_offset(p4d, address);
if (pud_none(*pud) || pud_bad(*pud))
return 0;
@@ -65,13 +70,13 @@
struct mm_struct *mm = current->mm;
unsigned long mask;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
- /* mmap_sem is performance critical.... */
- prefetchw(&mm->mmap_sem);
+ /* mmap_lock is performance critical.... */
+ prefetchw(&mm->mmap_lock);
/*
* If we're in an interrupt or have no user context, we must not take the fault..
@@ -82,7 +87,7 @@
#ifdef CONFIG_VIRTUAL_MEM_MAP
/*
* If fault is in region 5 and we are in the kernel, we may already
- * have the mmap_sem (pfn_valid macro is called during mmap). There
+ * have the mmap_lock (pfn_valid macro is called during mmap). There
* is no vma for region 5 addr's anyway, so skip getting the semaphore
* and go directly to the exception handling code.
*/
@@ -101,8 +106,10 @@
flags |= FAULT_FLAG_USER;
if (mask & VM_WRITE)
flags |= FAULT_FLAG_WRITE;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma_prev(mm, address, &prev_vma);
if (!vma && !prev_vma )
@@ -139,9 +146,9 @@
* sure we exit gracefully rather than endlessly redo the
* fault.
*/
- fault = handle_mm_fault(vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags, regs);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs))
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -162,15 +169,10 @@
}
if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_MAJOR)
- current->maj_flt++;
- else
- current->min_flt++;
if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -179,7 +181,7 @@
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
check_expansion:
@@ -210,7 +212,7 @@
goto good_area;
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
#ifdef CONFIG_VIRTUAL_MEM_MAP
bad_area_no_up:
#endif
@@ -276,7 +278,7 @@
return;
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index d16e419..b331f94 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -18,7 +18,6 @@
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
-#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
@@ -30,12 +29,14 @@
{
unsigned long taddr = htlbpage_to_page(addr);
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
pgd = pgd_offset(mm, taddr);
- pud = pud_alloc(mm, pgd, taddr);
+ p4d = p4d_offset(pgd, taddr);
+ pud = pud_alloc(mm, p4d, taddr);
if (pud) {
pmd = pmd_alloc(mm, pud, taddr);
if (pmd)
@@ -49,17 +50,21 @@
{
unsigned long taddr = htlbpage_to_page(addr);
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
pgd = pgd_offset(mm, taddr);
if (pgd_present(*pgd)) {
- pud = pud_offset(pgd, taddr);
- if (pud_present(*pud)) {
- pmd = pmd_offset(pud, taddr);
- if (pmd_present(*pmd))
- pte = pte_offset_map(pmd, taddr);
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_present(*p4d)) {
+ pud = pud_offset(p4d, taddr);
+ if (pud_present(*pud)) {
+ pmd = pmd_offset(pud, taddr);
+ if (pmd_present(*pmd))
+ pte = pte_offset_map(pmd, taddr);
+ }
}
}
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ee50506..f316a83 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -8,7 +8,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
#include <linux/dmar.h>
#include <linux/efi.h>
#include <linux/elf.h>
@@ -73,8 +73,7 @@
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
* flush them when they get mapped into an executable vm-area.
*/
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
unsigned long pfn = PHYS_PFN(paddr);
@@ -118,13 +117,13 @@
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
vm_area_free(vma);
return;
}
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
}
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
@@ -136,13 +135,13 @@
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
VM_DONTEXPAND | VM_DONTDUMP;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
vm_area_free(vma);
return;
}
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
}
}
}
@@ -208,6 +207,7 @@
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -215,7 +215,10 @@
pgd = pgd_offset_k(address); /* note: this is NOT pgd_offset()! */
{
- pud = pud_alloc(&init_mm, pgd, address);
+ p4d = p4d_alloc(&init_mm, pgd, address);
+ if (!p4d)
+ goto out;
+ pud = pud_alloc(&init_mm, p4d, address);
if (!pud)
goto out;
pmd = pmd_alloc(&init_mm, pud, address);
@@ -382,6 +385,7 @@
do {
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -392,7 +396,13 @@
continue;
}
- pud = pud_offset(pgd, end_address);
+ p4d = p4d_offset(pgd, end_address);
+ if (p4d_none(*p4d)) {
+ end_address += P4D_SIZE;
+ continue;
+ }
+
+ pud = pud_offset(p4d, end_address);
if (pud_none(*pud)) {
end_address += PUD_SIZE;
continue;
@@ -430,6 +440,7 @@
struct page *map_start, *map_end;
int node;
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -444,12 +455,20 @@
for (address = start_page; address < end_page; address += PAGE_SIZE) {
pgd = pgd_offset_k(address);
if (pgd_none(*pgd)) {
+ p4d = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+ if (!p4d)
+ goto err_alloc;
+ pgd_populate(&init_mm, pgd, p4d);
+ }
+ p4d = p4d_offset(pgd, address);
+
+ if (p4d_none(*p4d)) {
pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
if (!pud)
goto err_alloc;
- pgd_populate(&init_mm, pgd, pud);
+ p4d_populate(&init_mm, p4d, pud);
}
- pud = pud_offset(pgd, address);
+ pud = pud_offset(p4d, address);
if (pud_none(*pud)) {
pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
@@ -517,18 +536,18 @@
if (map_start < map_end)
memmap_init_zone((unsigned long)(map_end - map_start),
- args->nid, args->zone, page_to_pfn(map_start),
- MEMINIT_EARLY, NULL);
+ args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end),
+ MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
return 0;
}
void __meminit
-memmap_init (unsigned long size, int nid, unsigned long zone,
+arch_memmap_init (unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn)
{
if (!vmem_map) {
- memmap_init_zone(size, nid, zone, start_pfn,
- MEMINIT_EARLY, NULL);
+ memmap_init_zone(size, nid, zone, start_pfn, start_pfn + size,
+ MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
} else {
struct page *start;
struct memmap_init_callback_data args;
@@ -543,6 +562,10 @@
}
}
+void __init memmap_init(void)
+{
+}
+
int
ia64_pfn_valid (unsigned long pfn)
{
@@ -670,13 +693,16 @@
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
- struct mhp_restrictions *restrictions)
+ struct mhp_params *params)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
- ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
+ if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
+ return -EINVAL;
+
+ ret = __add_pages(nid, start_pfn, nr_pages, params);
if (ret)
printk("%s: Problem encountered in __add_pages() as ret=%d\n",
__func__, ret);
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 0c0de2c..55fd3eb 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/io.h>
+#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/meminit.h>
@@ -99,14 +100,14 @@
EXPORT_SYMBOL(ioremap);
void __iomem *
-ioremap_nocache (unsigned long phys_addr, unsigned long size)
+ioremap_uc(unsigned long phys_addr, unsigned long size)
{
if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
return NULL;
return __ioremap_uc(phys_addr);
}
-EXPORT_SYMBOL(ioremap_nocache);
+EXPORT_SYMBOL(ioremap_uc);
void
early_iounmap (volatile void __iomem *addr, unsigned long size)
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 5e1015e..f349642 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -106,7 +106,5 @@
return 0;
return nid;
}
-
-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 72cc568..135b513 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -27,7 +27,6 @@
#include <asm/delay.h>
#include <asm/mmu_context.h>
-#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
@@ -369,7 +368,7 @@
void ia64_tlb_init(void)
{
- ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
+ ia64_ptce_info_t ptce_info;
u64 tr_pgbits;
long status;
pal_vm_info_1_u_t vm_info_1;