Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 0804a6a..15af4dd 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -18,7 +18,7 @@
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>
@@ -30,6 +30,7 @@
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
+#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
@@ -91,18 +92,6 @@
*/
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
-
-static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
- unsigned long dma_size)
-{
- if (size[0] <= dma_size)
- return;
-
- size[ZONE_NORMAL] = size[0] - dma_size;
- size[ZONE_DMA] = dma_size;
- hole[ZONE_NORMAL] = hole[0];
- hole[ZONE_DMA] = 0;
-}
#endif
void __init setup_dma_zone(const struct machine_desc *mdesc)
@@ -120,67 +109,38 @@
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
unsigned long max_high)
{
- unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
- struct memblock_region *reg;
-
- /*
- * initialise the zones.
- */
- memset(zone_size, 0, sizeof(zone_size));
-
- /*
- * The memory size has already been determined. If we need
- * to do anything fancy with the allocation of this memory
- * to the zones, now is the time to do it.
- */
- zone_size[0] = max_low - min;
-#ifdef CONFIG_HIGHMEM
- zone_size[ZONE_HIGHMEM] = max_high - max_low;
-#endif
-
- /*
- * Calculate the size of the holes.
- * holes = node_size - sum(bank_sizes)
- */
- memcpy(zhole_size, zone_size, sizeof(zhole_size));
- for_each_memblock(memory, reg) {
- unsigned long start = memblock_region_memory_base_pfn(reg);
- unsigned long end = memblock_region_memory_end_pfn(reg);
-
- if (start < max_low) {
- unsigned long low_end = min(end, max_low);
- zhole_size[0] -= low_end - start;
- }
-#ifdef CONFIG_HIGHMEM
- if (end > max_low) {
- unsigned long high_start = max(start, max_low);
- zhole_size[ZONE_HIGHMEM] -= end - high_start;
- }
-#endif
- }
+ unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
#ifdef CONFIG_ZONE_DMA
- /*
- * Adjust the sizes according to any special requirements for
- * this machine type.
- */
- if (arm_dma_zone_size)
- arm_adjust_dma_zone(zone_size, zhole_size,
- arm_dma_zone_size >> PAGE_SHIFT);
+ max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low);
#endif
-
- free_area_init_node(0, zone_size, min, zhole_size);
+ max_zone_pfn[ZONE_NORMAL] = max_low;
+#ifdef CONFIG_HIGHMEM
+ max_zone_pfn[ZONE_HIGHMEM] = max_high;
+#endif
+ free_area_init(max_zone_pfn);
}
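For reference, the rewritten zone_sizes_init() no longer computes per-zone sizes and holes by hand; it only reports the highest PFN of each zone and lets free_area_init() derive spans and holes from memblock. A minimal standalone sketch of that contract, with made-up PFN limits (the zone names mirror the kernel's, everything else is illustrative):

    /*
     * Sketch: consume a max_zone_pfn[] array the way free_area_init()
     * does, turning "highest pfn per zone" into zone spans. All PFN
     * values below are invented for illustration.
     */
    #include <stdio.h>

    enum { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

    int main(void)
    {
        const char *names[] = { "DMA", "Normal", "HighMem" };
        unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
        unsigned long start = 0x60000;            /* first RAM pfn (example) */

        max_zone_pfn[ZONE_DMA]     = 0x70000;     /* min(arm_dma_pfn_limit, max_low) */
        max_zone_pfn[ZONE_NORMAL]  = 0xb0000;     /* max_low */
        max_zone_pfn[ZONE_HIGHMEM] = 0x100000;    /* max_high */

        for (int zone = 0; zone < MAX_NR_ZONES; zone++) {
            if (max_zone_pfn[zone] <= start)
                continue;                         /* empty zone */
            printf("zone %-7s pfn %#lx-%#lx\n",
                   names[zone], start, max_zone_pfn[zone]);
            start = max_zone_pfn[zone];
        }
        return 0;
    }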
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
phys_addr_t addr = __pfn_to_phys(pfn);
+ unsigned long pageblock_size = PAGE_SIZE * pageblock_nr_pages;
if (__phys_to_pfn(addr) != pfn)
return 0;
- return memblock_is_map_memory(__pfn_to_phys(pfn));
+ /*
+ * If the address is less than pageblock_size bytes away from a present
+ * memory chunk, there will still be a memory map entry for it,
+ * because we round the freed memory map to pageblock boundaries.
+ */
+ if (memblock_overlaps_region(&memblock.memory,
+ ALIGN_DOWN(addr, pageblock_size),
+ pageblock_size))
+ return 1;
+
+ return 0;
}
EXPORT_SYMBOL(pfn_valid);
#endif
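The new pfn_valid() depends on free_unused_memmap() rounding to pageblock boundaries (below): even a pfn just past the end of RAM still has a memory map entry if it falls inside the last pageblock. A userspace sketch of the overlap test, with an invented region list and an assumed 4 MiB pageblock:

    /*
     * Sketch of the pageblock-rounded validity test. The region list and
     * the pageblock size are assumptions for illustration, not kernel data.
     */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SHIFT          12
    #define ALIGN_DOWN(x, a)    ((x) & ~((unsigned long)(a) - 1))

    struct region { unsigned long base, size; };

    static bool overlaps(const struct region *r, size_t n,
                         unsigned long base, unsigned long size)
    {
        for (size_t i = 0; i < n; i++)
            if (base < r[i].base + r[i].size && r[i].base < base + size)
                return true;
        return false;
    }

    int main(void)
    {
        /* One bank of RAM that does not end on a pageblock boundary. */
        const struct region memory[] = { { 0x60000000, 0x1ff00000 } };
        unsigned long pageblock_size = 1024UL << PAGE_SHIFT; /* 4 MiB, assumed */
        unsigned long pfn = 0x7ff00;     /* first pfn past the end of RAM */
        unsigned long addr = pfn << PAGE_SHIFT;

        /* Valid: the memmap for the whole trailing pageblock was kept. */
        printf("pfn %#lx valid: %d\n", pfn,
               overlaps(memory, 1, ALIGN_DOWN(addr, pageblock_size),
                        pageblock_size));
        return 0;
    }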
@@ -293,18 +253,13 @@
(phys_addr_t)max_low_pfn << PAGE_SHIFT);
/*
- * Sparsemem tries to allocate bootmem in memory_present(),
- * so must be done after the fixed reservations
- */
- memblocks_present();
-
- /*
- * sparse_init() needs the bootmem allocator up and running.
+ * sparse_init() tries to allocate memory from memblock, so it must
+ * be done after the fixed reservations.
*/
sparse_init();
/*
- * Now free the memory - free_area_init_node needs
+ * Now free the memory - free_area_init needs
* the sparse mem_map arrays initialized by sparse_init()
* for memmap_init_zone(), otherwise all PFNs are invalid.
*/
@@ -354,16 +309,14 @@
*/
static void __init free_unused_memmap(void)
{
- unsigned long start, prev_end = 0;
- struct memblock_region *reg;
+ unsigned long start, end, prev_end = 0;
+ int i;
/*
* This relies on each bank being in address order.
* The banks are sorted previously in bootmem_init().
*/
- for_each_memblock(memory, reg) {
- start = memblock_region_memory_base_pfn(reg);
-
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
/*
* Take care not to free memmap entries that don't exist
@@ -371,15 +324,15 @@
*/
start = min(start,
ALIGN(prev_end, PAGES_PER_SECTION));
-#else
- /*
- * Align down here since the VM subsystem insists that the
- * memmap entries are valid from the bank start aligned to
- * MAX_ORDER_NR_PAGES.
- */
- start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
/*
+ * Align down here since many operations in the VM subsystem
+ * presume that there are no holes in the memory map inside
+ * a pageblock
+ */
+ start = round_down(start, pageblock_nr_pages);
+
+ /*
* If we had a previous bank, and there is a space
* between the current bank and the previous, free it.
*/
@@ -387,76 +340,45 @@
free_memmap(prev_end, start);
/*
- * Align up here since the VM subsystem insists that the
- * memmap entries are valid from the bank end aligned to
- * MAX_ORDER_NR_PAGES.
+ * Align up here since many operations in the VM subsystem
+ * presume that there are no holes in the memory map inside
+ * a pageblock
*/
- prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
- MAX_ORDER_NR_PAGES);
+ prev_end = ALIGN(end, pageblock_nr_pages);
}
#ifdef CONFIG_SPARSEMEM
- if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
+ if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
+ prev_end = ALIGN(end, pageblock_nr_pages);
free_memmap(prev_end,
ALIGN(prev_end, PAGES_PER_SECTION));
+ }
#endif
}
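The effect of the two alignment steps can be seen in isolation: bank starts round down and bank ends round up to pageblock boundaries, so only the pageblock-aligned gap in between is freed. A sketch with an invented bank layout and an assumed 1024-page pageblock:

    /*
     * Sketch of the gap computation in free_unused_memmap(). Bank layout
     * and pageblock order are invented for illustration.
     */
    #include <stdio.h>

    #define PAGEBLOCK_NR_PAGES  1024UL
    #define ALIGN(x, a)         (((x) + (a) - 1) & ~((a) - 1))
    #define ROUND_DOWN(x, a)    ((x) & ~((a) - 1))

    int main(void)
    {
        unsigned long banks[][2] = {   /* { start_pfn, end_pfn } */
            { 0x60000, 0x64100 },
            { 0x70200, 0x78000 },
        };
        unsigned long prev_end = 0;

        for (int i = 0; i < 2; i++) {
            unsigned long start = ROUND_DOWN(banks[i][0], PAGEBLOCK_NR_PAGES);

            /* Free the memmap between aligned bank boundaries only. */
            if (prev_end && prev_end < start)
                printf("free memmap for pfns %#lx-%#lx\n", prev_end, start);
            prev_end = ALIGN(banks[i][1], PAGEBLOCK_NR_PAGES);
        }
        return 0;
    }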
-#ifdef CONFIG_HIGHMEM
-static inline void free_area_high(unsigned long pfn, unsigned long end)
-{
- for (; pfn < end; pfn++)
- free_highmem_page(pfn_to_page(pfn));
-}
-#endif
-
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long max_low = max_low_pfn;
- struct memblock_region *mem, *res;
+ phys_addr_t range_start, range_end;
+ u64 i;
/* set highmem page free */
- for_each_memblock(memory, mem) {
- unsigned long start = memblock_region_memory_base_pfn(mem);
- unsigned long end = memblock_region_memory_end_pfn(mem);
+ for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+ &range_start, &range_end, NULL) {
+ unsigned long start = PFN_UP(range_start);
+ unsigned long end = PFN_DOWN(range_end);
/* Ignore complete lowmem entries */
if (end <= max_low)
continue;
- if (memblock_is_nomap(mem))
- continue;
-
/* Truncate partial highmem entries */
if (start < max_low)
start = max_low;
- /* Find and exclude any reserved regions */
- for_each_memblock(reserved, res) {
- unsigned long res_start, res_end;
-
- res_start = memblock_region_reserved_base_pfn(res);
- res_end = memblock_region_reserved_end_pfn(res);
-
- if (res_end < start)
- continue;
- if (res_start < start)
- res_start = start;
- if (res_start > end)
- res_start = end;
- if (res_end > end)
- res_end = end;
- if (res_start != start)
- free_area_high(start, res_start);
- start = res_end;
- if (start == end)
- break;
- }
-
- /* And now free anything which remains */
- if (start < end)
- free_area_high(start, end);
+ for (; start < end; start++)
+ free_highmem_page(pfn_to_page(start));
}
#endif
}
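The reserved-region exclusion loop could be dropped because for_each_free_mem_range() already skips reserved and nomap memory, so each returned range only needs clipping against the lowmem limit. A sketch of that clipping, with invented ranges and an invented max_low:

    /*
     * Sketch of the simplified free_highpages() walk. The free ranges and
     * the lowmem limit are invented; the iterator is modelled by a plain
     * array because the real one already excludes reserved memory.
     */
    #include <stdio.h>

    int main(void)
    {
        unsigned long max_low = 0x70000;        /* lowmem limit (example) */
        unsigned long free_ranges[][2] = {      /* { start_pfn, end_pfn } */
            { 0x60000, 0x74000 },               /* straddles max_low */
            { 0x80000, 0x90000 },               /* pure highmem */
        };
        unsigned long freed = 0;

        for (int i = 0; i < 2; i++) {
            unsigned long start = free_ranges[i][0];
            unsigned long end = free_ranges[i][1];

            if (end <= max_low)                 /* complete lowmem entry */
                continue;
            if (start < max_low)                /* truncate partial entry */
                start = max_low;
            freed += end - start;  /* each pfn would go to free_highmem_page() */
        }
        printf("highmem pages released: %#lx\n", freed);
        return 0;
    }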
@@ -469,7 +391,11 @@
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
- swiotlb_init(1);
+ if (swiotlb_force == SWIOTLB_FORCE ||
+ max_pfn > arm_dma_pfn_limit)
+ swiotlb_init(1);
+ else
+ swiotlb_force = SWIOTLB_NO_FORCE;
#endif
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
@@ -569,7 +495,7 @@
{
pmd_t *pmd;
- pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+ pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);
#ifdef CONFIG_ARM_LPAE
pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
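The extra p4d_offset() call comes from the kernel's five-level page table interface; on 32-bit ARM the p4d level (and, without LPAE, the pud level) is folded, so the longer walk resolves to the same pointer. A toy model of such a walk, where a folded level simply has a single entry (the shifts and table widths are invented, not ARM's real layout):

    /*
     * Toy model of a pgd -> p4d -> pud -> pmd walk: each level picks an
     * index out of the address, and a folded level has one entry, so it
     * always selects index 0. All shifts/widths here are illustrative.
     */
    #include <stdio.h>

    #define IDX(addr, shift, entries)  (((addr) >> (shift)) & ((entries) - 1UL))

    int main(void)
    {
        unsigned long addr = 0x76543210UL;

        printf("pgd %lu, p4d %lu, pud %lu, pmd %lu\n",
               IDX(addr, 30, 4),      /* pgd_offset() */
               IDX(addr, 30, 1),      /* p4d_offset(): folded, always 0 */
               IDX(addr, 30, 1),      /* pud_offset(): folded without LPAE */
               IDX(addr, 21, 512));   /* pmd_offset() */
        return 0;
    }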
@@ -592,8 +518,8 @@
return !!(get_cr() & CR_XP);
}
-void set_section_perms(struct section_perm *perms, int n, bool set,
- struct mm_struct *mm)
+static void set_section_perms(struct section_perm *perms, int n, bool set,
+ struct mm_struct *mm)
{
size_t i;
unsigned long addr;