Update Linux to v5.4.148
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.4.148.tar.gz
Change-Id: Ib3d26c5ba9b022e2e03533005c4fed4d7c30b61b
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index dd90c97..0e7316a 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -11,19 +11,19 @@
* See Documentation/atomic_bitops.txt for details.
*/
-static inline void set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
}
-static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
}
-static inline void change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
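
Note on the bitops hunk above: set_bit()/clear_bit()/change_bit() move from plain
inline (a hint the compiler may ignore) to __always_inline (inlining is forced).
A stand-alone sketch of the difference, assuming the conventional definition of
the attribute found in the kernel's compiler attribute headers; the bit_mask()
helper below is illustrative only, not kernel code:

/* Roughly how the kernel defines the attribute: */
#define __always_inline inline __attribute__((__always_inline__))

/*
 * With plain "inline", gcc/clang may still emit an out-of-line copy
 * (e.g. at -O0); with __always_inline the body is folded into every caller.
 */
static __always_inline unsigned long bit_mask(unsigned int nr)
{
        return 1UL << (nr % (8 * sizeof(unsigned long)));
}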
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index a950a22..cac7404 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -11,71 +11,102 @@
* The cache doesn't need to be flushed when TLB entries change when
* the cache is mapped to physical memory, not virtual memory
*/
+#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
+#endif
+#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
+#endif
+#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
+#endif
+#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
}
+#endif
+#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
unsigned long vmaddr,
unsigned long pfn)
{
}
+#endif
+#ifndef flush_dcache_page
static inline void flush_dcache_page(struct page *page)
{
}
+#endif
+#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
+#endif
+#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
+#endif
+#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
+#endif
+#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
{
}
+#endif
+#ifndef flush_icache_user_range
static inline void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page,
unsigned long addr, int len)
{
}
+#endif
+#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
+#endif
+#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
+#endif
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
+#endif
+
+#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
+#endif
#endif /* __ASM_CACHEFLUSH_H */
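
Note on the cacheflush.h hunk: every generic no-op stub is now guarded by
#ifndef, so an architecture can supply only the helpers it really implements
and fall back to the generic stubs for the rest. A hedged sketch of the
override pattern this enables; the arch name, header path and function body
are hypothetical:

/* hypothetical arch/foo/include/asm/cacheflush.h */
extern void foo_sync_icache(unsigned long start, unsigned long end);

static inline void flush_icache_range(unsigned long start, unsigned long end)
{
        foo_sync_icache(start, end);
}
#define flush_icache_range flush_icache_range   /* suppress the generic stub */

#include <asm-generic/cacheflush.h>             /* pick up the remaining no-ops */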
diff --git a/include/asm-generic/mmiowb.h b/include/asm-generic/mmiowb.h
index 9439ff0..5698fca 100644
--- a/include/asm-generic/mmiowb.h
+++ b/include/asm-generic/mmiowb.h
@@ -27,7 +27,7 @@
#include <asm/smp.h>
DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
-#define __mmiowb_state() this_cpu_ptr(&__mmiowb_state)
+#define __mmiowb_state() raw_cpu_ptr(&__mmiowb_state)
#else
#define __mmiowb_state() arch_mmiowb_state()
#endif /* arch_mmiowb_state */
@@ -35,7 +35,9 @@
static inline void mmiowb_set_pending(void)
{
struct mmiowb_state *ms = __mmiowb_state();
- ms->mmiowb_pending = ms->nesting_count;
+
+ if (likely(ms->nesting_count))
+ ms->mmiowb_pending = ms->nesting_count;
}
static inline void mmiowb_spin_lock(void)
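
Note on the mmiowb.h hunk: __mmiowb_state() switches to raw_cpu_ptr() so the
helpers can be reached from preemptible context without triggering the
this_cpu_ptr() preemption check under CONFIG_DEBUG_PREEMPT, and
mmiowb_set_pending() now only records a pending MMIO write while at least one
spinlock is held (nesting_count != 0). A conceptual sketch of the resulting
state machine, not the kernel code:

struct sketch_mmiowb_state {
        unsigned int nesting_count;     /* spinlocks currently held on this CPU */
        unsigned int mmiowb_pending;    /* MMIO write issued under a lock? */
};

static void sketch_set_pending(struct sketch_mmiowb_state *ms)
{
        if (ms->nesting_count)          /* only track writes done inside a lock */
                ms->mmiowb_pending = ms->nesting_count;
}

static void sketch_spin_unlock(struct sketch_mmiowb_state *ms)
{
        if (ms->mmiowb_pending) {
                ms->mmiowb_pending = 0;
                /* the arch mmiowb() barrier would be issued here */
        }
        ms->nesting_count--;
}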
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 18d8e2d..53759d2 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -163,7 +163,7 @@
return nr_bank;
}
-void hyperv_report_panic(struct pt_regs *regs, long err);
+void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
bool hv_is_hyperv_initialized(void);
void hyperv_cleanup(void);
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 8186918..15f7736 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1159,8 +1159,17 @@
#endif /* !__ASSEMBLY__ */
-#ifndef io_remap_pfn_range
-#define io_remap_pfn_range remap_pfn_range
+#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+/*
+ * ZSMALLOC needs to know the highest PFN on 32-bit architectures
+ * with physical address space extension, but falls back to
+ * BITS_PER_LONG otherwise.
+ */
+#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
+#else
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#endif
#endif
#ifndef has_transparent_hugepage
@@ -1171,6 +1180,16 @@
#endif
#endif
+#ifndef p4d_offset_lockless
+#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
+#endif
+#ifndef pud_offset_lockless
+#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
+#endif
+#ifndef pmd_offset_lockless
+#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
+#endif
+
/*
* On some architectures it depends on the mm if the p4d/pud or pmd
* layer of the page table hierarchy is folded or not.
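
Note on the pgtable.h hunk: 32-bit architectures with a physical address
extension are now expected to define MAX_POSSIBLE_PHYSMEM_BITS themselves
(zsmalloc derives its PFN encoding width from it, per the comment above), and
the new *_offset_lockless() macros let lockless walkers such as fast GUP pass
a previously-read upper-level entry value instead of re-dereferencing the
pointer on architectures with dynamic page-table folding. A hedged sketch of
the expected arch-side definition; CONFIG_FOO_PAE and the value 36 are purely
illustrative:

/* hypothetical fragment of arch/foo/include/asm/pgtable.h */
#ifdef CONFIG_FOO_PAE
#define MAX_POSSIBLE_PHYSMEM_BITS       36      /* up to 64 GiB of physical space */
#else
#define MAX_POSSIBLE_PHYSMEM_BITS       32
#endif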
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index d1779d4..66397ed 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -53,6 +53,9 @@
/* Start and end of .opd section - used for function descriptors. */
extern char __start_opd[], __end_opd[];
+/* Start and end of instrumentation protected text section */
+extern char __noinstr_text_start[], __noinstr_text_end[];
+
extern __visible const void __nosave_begin, __nosave_end;
/* Function descriptor handling (if any). Override in asm/sections.h */
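
Note on the sections.h hunk: the new __noinstr_text_start/__noinstr_text_end
symbols bracket the .noinstr.text output section added to vmlinux.lds.h further
down in this patch. A small sketch of how such section bounds are typically
consumed; the helper name is hypothetical:

#include <linux/types.h>
#include <asm/sections.h>

static inline bool within_noinstr_text(unsigned long addr)
{
        return addr >= (unsigned long)__noinstr_text_start &&
               addr <  (unsigned long)__noinstr_text_end;
}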
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 04c0644..c716ea8 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -137,13 +137,6 @@
* When used, an architecture is expected to provide __tlb_remove_table()
* which does the actual freeing of these pages.
*
- * HAVE_RCU_TABLE_NO_INVALIDATE
- *
- * This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
- * freeing the page-table pages. This can be avoided if you use
- * HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
- * page-tables natively.
- *
* MMU_GATHER_NO_RANGE
*
* Use this if your architecture lacks an efficient flush_tlb_range().
@@ -189,8 +182,23 @@
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+/*
+ * This allows an architecture that does not use the linux page-tables for
+ * hardware to skip the TLBI when freeing page tables.
+ */
+#ifndef tlb_needs_table_invalidate
+#define tlb_needs_table_invalidate() (true)
#endif
+#else
+
+#ifdef tlb_needs_table_invalidate
+#error tlb_needs_table_invalidate() requires HAVE_RCU_TABLE_FREE
+#endif
+
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
+
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
/*
* If we can't allocate a page to make a big batch of page pointers
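
Note on the tlb.h hunk: the HAVE_RCU_TABLE_NO_INVALIDATE mechanism described in
the removed comment is replaced by a tlb_needs_table_invalidate() hook that
defaults to true, and the new #error makes sure the hook is only overridden
together with HAVE_RCU_TABLE_FREE. A hedged sketch of an arch-side override for
hardware that never walks the Linux page tables directly; the architecture is
hypothetical:

/* hypothetical arch/foo/include/asm/tlb.h */
#define tlb_needs_table_invalidate()    (false) /* HW never reads Linux page tables */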
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 2388737..5aa8705 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -48,7 +48,7 @@
#ifdef CONFIG_NEED_MULTIPLE_NODES
#define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
#else
- #define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #define cpumask_of_node(node) ((void)(node), cpu_online_mask)
#endif
#endif
#ifndef pcibus_to_node
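
Note on the topology.h hunk: the extra parentheses matter because a cast binds
tighter than most operators, so with an expression argument "(void)node" would
swallow only the first operand. A stand-alone illustration; the demo macros and
function are not kernel code:

#define use_unparenthesized(x)  ((void)x, 0)
#define use_parenthesized(x)    ((void)(x), 0)

int parenthesization_demo(int a, int b)
{
        /*
         * use_unparenthesized(a ? a : b) would expand to
         * ((void)a ? a : b, 0): the cast grabs just "a", leaving a void
         * value as the ?: condition, which fails to compile. The
         * parenthesized form expands to ((void)(a ? a : b), 0) and is fine.
         */
        return use_parenthesized(a ? a : b);
}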
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
index ce41032..cec543d 100644
--- a/include/asm-generic/vdso/vsyscall.h
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -12,9 +12,9 @@
#endif /* __arch_get_k_vdso_data */
#ifndef __arch_update_vdso_data
-static __always_inline int __arch_update_vdso_data(void)
+static __always_inline bool __arch_update_vdso_data(void)
{
- return 0;
+ return true;
}
#endif /* __arch_update_vdso_data */
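
Note on the vsyscall.h hunk: the __arch_update_vdso_data() stub now returns
bool and defaults to true. A sketch of a possible arch override, under the
assumption (inferred from the default above, not verified against the generic
timekeeping caller) that returning false tells the core code to skip updating
the vDSO data pages; foo_vdso_data_maintained_elsewhere() is an invented
predicate:

#include <linux/types.h>

extern bool foo_vdso_data_maintained_elsewhere(void);

static __always_inline bool __arch_update_vdso_data(void)
{
        return !foo_vdso_data_maintained_elsewhere();
}
#define __arch_update_vdso_data __arch_update_vdso_data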
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index dae6460..8a5ced9 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -306,7 +306,8 @@
#define PAGE_ALIGNED_DATA(page_align) \
. = ALIGN(page_align); \
- *(.data..page_aligned)
+ *(.data..page_aligned) \
+ . = ALIGN(page_align);
#define READ_MOSTLY_DATA(align) \
. = ALIGN(align); \
@@ -339,6 +340,7 @@
*/
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
+ . = ALIGN(8); \
__start_ro_after_init = .; \
*(.data..ro_after_init) \
JUMP_TABLE_DATA \
@@ -394,7 +396,7 @@
} \
\
/* Built-in firmware blobs */ \
- .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
+ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
__start_builtin_fw = .; \
KEEP(*(.builtin_fw)) \
__end_builtin_fw = .; \
@@ -496,10 +498,12 @@
__start___modver = .; \
KEEP(*(__modver)) \
__stop___modver = .; \
- . = ALIGN((align)); \
- __end_rodata = .; \
} \
- . = ALIGN((align));
+ \
+ BTF \
+ \
+ . = ALIGN((align)); \
+ __end_rodata = .;
/* RODATA & RO_DATA provided for backward compatibility.
* All archs are supposed to use RO_DATA() */
@@ -507,6 +511,15 @@
#define RO_DATA(align) RO_DATA_SECTION(align)
/*
+ * Non-instrumentable text section
+ */
+#define NOINSTR_TEXT \
+ ALIGN_FUNCTION(); \
+ __noinstr_text_start = .; \
+ *(.noinstr.text) \
+ __noinstr_text_end = .;
+
+/*
* .text section. Map to function alignment to avoid address changes
* during second ld run in second ld pass when generating System.map
*
@@ -516,9 +529,14 @@
*/
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
- *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
+ *(.text.hot .text.hot.*) \
+ *(TEXT_MAIN .text.fixup) \
+ *(.text.unlikely .text.unlikely.*) \
+ *(.text.unknown .text.unknown.*) \
+ NOINSTR_TEXT \
*(.text..refcount) \
*(.ref.text) \
+ *(.text.asan.* .text.tsan.*) \
MEM_KEEP(init.text*) \
MEM_KEEP(exit.text*) \
@@ -589,6 +607,20 @@
}
/*
+ * .BTF
+ */
+#ifdef CONFIG_DEBUG_INFO_BTF
+#define BTF \
+ .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \
+ __start_BTF = .; \
+ KEEP(*(.BTF)) \
+ __stop_BTF = .; \
+ }
+#else
+#define BTF
+#endif
+
+/*
* Init task
*/
#define INIT_TASK_DATA_SECTION(align) \
@@ -679,7 +711,9 @@
. = ALIGN(bss_align); \
.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
BSS_FIRST_SECTIONS \
+ . = ALIGN(PAGE_SIZE); \
*(.bss..page_aligned) \
+ . = ALIGN(PAGE_SIZE); \
*(.dynbss) \
*(BSS_MAIN) \
*(COMMON) \
@@ -723,8 +757,13 @@
/* DWARF 4 */ \
.debug_types 0 : { *(.debug_types) } \
/* DWARF 5 */ \
+ .debug_addr 0 : { *(.debug_addr) } \
+ .debug_line_str 0 : { *(.debug_line_str) } \
+ .debug_loclists 0 : { *(.debug_loclists) } \
.debug_macro 0 : { *(.debug_macro) } \
- .debug_addr 0 : { *(.debug_addr) }
+ .debug_names 0 : { *(.debug_names) } \
+ .debug_rnglists 0 : { *(.debug_rnglists) } \
+ .debug_str_offsets 0 : { *(.debug_str_offsets) }
/* Stabs debugging sections. */
#define STABS_DEBUG \
@@ -844,6 +883,7 @@
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION \
. = ALIGN(PAGE_SIZE); \
+ *(.data..decrypted) \
*(.data..percpu..decrypted) \
. = ALIGN(PAGE_SIZE);
#else
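
Note on the vmlinux.lds.h hunks: besides the alignment fixes and the extra
DWARF 5 debug sections, the new BTF rule emits the .BTF blob between
__start_BTF and __stop_BTF when CONFIG_DEBUG_INFO_BTF is enabled. A sketch of
how such start/stop linker symbols are consumed from C; the helper name is
generic, the real consumer lives in the kernel's BPF code:

#include <linux/types.h>

extern char __start_BTF[], __stop_BTF[];

static const void *btf_blob(size_t *size)
{
        *size = __stop_BTF - __start_BTF;       /* section length in bytes */
        return __start_BTF;
}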