Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index 3b2a1e7..116d0bd 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -7,6 +7,7 @@
* Copyright (c) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/irqflags.h>
#include <linux/export.h>
@@ -19,12 +20,11 @@
*/
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *a = (unsigned long *)addr;
- unsigned bit = nr & SZLONG_MASK;
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
- a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a |= mask;
@@ -41,12 +41,11 @@
*/
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *a = (unsigned long *)addr;
- unsigned bit = nr & SZLONG_MASK;
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
- a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a &= ~mask;
@@ -63,12 +62,11 @@
*/
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *a = (unsigned long *)addr;
- unsigned bit = nr & SZLONG_MASK;
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
- a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a ^= mask;
@@ -78,32 +76,6 @@
/**
- * __mips_test_and_set_bit - Set a bit and return its old value. This is
- * called by test_and_set_bit() if it cannot find a faster solution.
- * @nr: Bit to set
- * @addr: Address to count from
- */
-int __mips_test_and_set_bit(unsigned long nr,
- volatile unsigned long *addr)
-{
- unsigned long *a = (unsigned long *)addr;
- unsigned bit = nr & SZLONG_MASK;
- unsigned long mask;
- unsigned long flags;
- int res;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- res = (mask & *a) != 0;
- *a |= mask;
- raw_local_irq_restore(flags);
- return res;
-}
-EXPORT_SYMBOL(__mips_test_and_set_bit);
-
-
-/**
* __mips_test_and_set_bit_lock - Set a bit and return its old value. This is
* called by test_and_set_bit_lock() if it cannot find a faster solution.
* @nr: Bit to set
@@ -112,13 +84,12 @@
int __mips_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned long *a = (unsigned long *)addr;
- unsigned bit = nr & SZLONG_MASK;
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
int res;
- a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a) != 0;
@@ -137,13 +108,12 @@
*/
int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *a = (unsigned long *)addr;
- unsigned bit = nr & SZLONG_MASK;
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
int res;
- a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a) != 0;
@@ -162,13 +132,12 @@
*/
int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *a = (unsigned long *)addr;
- unsigned bit = nr & SZLONG_MASK;
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
int res;
- a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a) != 0;
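Note on the bitops.c hunks above: the hand-rolled SZLONG_LOG/SZLONG_MASK indexing is replaced by the generic BIT_WORD() and BITS_PER_LONG helpers from <linux/bits.h>. A minimal userspace sketch (not kernel code; the SZLONG_LOG/SZLONG_MASK values are assumed for a 64-bit build) showing that the old and new forms select the same word and bit:

    #include <assert.h>
    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)
    #define SZLONG_LOG    6      /* log2(64), 64-bit build assumed */
    #define SZLONG_MASK   63UL

    int main(void)
    {
            for (unsigned long nr = 0; nr < 256; nr++) {
                    /* old: a += nr >> SZLONG_LOG; bit = nr & SZLONG_MASK; */
                    unsigned long old_word = nr >> SZLONG_LOG;
                    unsigned int  old_bit  = nr & SZLONG_MASK;
                    /* new: a = &addr[BIT_WORD(nr)]; bit = nr % BITS_PER_LONG; */
                    unsigned long new_word = BIT_WORD(nr);
                    unsigned int  new_bit  = nr % BITS_PER_LONG;

                    assert(old_word == new_word && old_bit == new_bit);
            }
            puts("old and new indexing agree");
            return 0;
    }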
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 2ff84f4..a46db08 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -279,7 +279,8 @@
#endif
/* odd buffer alignment? */
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON3)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+ defined(CONFIG_CPU_LOONGSON64)
.set push
.set arch=mips32r2
wsbh v1, sum
@@ -307,8 +308,8 @@
/*
* checksum and copy routines based on memcpy.S
*
- * csum_partial_copy_nocheck(src, dst, len, sum)
- * __csum_partial_copy_kernel(src, dst, len, sum, errp)
+ * csum_partial_copy_nocheck(src, dst, len)
+ * __csum_partial_copy_kernel(src, dst, len)
*
* See "Spec" in memcpy.S for details. Unlike __copy_user, all
* function in this file use the standard calling convention.
@@ -317,26 +318,11 @@
#define src a0
#define dst a1
#define len a2
-#define psum a3
#define sum v0
#define odd t8
-#define errptr t9
/*
- * The exception handler for loads requires that:
- * 1- AT contain the address of the byte just past the end of the source
- * of the copy,
- * 2- src_entry <= src < AT, and
- * 3- (dst - src) == (dst_entry - src_entry),
- * The _entry suffix denotes values when __copy_user was called.
- *
- * (1) is set up up by __csum_partial_copy_from_user and maintained by
- * not writing AT in __csum_partial_copy
- * (2) is met by incrementing src by the number of bytes copied
- * (3) is met by not doing loads between a pair of increments of dst and src
- *
- * The exception handlers for stores stores -EFAULT to errptr and return.
- * These handlers do not need to overwrite any data.
+ * All exception handlers simply return 0.
*/
/* Instruction type */
@@ -357,11 +343,11 @@
* addr : Address
* handler : Exception handler
*/
-#define EXC(insn, type, reg, addr, handler) \
+#define EXC(insn, type, reg, addr) \
.if \mode == LEGACY_MODE; \
9: insn reg, addr; \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR 9b, .L_exc; \
.previous; \
/* This is enabled in EVA mode */ \
.else; \
@@ -370,7 +356,7 @@
((\to == USEROP) && (type == ST_INSN)); \
9: __BUILD_EVA_INSN(insn##e, reg, addr); \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR 9b, .L_exc; \
.previous; \
.else; \
/* EVA without exception */ \
@@ -383,14 +369,14 @@
#ifdef USE_DOUBLE
#define LOADK ld /* No exception */
-#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
-#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
-#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
-#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
-#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
-#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
-#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
-#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
+#define LOAD(reg, addr) EXC(ld, LD_INSN, reg, addr)
+#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr) EXC(ldl, LD_INSN, reg, addr)
+#define LOADR(reg, addr) EXC(ldr, LD_INSN, reg, addr)
+#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr) EXC(sdl, ST_INSN, reg, addr)
+#define STORER(reg, addr) EXC(sdr, ST_INSN, reg, addr)
+#define STORE(reg, addr) EXC(sd, ST_INSN, reg, addr)
#define ADD daddu
#define SUB dsubu
#define SRL dsrl
@@ -403,14 +389,14 @@
#else
#define LOADK lw /* No exception */
-#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
-#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
-#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
-#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
-#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
-#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
-#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
-#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
+#define LOAD(reg, addr) EXC(lw, LD_INSN, reg, addr)
+#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr) EXC(lwl, LD_INSN, reg, addr)
+#define LOADR(reg, addr) EXC(lwr, LD_INSN, reg, addr)
+#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr) EXC(swl, ST_INSN, reg, addr)
+#define STORER(reg, addr) EXC(swr, ST_INSN, reg, addr)
+#define STORE(reg, addr) EXC(sw, ST_INSN, reg, addr)
#define ADD addu
#define SUB subu
#define SRL srl
@@ -449,22 +435,9 @@
.set at=v1
#endif
- .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
+ .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to
- PTR_ADDU AT, src, len /* See (1) above. */
- /* initialize __nocheck if this the first time we execute this
- * macro
- */
-#ifdef CONFIG_64BIT
- move errptr, a4
-#else
- lw errptr, 16(sp)
-#endif
- .if \__nocheck == 1
- FEXPORT(csum_partial_copy_nocheck)
- EXPORT_SYMBOL(csum_partial_copy_nocheck)
- .endif
- move sum, zero
+ li sum, -1
move odd, zero
/*
* Note: dst & src may be unaligned, len may be 0
@@ -496,31 +469,31 @@
SUB len, 8*NBYTES # subtract here for bgez loop
.align 4
1:
- LOAD(t0, UNIT(0)(src), .Ll_exc\@)
- LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
- LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
- LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
- LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
- LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
- LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
- LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
+ LOAD(t0, UNIT(0)(src))
+ LOAD(t1, UNIT(1)(src))
+ LOAD(t2, UNIT(2)(src))
+ LOAD(t3, UNIT(3)(src))
+ LOAD(t4, UNIT(4)(src))
+ LOAD(t5, UNIT(5)(src))
+ LOAD(t6, UNIT(6)(src))
+ LOAD(t7, UNIT(7)(src))
SUB len, len, 8*NBYTES
ADD src, src, 8*NBYTES
- STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+ STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
- STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+ STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
- STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+ STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
- STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+ STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
- STORE(t4, UNIT(4)(dst), .Ls_exc\@)
+ STORE(t4, UNIT(4)(dst))
ADDC(t4, t5)
- STORE(t5, UNIT(5)(dst), .Ls_exc\@)
+ STORE(t5, UNIT(5)(dst))
ADDC(sum, t4)
- STORE(t6, UNIT(6)(dst), .Ls_exc\@)
+ STORE(t6, UNIT(6)(dst))
ADDC(t6, t7)
- STORE(t7, UNIT(7)(dst), .Ls_exc\@)
+ STORE(t7, UNIT(7)(dst))
ADDC(sum, t6)
.set reorder /* DADDI_WAR */
ADD dst, dst, 8*NBYTES
@@ -540,19 +513,19 @@
/*
* len >= 4*NBYTES
*/
- LOAD(t0, UNIT(0)(src), .Ll_exc\@)
- LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
- LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
- LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
+ LOAD(t0, UNIT(0)(src))
+ LOAD(t1, UNIT(1)(src))
+ LOAD(t2, UNIT(2)(src))
+ LOAD(t3, UNIT(3)(src))
SUB len, len, 4*NBYTES
ADD src, src, 4*NBYTES
- STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+ STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
- STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+ STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
- STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+ STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
- STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+ STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
@@ -565,10 +538,10 @@
beq rem, len, .Lcopy_bytes\@
nop
1:
- LOAD(t0, 0(src), .Ll_exc\@)
+ LOAD(t0, 0(src))
ADD src, src, NBYTES
SUB len, len, NBYTES
- STORE(t0, 0(dst), .Ls_exc\@)
+ STORE(t0, 0(dst))
ADDC(sum, t0)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
@@ -591,10 +564,10 @@
ADD t1, dst, len # t1 is just past last byte of dst
li bits, 8*NBYTES
SLL rem, len, 3 # rem = number of bits to keep
- LOAD(t0, 0(src), .Ll_exc\@)
+ LOAD(t0, 0(src))
SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
- STREST(t0, -1(t1), .Ls_exc\@)
+ STREST(t0, -1(t1))
SHIFT_DISCARD_REVERT t0, t0, bits
.set reorder
ADDC(sum, t0)
@@ -611,12 +584,12 @@
* Set match = (src and dst have same alignment)
*/
#define match rem
- LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
+ LDFIRST(t3, FIRST(0)(src))
ADD t2, zero, NBYTES
- LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
+ LDREST(t3, REST(0)(src))
SUB t2, t2, t1 # t2 = number of bytes copied
xor match, t0, t1
- STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
+ STFIRST(t3, FIRST(0)(dst))
SLL t4, t1, 3 # t4 = number of bits to discard
SHIFT_DISCARD t3, t3, t4
/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
@@ -638,26 +611,26 @@
* It's OK to load FIRST(N+1) before REST(N) because the two addresses
* are to the same unit (unless src is aligned, but it's not).
*/
- LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
- LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
+ LDFIRST(t0, FIRST(0)(src))
+ LDFIRST(t1, FIRST(1)(src))
SUB len, len, 4*NBYTES
- LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
- LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
- LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
- LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
- LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
- LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
+ LDREST(t0, REST(0)(src))
+ LDREST(t1, REST(1)(src))
+ LDFIRST(t2, FIRST(2)(src))
+ LDFIRST(t3, FIRST(3)(src))
+ LDREST(t2, REST(2)(src))
+ LDREST(t3, REST(3)(src))
ADD src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
nop # improves slotting
#endif
- STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+ STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
- STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+ STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
- STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+ STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
- STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+ STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
@@ -670,11 +643,11 @@
beq rem, len, .Lcopy_bytes\@
nop
1:
- LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
- LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+ LDFIRST(t0, FIRST(0)(src))
+ LDREST(t0, REST(0)(src))
ADD src, src, NBYTES
SUB len, len, NBYTES
- STORE(t0, 0(dst), .Ls_exc\@)
+ STORE(t0, 0(dst))
ADDC(sum, t0)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
@@ -695,11 +668,10 @@
#endif
move t2, zero # partial word
li t3, SHIFT_START # shift
-/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N) \
- LOADBU(t0, N(src), .Ll_exc_copy\@); \
+ LOADBU(t0, N(src)); \
SUB len, len, 1; \
- STOREB(t0, N(dst), .Ls_exc\@); \
+ STOREB(t0, N(dst)); \
SLLV t0, t0, t3; \
addu t3, SHIFT_INC; \
beqz len, .Lcopy_bytes_done\@; \
@@ -713,9 +685,9 @@
COPY_BYTE(4)
COPY_BYTE(5)
#endif
- LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
+ LOADBU(t0, NBYTES-2(src))
SUB len, len, 1
- STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
+ STOREB(t0, NBYTES-2(dst))
SLLV t0, t0, t3
or t2, t0
.Lcopy_bytes_done\@:
@@ -732,7 +704,8 @@
addu sum, v1
#endif
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON3)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+ defined(CONFIG_CPU_LOONGSON64)
.set push
.set arch=mips32r2
wsbh v1, sum
@@ -751,97 +724,31 @@
#endif
.set pop
.set reorder
- ADDC32(sum, psum)
jr ra
.set noreorder
-
-.Ll_exc_copy\@:
- /*
- * Copy bytes from src until faulting load address (or until a
- * lb faults)
- *
- * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
- * may be more than a byte beyond the last address.
- * Hence, the lb below may get an exception.
- *
- * Assumes src < THREAD_BUADDR($28)
- */
- LOADK t0, TI_TASK($28)
- li t2, SHIFT_START
- LOADK t0, THREAD_BUADDR(t0)
-1:
- LOADBU(t1, 0(src), .Ll_exc\@)
- ADD src, src, 1
- sb t1, 0(dst) # can't fault -- we're copy_from_user
- SLLV t1, t1, t2
- addu t2, SHIFT_INC
- ADDC(sum, t1)
- .set reorder /* DADDI_WAR */
- ADD dst, dst, 1
- bne src, t0, 1b
- .set noreorder
-.Ll_exc\@:
- LOADK t0, TI_TASK($28)
- nop
- LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
- nop
- SUB len, AT, t0 # len number of uncopied bytes
- /*
- * Here's where we rely on src and dst being incremented in tandem,
- * See (3) above.
- * dst += (fault addr - src) to put dst at first byte to clear
- */
- ADD dst, t0 # compute start address in a1
- SUB dst, src
- /*
- * Clear len bytes starting at dst. Can't call __bzero because it
- * might modify len. An inefficient loop for these rare times...
- */
- .set reorder /* DADDI_WAR */
- SUB src, len, 1
- beqz len, .Ldone\@
- .set noreorder
-1: sb zero, 0(dst)
- ADD dst, dst, 1
- .set push
- .set noat
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
- bnez src, 1b
- SUB src, src, 1
-#else
- li v1, 1
- bnez src, 1b
- SUB src, src, v1
-#endif
- li v1, -EFAULT
- b .Ldone\@
- sw v1, (errptr)
-
-.Ls_exc\@:
- li v0, -1 /* invalid checksum */
- li v1, -EFAULT
- jr ra
- sw v1, (errptr)
- .set pop
.endm
-LEAF(__csum_partial_copy_kernel)
-EXPORT_SYMBOL(__csum_partial_copy_kernel)
+ .set noreorder
+.L_exc:
+ jr ra
+ li v0, 0
+
+FEXPORT(__csum_partial_copy_nocheck)
+EXPORT_SYMBOL(__csum_partial_copy_nocheck)
#ifndef CONFIG_EVA
FEXPORT(__csum_partial_copy_to_user)
EXPORT_SYMBOL(__csum_partial_copy_to_user)
FEXPORT(__csum_partial_copy_from_user)
EXPORT_SYMBOL(__csum_partial_copy_from_user)
#endif
-__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
-END(__csum_partial_copy_kernel)
+__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP
#ifdef CONFIG_EVA
LEAF(__csum_partial_copy_to_user)
-__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP
END(__csum_partial_copy_to_user)
LEAF(__csum_partial_copy_from_user)
-__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP
END(__csum_partial_copy_from_user)
#endif
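Note on the csum_partial.S rewrite above: the sum seed (psum) and error pointer (errptr) arguments are gone, the running sum now starts from ~0 (li sum, -1), and every per-site fault handler collapses into the shared .L_exc, which simply returns 0. Seeding with ~0 is what makes a bare 0 return unambiguous: in ones'-complement addition with end-around carry, ~0 + x is x for x > 0 and ~0 for x == 0, so a successful copy can never yield 0. A small userspace sketch of that property (illustrative, not code from this patch):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* 32-bit ones'-complement add with end-around carry fold */
    static uint32_t ocadd(uint32_t a, uint32_t b)
    {
            uint64_t s = (uint64_t)a + b;

            return (uint32_t)(s + (s >> 32));
    }

    int main(void)
    {
            assert(ocadd(~0u, 0) == ~0u);           /* zero stays "negative zero" */
            assert(ocadd(~0u, 0x1234u) == 0x1234u); /* non-zero passes through */
            assert(ocadd(~0u, ~0u) == ~0u);
            puts("a sum seeded with ~0 never collapses to 0");
            return 0;
    }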
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 68c495e..2e8dfc1 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -24,6 +24,8 @@
#define GCC_DADDI_IMM_ASM() "r"
#endif
+#ifndef CONFIG_HAVE_PLAT_DELAY
+
void __delay(unsigned long loops)
{
__asm__ __volatile__ (
@@ -63,3 +65,5 @@
__delay((ns * 0x00000005ull * HZ * lpj) >> 32);
}
EXPORT_SYMBOL(__ndelay);
+
+#endif
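Note on the delay.c hunks above: the new CONFIG_HAVE_PLAT_DELAY guard compiles out the generic loops-calibrated __delay()/__udelay()/__ndelay() so a platform can supply its own, e.g. one driven by a fixed-rate counter. A hypothetical platform override (a sketch under assumed helpers, not code from this patch; read_const_counter() is an invented stand-in for the platform's constant-frequency counter read):

    #include <linux/delay.h>
    #include <linux/export.h>
    #include <linux/types.h>
    #include <asm/processor.h>      /* cpu_relax() */

    void __delay(unsigned long cycles)
    {
            u64 t0 = read_const_counter();  /* assumed platform helper */

            /* spin until the fixed-rate counter has advanced enough */
            while (read_const_counter() - t0 < cycles)
                    cpu_relax();
    }
    EXPORT_SYMBOL(__delay);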
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 83ed372..4256423 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -12,7 +12,6 @@
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
void dump_tlb_regs(void)
@@ -80,7 +79,7 @@
unsigned int pagemask, guestctl1 = 0, c0, c1, i;
unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
- unsigned long uninitialized_var(s_mmid);
+ unsigned long s_mmid;
#ifdef CONFIG_32BIT
bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
int pwidth = xpa ? 11 : 8;
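Note on the dump_tlb.c hunk above: uninitialized_var() was removed tree-wide because it only pretended to initialize. For reference, its GCC definition was a self-assignment that silenced -Wmaybe-uninitialized while leaving the variable genuinely uninitialized, which could mask real bugs:

    /* from the since-removed <linux/compiler-gcc.h> helper */
    #define uninitialized_var(x) x = x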
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index cdd19d8..88065ee 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -301,14 +301,14 @@
and t0, src, ADDRMASK
PREFS( 0, 2*32(src) )
PREFD( 1, 2*32(dst) )
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
bnez t1, .Ldst_unaligned\@
nop
bnez t0, .Lsrc_unaligned_dst_aligned\@
-#else
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
or t0, t0, t1
bnez t0, .Lcopy_unaligned_bytes\@
-#endif
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
/*
* use delay slot for fall-through
* src and dst are aligned; need to compute rem
@@ -389,7 +389,7 @@
bne rem, len, 1b
.set noreorder
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
/*
* src and dst are aligned, need to copy rem bytes (rem < NBYTES)
* A loop would do only a byte at a time with possible branch
@@ -491,7 +491,7 @@
bne len, rem, 1b
.set noreorder
-#endif /* CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* !CONFIG_CPU_NO_LOAD_STORE_LR */
.Lcopy_bytes_checklen\@:
beqz len, .Ldone\@
nop
@@ -520,7 +520,7 @@
jr ra
nop
-#ifndef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifdef CONFIG_CPU_NO_LOAD_STORE_LR
.Lcopy_unaligned_bytes\@:
1:
COPY_BYTE(0)
@@ -534,7 +534,7 @@
ADD src, src, 8
b 1b
ADD dst, dst, 8
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
.if __memcpy == 1
END(memcpy)
.set __memcpy, 0
@@ -598,6 +598,7 @@
nop
.endm
+#ifndef CONFIG_HAVE_PLAT_MEMCPY
.align 5
LEAF(memmove)
EXPORT_SYMBOL(memmove)
@@ -665,6 +666,8 @@
/* Legacy Mode, user <-> user */
__BUILD_COPY_USER LEGACY_MODE USEROP USEROP
+#endif
+
#ifdef CONFIG_EVA
/*
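Note on the memcpy.S hunks above (and the memset.S hunks that follow): the config symbol flips polarity from CONFIG_CPU_HAS_LOAD_STORE_LR to CONFIG_CPU_NO_LOAD_STORE_LR, so cores lacking the unaligned load/store-left/right instructions (lwl/lwr/swl/swr, removed in MIPS R6) become the exception rather than the rule. On such cores a misaligned copy takes the .Lcopy_unaligned_bytes path; a C-level sketch of that fallback (assumed equivalent, not code from this patch):

    #include <stddef.h>

    /* With no lwl/lwr/swl/swr to assemble unaligned words, misaligned
     * copies degrade to one byte per iteration. */
    static void copy_unaligned_bytes(unsigned char *dst,
                                     const unsigned char *src, size_t len)
    {
            while (len--)
                    *dst++ = *src++;
    }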
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 418611e..d5449e8 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -115,7 +115,7 @@
#endif
.set reorder
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
@@ -125,7 +125,7 @@
PTR_SUBU a0, t0 /* long align ptr */
PTR_ADDU a2, t0 /* correct size */
-#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
#define STORE_BYTE(N) \
EX(sb, a1, N(a0), .Lbyte_fixup\@); \
.set noreorder; \
@@ -150,7 +150,7 @@
ori a0, STORMASK
xori a0, STORMASK
PTR_ADDIU a0, STORSIZE
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
1: ori t1, a2, 0x3f /* # of full blocks */
xori t1, 0x3f
andi t0, a2, 0x40-STORSIZE
@@ -185,7 +185,7 @@
.set noreorder
beqz a2, 1f
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
PTR_ADDU a0, a2 /* What's left */
.set reorder
R10KCBARRIER(0(ra))
@@ -194,7 +194,7 @@
#else
EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
#endif
-#else
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
PTR_SUBU t0, $0, a2
.set reorder
move a2, zero /* No remaining longs */
@@ -211,7 +211,7 @@
EX(sb, a1, 6(a0), .Lbyte_fixup\@)
#endif
0:
-#endif
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
1: move a2, zero
jr ra
@@ -234,7 +234,7 @@
.hidden __memset
.endif
-#ifndef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifdef CONFIG_CPU_NO_LOAD_STORE_LR
.Lbyte_fixup\@:
/*
* unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1
@@ -243,7 +243,7 @@
PTR_SUBU a2, t0
PTR_ADDIU a2, 1
jr ra
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
.Lfirst_fixup\@:
/* unset_bytes already in a2 */
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index 57497a2..a9b72ea 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -15,7 +15,7 @@
#include <linux/export.h>
#include <linux/stringify.h>
-#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)
+#if !defined(CONFIG_CPU_HAS_DIEI)
/*
* For cli() we have to insert nops to make sure that the new value
@@ -110,4 +110,4 @@
}
EXPORT_SYMBOL(arch_local_irq_restore);
-#endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_DIEI */
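Note on the mips-atomic.c hunk above: the guard now keys on CONFIG_CPU_HAS_DIEI instead of enumerating ISA revisions. Cores with the di/ei instructions can toggle Status.IE atomically in a single instruction, so the out-of-line arch_local_irq_*() fallbacks in this file are only built when di/ei are absent. A hypothetical illustration of what the symbol buys (a sketch, not code from this patch):

    /* On a MIPS R2+ core, disabling IRQs is one atomic instruction plus
     * a hazard barrier; without di/ei it is a non-atomic mfc0/modify/mtc0
     * sequence, which is what the C helpers above implement out of line. */
    static inline void irq_disable_diei(void)
    {
            __asm__ __volatile__("di; ehb" : : : "memory");
    }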
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index b97d9c5..10b4bf7 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -12,7 +12,6 @@
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
extern int r3k_have_wired_reg;