arch/parisc/lib as of the Linux v4.19.13 snapshot.
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
new file mode 100644
index 0000000..f2dac4d
--- /dev/null
+++ b/arch/parisc/lib/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for parisc-specific library files
+#
+
+lib-y	:= lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
+	   ucmpdi2.o delay.o
+
+obj-y	:= iomap.o
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
new file mode 100644
index 0000000..70ffbcf
--- /dev/null
+++ b/arch/parisc/lib/bitops.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bitops.c: atomic operations which got too long to be inlined all over
+ *      the place.
+ * 
+ * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
+ * Copyright 2000 Grant Grundler (grundler@cup.hp.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+
+#ifdef CONFIG_SMP
+arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+	[0 ... (ATOMIC_HASH_SIZE-1)]  = __ARCH_SPIN_LOCK_UNLOCKED
+};
+#endif
+
+#ifdef CONFIG_64BIT
+unsigned long __xchg64(unsigned long x, unsigned long *ptr)
+{
+	unsigned long temp, flags;
+
+	_atomic_spin_lock_irqsave(ptr, flags);
+	temp = *ptr;
+	*ptr = x;
+	_atomic_spin_unlock_irqrestore(ptr, flags);
+	return temp;
+}
+#endif
+
+unsigned long __xchg32(int x, int *ptr)
+{
+	unsigned long flags;
+	long temp;
+
+	_atomic_spin_lock_irqsave(ptr, flags);
+	temp = (long) *ptr;	/* XXX - sign extension wanted? */
+	*ptr = x;
+	_atomic_spin_unlock_irqrestore(ptr, flags);
+	return (unsigned long)temp;
+}
+
+
+unsigned long __xchg8(char x, char *ptr)
+{
+	unsigned long flags;
+	long temp;
+
+	_atomic_spin_lock_irqsave(ptr, flags);
+	temp = (long) *ptr;	/* XXX - sign extension wanted? */
+	*ptr = x;
+	_atomic_spin_unlock_irqrestore(ptr, flags);
+	return (unsigned long)temp;
+}
+
+
+u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
+{
+	unsigned long flags;
+	u64 prev;
+
+	_atomic_spin_lock_irqsave(ptr, flags);
+	if ((prev = *ptr) == old)
+		*ptr = new;
+	_atomic_spin_unlock_irqrestore(ptr, flags);
+	return prev;
+}
+
+unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
+{
+	unsigned long flags;
+	unsigned int prev;
+
+	_atomic_spin_lock_irqsave(ptr, flags);
+	if ((prev = *ptr) == old)
+		*ptr = new;
+	_atomic_spin_unlock_irqrestore(ptr, flags);
+	return (unsigned long)prev;
+}
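
As a rough user-space sketch of the hashed-lock scheme above (pthread mutexes stand in for arch_spinlock_t, and the sketch_* names are illustrative rather than kernel API), the cmpxchg primitive and an atomic add built on top of it could be modelled like this:

/* Model of the hashed-spinlock cmpxchg emulation used in bitops.c. */
#include <pthread.h>
#include <stdio.h>

#define HASH_SIZE 16

static pthread_mutex_t atomic_hash[HASH_SIZE] = {
	[0 ... HASH_SIZE - 1] = PTHREAD_MUTEX_INITIALIZER
};

/* Pick a lock based on the address, like _atomic_spin_lock_irqsave() does. */
static pthread_mutex_t *hash_lock(volatile void *ptr)
{
	return &atomic_hash[((unsigned long)ptr >> 4) % HASH_SIZE];
}

static unsigned int sketch_cmpxchg_u32(volatile unsigned int *ptr,
				       unsigned int old, unsigned int new)
{
	pthread_mutex_t *lock = hash_lock(ptr);
	unsigned int prev;

	pthread_mutex_lock(lock);
	prev = *ptr;
	if (prev == old)
		*ptr = new;
	pthread_mutex_unlock(lock);
	return prev;
}

/* Example: an atomic add built from the cmpxchg primitive. */
static void sketch_atomic_add(volatile unsigned int *ptr, unsigned int val)
{
	unsigned int old;

	do {
		old = *ptr;
	} while (sketch_cmpxchg_u32(ptr, old, old + val) != old);
}

int main(void)
{
	volatile unsigned int counter = 0;

	sketch_atomic_add(&counter, 5);
	printf("%u\n", counter);	/* prints 5 */
	return 0;
}
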
diff --git a/arch/parisc/lib/checksum.c b/arch/parisc/lib/checksum.c
new file mode 100644
index 0000000..ba6384d
--- /dev/null
+++ b/arch/parisc/lib/checksum.c
@@ -0,0 +1,149 @@
+/*
+ * INET		An implementation of the TCP/IP protocol suite for the LINUX
+ *		operating system.  INET is implemented using the  BSD Socket
+ *		interface as the means of communication with the user level.
+ *
+ *		IP/TCP/UDP checksumming routines (adapted from the MIPS version)
+ *
+ * Authors:	Ralf Baechle, <ralf@waldorf-gmbh.de>
+ *		Lots of code moved from tcp.c and ip.c; see those files
+ *		for more names.
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <net/checksum.h>
+#include <asm/byteorder.h>
+#include <asm/string.h>
+#include <linux/uaccess.h>
+
+#define addc(_t,_r)                     \
+	__asm__ __volatile__ (          \
+"       add             %0, %1, %0\n"   \
+"       addc            %0, %%r0, %0\n" \
+	: "=r"(_t)                      \
+	: "r"(_r), "0"(_t));
+
+static inline unsigned short from32to16(unsigned int x)
+{
+	/* 32 bits --> 16 bits + carry */
+	x = (x & 0xffff) + (x >> 16);
+	/* 16 bits + carry --> 16 bits including carry */
+	x = (x & 0xffff) + (x >> 16);
+	return (unsigned short)x;
+}
+
+static inline unsigned int do_csum(const unsigned char * buff, int len)
+{
+	int odd, count;
+	unsigned int result = 0;
+
+	if (len <= 0)
+		goto out;
+	odd = 1 & (unsigned long) buff;
+	if (odd) {
+		result = be16_to_cpu(*buff);
+		len--;
+		buff++;
+	}
+	count = len >> 1;		/* nr of 16-bit words.. */
+	if (count) {
+		if (2 & (unsigned long) buff) {
+			result += *(unsigned short *) buff;
+			count--;
+			len -= 2;
+			buff += 2;
+		}
+		count >>= 1;		/* nr of 32-bit words.. */
+		if (count) {
+			while (count >= 4) {
+				unsigned int r1, r2, r3, r4;
+				r1 = *(unsigned int *)(buff + 0);
+				r2 = *(unsigned int *)(buff + 4);
+				r3 = *(unsigned int *)(buff + 8);
+				r4 = *(unsigned int *)(buff + 12);
+				addc(result, r1);
+				addc(result, r2);
+				addc(result, r3);
+				addc(result, r4);
+				count -= 4;
+				buff += 16;
+			}
+			while (count) {
+				unsigned int w = *(unsigned int *) buff;
+				count--;
+				buff += 4;
+				addc(result, w);
+			}
+			result = (result & 0xffff) + (result >> 16);
+		}
+		if (len & 2) {
+			result += *(unsigned short *) buff;
+			buff += 2;
+		}
+	}
+	if (len & 1)
+		result += le16_to_cpu(*buff);
+	result = from32to16(result);
+	if (odd)
+		result = swab16(result);
+out:
+	return result;
+}
+
+/*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+/*
+ * why bother folding?
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum)
+{
+	unsigned int result = do_csum(buff, len);
+	addc(result, sum);
+	return (__force __wsum)from32to16(result);
+}
+
+EXPORT_SYMBOL(csum_partial);
+
+/*
+ * copy while checksumming, otherwise like csum_partial
+ */
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				       int len, __wsum sum)
+{
+	/*
+	 * Simple but slow fallback: checksum the source, then copy it.
+	 * This is a lot slower than a fused copy-and-checksum routine.
+	 */
+	sum = csum_partial(src, len, sum);
+	memcpy(dst, src, len);
+
+	return sum;
+}
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+
+/*
+ * Copy from userspace and compute checksum.  If we catch an exception
+ * then zero the rest of the buffer.
+ */
+__wsum csum_partial_copy_from_user(const void __user *src,
+					void *dst, int len,
+					__wsum sum, int *err_ptr)
+{
+	int missing;
+
+	missing = copy_from_user(dst, src, len);
+	if (missing) {
+		memset(dst + len - missing, 0, missing);
+		*err_ptr = -EFAULT;
+	}
+		
+	return csum_partial(dst, len, sum);
+}
+EXPORT_SYMBOL(csum_partial_copy_from_user);
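
do_csum() above accumulates 32-bit words with an add/add-with-carry pair and then folds the carries back into 16 bits. A portable sketch of the same end-around-carry sum (sketch_csum and csum16_fold are illustrative names, not the kernel helpers):

/* Ones'-complement sum with end-around carry, as computed by do_csum(). */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t csum16_fold(uint32_t sum)
{
	/* Fold the carries back in twice, exactly like from32to16() above. */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

static uint16_t sketch_csum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	/* Sum 16-bit big-endian words; a trailing odd byte is padded with 0. */
	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	return csum16_fold(sum);
}

int main(void)
{
	const uint8_t hdr[] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46 };

	/* The ones' complement of this value would go into an IP checksum field. */
	printf("0x%04x\n", sketch_csum(hdr, sizeof(hdr)));
	return 0;
}
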
diff --git a/arch/parisc/lib/delay.c b/arch/parisc/lib/delay.c
new file mode 100644
index 0000000..66e5065
--- /dev/null
+++ b/arch/parisc/lib/delay.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *	Precise Delay Loops for parisc
+ *
+ *	based on code by:
+ *	Copyright (C) 1993 Linus Torvalds
+ *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ *	Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
+ *
+ *	parisc implementation:
+ *	Copyright (C) 2013 Helge Deller <deller@gmx.de>
+ */
+
+
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/init.h>
+
+#include <asm/delay.h>
+#include <asm/special_insns.h>    /* for mfctl() */
+#include <asm/processor.h> /* for boot_cpu_data */
+
+/* CR16 based delay: */
+static void __cr16_delay(unsigned long __loops)
+{
+	/*
+	 * Note: Due to unsigned math, cr16 rollovers shouldn't be
+	 * a problem here. However, on 32 bit, we need to make sure
+	 * we don't pass in too big a value. The current default
+	 * value of MAX_UDELAY_MS should help prevent this.
+	 */
+	u32 bclock, now, loops = __loops;
+	int cpu;
+
+	preempt_disable();
+	cpu = smp_processor_id();
+	bclock = mfctl(16);
+	for (;;) {
+		now = mfctl(16);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		asm volatile("	nop\n");
+		barrier();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since CR16s are per-CPU we need to account for
+		 * that. The delay must guarantee that we wait "at
+		 * least" the requested amount of time. Being moved to
+		 * another CPU could make the wait longer, but we just
+		 * need to make sure we waited long enough. Rebalance
+		 * the counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			bclock = mfctl(16);
+		}
+	}
+	preempt_enable();
+}
+
+
+void __udelay(unsigned long usecs)
+{
+	__cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL));
+}
+EXPORT_SYMBOL(__udelay);
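
__udelay() converts microseconds into CR16 cycles (cpu_hz / 1000000) and busy-waits on the counter. A user-space sketch of the same structure, with CLOCK_MONOTONIC standing in for CR16 and nanoseconds for cycles (illustrative only, and without the CPU-migration rebalancing):

/* Busy-wait delay modelled on __cr16_delay()/__udelay(). */
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Spin until at least `usecs` microseconds have elapsed. */
static void sketch_udelay(unsigned long usecs)
{
	uint64_t start = now_ns();
	uint64_t wait = (uint64_t)usecs * 1000;	/* convert to counter units */

	while (now_ns() - start < wait)
		;	/* busy wait, like the mfctl(16) polling loop */
}

int main(void)
{
	sketch_udelay(100);	/* wait at least 100 microseconds */
	return 0;
}
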
diff --git a/arch/parisc/lib/io.c b/arch/parisc/lib/io.c
new file mode 100644
index 0000000..7c00496
--- /dev/null
+++ b/arch/parisc/lib/io.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch/parisc/lib/io.c
+ *
+ * Copyright (c) Matthew Wilcox 2001 for Hewlett-Packard
+ * Copyright (c) Randolph Chung 2001 <tausq@debian.org>
+ *
+ * IO accessing functions which shouldn't be inlined because they're too big
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+/* Copies a block of memory to a device in an efficient manner.
+ * Assumes the device can cope with 32-bit transfers.  If it can't,
+ * don't use this function.
+ */
+void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
+{
+	if (((unsigned long)dst & 3) != ((unsigned long)src & 3))
+		goto bytecopy;
+	while ((unsigned long)dst & 3) {
+		writeb(*(char *)src, dst++);
+		src++;
+		count--;
+	}
+	while (count > 3) {
+		__raw_writel(*(u32 *)src, dst);
+		src += 4;
+		dst += 4;
+		count -= 4;
+	}
+ bytecopy:
+	while (count--) {
+		writeb(*(char *)src, dst++);
+		src++;
+	}
+}
+
+/*
+** Copies a block of memory from a device in an efficient manner.
+** Assumes the device can cope with 32-bit transfers.  If it can't,
+** don't use this function.
+**
+** CR16 counts on C3000 reading 256 bytes from Symbios 896 RAM:
+**	27341/64    = 427 cyc per int
+**	61311/128   = 478 cyc per short
+**	122637/256  = 479 cyc per byte
+** Ergo bus latencies dominant (not transfer size).
+**      Minimize total number of transfers at cost of CPU cycles.
+**	TODO: only look at src alignment and adjust the stores to dest.
+*/
+void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
+{
+	/* first compare alignment of src/dst */ 
+	if ( (((unsigned long)dst ^ (unsigned long)src) & 1) || (count < 2) )
+		goto bytecopy;
+
+	if ( (((unsigned long)dst ^ (unsigned long)src) & 2) || (count < 4) )
+		goto shortcopy;
+
+	/* Then check for misaligned start address */
+	if ((unsigned long)src & 1) {
+		*(u8 *)dst = readb(src);
+		src++;
+		dst++;
+		count--;
+		if (count < 2) goto bytecopy;
+	}
+
+	if ((unsigned long)src & 2) {
+		*(u16 *)dst = __raw_readw(src);
+		src += 2;
+		dst += 2;
+		count -= 2;
+	}
+
+	while (count > 3) {
+		*(u32 *)dst = __raw_readl(src);
+		dst += 4;
+		src += 4;
+		count -= 4;
+	}
+
+ shortcopy:
+	while (count > 1) {
+		*(u16 *)dst = __raw_readw(src);
+		src += 2;
+		dst += 2;
+		count -= 2;
+	}
+
+ bytecopy:
+	while (count--) {
+		*(char *)dst = readb(src);
+		src++;
+		dst++;
+	}
+}
+
+/* Sets a block of memory on a device to a given value.
+ * Assumes the device can cope with 32-bit transfers.  If it can't,
+ * don't use this function.
+ */
+void memset_io(volatile void __iomem *addr, unsigned char val, int count)
+{
+	u32 val32 = (val << 24) | (val << 16) | (val << 8) | val;
+	while ((unsigned long)addr & 3) {
+		writeb(val, addr++);
+		count--;
+	}
+	while (count > 3) {
+		__raw_writel(val32, addr);
+		addr += 4;
+		count -= 4;
+	}
+	while (count--) {
+		writeb(val, addr++);
+	}
+}
+
+/*
+ * Read COUNT 8-bit bytes from port PORT into memory starting at
+ * SRC.
+ */
+void insb (unsigned long port, void *dst, unsigned long count)
+{
+	unsigned char *p;
+
+	p = (unsigned char *)dst;
+
+	while (((unsigned long)p) & 0x3) {
+		if (!count)
+			return;
+		count--;
+		*p = inb(port);
+		p++;
+	}
+
+	while (count >= 4) {
+		unsigned int w;
+		count -= 4;
+		w = inb(port) << 24;
+		w |= inb(port) << 16;
+		w |= inb(port) << 8;
+		w |= inb(port);
+		*(unsigned int *) p = w;
+		p += 4;
+	}
+
+	while (count) {
+		--count;
+		*p = inb(port);
+		p++;
+	}
+}
+
+
+/*
+ * Read COUNT 16-bit words from port PORT into memory starting at
+ * SRC.  SRC must be at least short aligned.  This is used by the
+ * IDE driver to read disk sectors.  Performance is important, but
+ * the interface seems to be slow: just using the inlined version
+ * of inw() breaks things.
+ */
+void insw (unsigned long port, void *dst, unsigned long count)
+{
+	unsigned int l = 0, l2;
+	unsigned char *p;
+
+	p = (unsigned char *)dst;
+	
+	if (!count)
+		return;
+	
+	switch (((unsigned long)p) & 0x3)
+	{
+	 case 0x00:			/* Buffer 32-bit aligned */
+		while (count>=2) {
+			
+			count -= 2;
+			l = cpu_to_le16(inw(port)) << 16;
+			l |= cpu_to_le16(inw(port));
+			*(unsigned int *)p = l;
+			p += 4;
+		}
+		if (count) {
+			*(unsigned short *)p = cpu_to_le16(inw(port));
+		}
+		break;
+	
+	 case 0x02:			/* Buffer 16-bit aligned */
+		*(unsigned short *)p = cpu_to_le16(inw(port));
+		p += 2;
+		count--;
+		while (count>=2) {
+			
+			count -= 2;
+			l = cpu_to_le16(inw(port)) << 16;
+			l |= cpu_to_le16(inw(port));
+			*(unsigned int *)p = l;
+			p += 4;
+		}
+		if (count) {
+			*(unsigned short *)p = cpu_to_le16(inw(port));
+		}
+		break;
+		
+	 case 0x01:			/* Buffer 8-bit aligned */
+	 case 0x03:
+		/* I don't bother with 32bit transfers
+		 * in this case, 16bit will have to do -- DE */
+		--count;
+		
+		l = cpu_to_le16(inw(port));
+		*p = l >> 8;
+		p++;
+		while (count--)
+		{
+			l2 = cpu_to_le16(inw(port));
+			*(unsigned short *)p = (l & 0xff) << 8 | (l2 >> 8);
+			p += 2;
+			l = l2;
+		}
+		*p = l & 0xff;
+		break;
+	}
+}
+
+
+
+/*
+ * Read COUNT 32-bit words from port PORT into memory starting at
+ * SRC. Now works with any alignment in SRC. Performance is important,
+ * but the interface seems to be slow: just using the inlined version
+ * of inl() breaks things.
+ */
+void insl (unsigned long port, void *dst, unsigned long count)
+{
+	unsigned int l = 0, l2;
+	unsigned char *p;
+
+	p = (unsigned char *)dst;
+	
+	if (!count)
+		return;
+	
+	switch (((unsigned long) dst) & 0x3)
+	{
+	 case 0x00:			/* Buffer 32-bit aligned */
+		while (count--)
+		{
+			*(unsigned int *)p = cpu_to_le32(inl(port));
+			p += 4;
+		}
+		break;
+	
+	 case 0x02:			/* Buffer 16-bit aligned */
+		--count;
+		
+		l = cpu_to_le32(inl(port));
+		*(unsigned short *)p = l >> 16;
+		p += 2;
+		
+		while (count--)
+		{
+			l2 = cpu_to_le32(inl(port));
+			*(unsigned int *)p = (l & 0xffff) << 16 | (l2 >> 16);
+			p += 4;
+			l = l2;
+		}
+		*(unsigned short *)p = l & 0xffff;
+		break;
+	 case 0x01:			/* Buffer 8-bit aligned */
+		--count;
+		
+		l = cpu_to_le32(inl(port));
+		*(unsigned char *)p = l >> 24;
+		p++;
+		*(unsigned short *)p = (l >> 8) & 0xffff;
+		p += 2;
+		while (count--)
+		{
+			l2 = cpu_to_le32(inl(port));
+			*(unsigned int *)p = (l & 0xff) << 24 | (l2 >> 8);
+			p += 4;
+			l = l2;
+		}
+		*p = l & 0xff;
+		break;
+	 case 0x03:			/* Buffer 8-bit aligned */
+		--count;
+		
+		l = cpu_to_le32(inl(port));
+		*p = l >> 24;
+		p++;
+		while (count--)
+		{
+			l2 = cpu_to_le32(inl(port));
+			*(unsigned int *)p = (l & 0xffffff) << 8 | l2 >> 24;
+			p += 4;
+			l = l2;
+		}
+		*(unsigned short *)p = (l >> 8) & 0xffff;
+		p += 2;
+		*p = l & 0xff;
+		break;
+	}
+}
+
+
+/*
+ * Like insb but in the opposite direction.
+ * Don't worry as much about doing aligned memory transfers:
+ * doing byte reads the "slow" way isn't nearly as slow as
+ * doing byte writes the slow way (no r-m-w cycle).
+ */
+void outsb(unsigned long port, const void * src, unsigned long count)
+{
+	const unsigned char *p;
+
+	p = (const unsigned char *)src;
+	while (count) {
+		count--;
+		outb(*p, port);
+		p++;
+	}
+}
+
+/*
+ * Like insw but in the opposite direction.  This is used by the IDE
+ * driver to write disk sectors.  Performance is important, but the
+ * interface seems to be slow: just using the inlined version of
+ * outw() breaks things.
+ */
+void outsw (unsigned long port, const void *src, unsigned long count)
+{
+	unsigned int l = 0, l2;
+	const unsigned char *p;
+
+	p = (const unsigned char *)src;
+	
+	if (!count)
+		return;
+	
+	switch (((unsigned long)p) & 0x3)
+	{
+	 case 0x00:			/* Buffer 32-bit aligned */
+		while (count>=2) {
+			count -= 2;
+			l = *(unsigned int *)p;
+			p += 4;
+			outw(le16_to_cpu(l >> 16), port);
+			outw(le16_to_cpu(l & 0xffff), port);
+		}
+		if (count) {
+			outw(le16_to_cpu(*(unsigned short*)p), port);
+		}
+		break;
+	
+	 case 0x02:			/* Buffer 16-bit aligned */
+		
+		outw(le16_to_cpu(*(unsigned short*)p), port);
+		p += 2;
+		count--;
+		
+		while (count>=2) {
+			count -= 2;
+			l = *(unsigned int *)p;
+			p += 4;
+			outw(le16_to_cpu(l >> 16), port);
+			outw(le16_to_cpu(l & 0xffff), port);
+		}
+		if (count) {
+			outw(le16_to_cpu(*(unsigned short *)p), port);
+		}
+		break;
+		
+	 case 0x01:			/* Buffer 8-bit aligned */	
+		/* I don't bother with 32bit transfers
+		 * in this case, 16bit will have to do -- DE */
+		
+		l  = *p << 8;
+		p++;
+		count--;
+		while (count)
+		{
+			count--;
+			l2 = *(unsigned short *)p;
+			p += 2;
+			outw(le16_to_cpu(l | l2 >> 8), port);
+		        l = l2 << 8;
+		}
+		l2 = *(unsigned char *)p;
+		outw (le16_to_cpu(l | l2>>8), port);
+		break;
+	
+	}
+}
+
+
+/*
+ * Like insl but in the opposite direction.  This is used by the IDE
+ * driver to write disk sectors.  Works with any alignment in SRC.
+ * Performance is important, but the interface seems to be slow:
+ * just using the inlined version of outl() breaks things.
+ */
+void outsl (unsigned long port, const void *src, unsigned long count)
+{
+	unsigned int l = 0, l2;
+	const unsigned char *p;
+
+	p = (const unsigned char *)src;
+	
+	if (!count)
+		return;
+	
+	switch (((unsigned long)p) & 0x3)
+	{
+	 case 0x00:			/* Buffer 32-bit aligned */
+		while (count--)
+		{
+			outl(le32_to_cpu(*(unsigned int *)p), port);
+			p += 4;
+		}
+		break;
+	
+	 case 0x02:			/* Buffer 16-bit aligned */
+		--count;
+		
+		l = *(unsigned short *)p;
+		p += 2;
+		
+		while (count--)
+		{
+			l2 = *(unsigned int *)p;
+			p += 4;
+			outl (le32_to_cpu(l << 16 | l2 >> 16), port);
+			l = l2;
+		}
+		l2 = *(unsigned short *)p;
+		outl (le32_to_cpu(l << 16 | l2), port);
+		break;
+	 case 0x01:			/* Buffer 8-bit aligned */
+		--count;
+
+		l = *p << 24;
+		p++;
+		l |= *(unsigned short *)p << 8;
+		p += 2;
+
+		while (count--)
+		{
+			l2 = *(unsigned int *)p;
+			p += 4;
+			outl (le32_to_cpu(l | l2 >> 24), port);
+			l = l2 << 8;
+		}
+		l2 = *p;
+		outl (le32_to_cpu(l | l2), port);
+		break;
+	 case 0x03:			/* Buffer 8-bit aligned */
+		--count;
+		
+		l = *p << 24;
+		p++;
+
+		while (count--)
+		{
+			l2 = *(unsigned int *)p;
+			p += 4;
+			outl (le32_to_cpu(l | l2 >> 8), port);
+			l = l2 << 24;
+		}
+		l2 = *(unsigned short *)p << 16;
+		p += 2;
+		l2 |= *p;
+		outl (le32_to_cpu(l | l2), port);
+		break;
+	}
+}
+
+EXPORT_SYMBOL(insb);
+EXPORT_SYMBOL(insw);
+EXPORT_SYMBOL(insl);
+EXPORT_SYMBOL(outsb);
+EXPORT_SYMBOL(outsw);
+EXPORT_SYMBOL(outsl);
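
The interesting part of insw()/insl() above is the split-and-carry handling of a buffer that is less aligned than the transfer size: each incoming word contributes its leading half to the current store and carries the rest into the next one. A minimal sketch of that arithmetic for the 16-bit-aligned insl() case (fake_inl() stands in for the port read, and the cpu_to_le32 byte swapping is omitted):

/* Split-and-carry stores for a destination that is only 16-bit aligned. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Stand-in for inl(port); just hands out consecutive test words. */
static uint32_t fake_inl(void)
{
	static uint32_t next = 0x11223344;
	uint32_t v = next;

	next += 0x11111111;
	return v;
}

static void sketch_insl_align2(uint8_t *dst, unsigned long count)
{
	uint32_t l, l2;
	uint16_t half;

	if (!count)
		return;
	--count;

	l = fake_inl();
	/* Store the leading half-word, which makes dst 32-bit aligned. */
	half = l >> 16;
	memcpy(dst, &half, 2);
	dst += 2;

	while (count--) {
		l2 = fake_inl();
		/* High half carried from the previous word, low half from this one. */
		uint32_t merged = (l & 0xffff) << 16 | (l2 >> 16);

		memcpy(dst, &merged, 4);
		dst += 4;
		l = l2;
	}

	/* Flush the half-word left over from the last port read. */
	half = l & 0xffff;
	memcpy(dst, &half, 2);
}

int main(void)
{
	uint8_t buf[16] = { 0 };
	int i;

	sketch_insl_align2(buf + 2, 3);	/* fill at a 16-bit-aligned offset */
	for (i = 0; i < 16; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}
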
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
new file mode 100644
index 0000000..4b19e6e
--- /dev/null
+++ b/arch/parisc/lib/iomap.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * iomap.c - Implement iomap interface for PA-RISC
+ * Copyright (c) 2004 Matthew Wilcox
+ */
+
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/export.h>
+#include <asm/io.h>
+
+/*
+ * The iomap space on 32-bit PA-RISC is intended to look like this:
+ * 00000000-7fffffff virtual mapped IO
+ * 80000000-8fffffff ISA/EISA port space that can't be virtually mapped
+ * 90000000-9fffffff Dino port space
+ * a0000000-afffffff Astro port space
+ * b0000000-bfffffff PAT port space
+ * c0000000-cfffffff non-swapped memory IO
+ * f0000000-ffffffff legacy IO memory pointers
+ *
+ * For the moment, here's what it looks like:
+ * 80000000-8fffffff All ISA/EISA port space
+ * f0000000-ffffffff legacy IO memory pointers
+ *
+ * On 64-bit, everything is extended, so:
+ * 8000000000000000-8fffffffffffffff All ISA/EISA port space
+ * f000000000000000-ffffffffffffffff legacy IO memory pointers
+ */
+
+/*
+ * Technically, this should be 'if (VMALLOC_START < addr < VMALLOC_END)',
+ * but that's slow and we know it'll be within the first 2GB.
+ */
+#ifdef CONFIG_64BIT
+#define INDIRECT_ADDR(addr)	(((unsigned long)(addr) & 1UL<<63) != 0)
+#define ADDR_TO_REGION(addr)    (((unsigned long)addr >> 60) & 7)
+#define IOPORT_MAP_BASE		(8UL << 60)
+#else
+#define INDIRECT_ADDR(addr)     (((unsigned long)(addr) & 1UL<<31) != 0)
+#define ADDR_TO_REGION(addr)    (((unsigned long)addr >> 28) & 7)
+#define IOPORT_MAP_BASE		(8UL << 28)
+#endif
+
+struct iomap_ops {
+	unsigned int (*read8)(void __iomem *);
+	unsigned int (*read16)(void __iomem *);
+	unsigned int (*read16be)(void __iomem *);
+	unsigned int (*read32)(void __iomem *);
+	unsigned int (*read32be)(void __iomem *);
+	void (*write8)(u8, void __iomem *);
+	void (*write16)(u16, void __iomem *);
+	void (*write16be)(u16, void __iomem *);
+	void (*write32)(u32, void __iomem *);
+	void (*write32be)(u32, void __iomem *);
+	void (*read8r)(void __iomem *, void *, unsigned long);
+	void (*read16r)(void __iomem *, void *, unsigned long);
+	void (*read32r)(void __iomem *, void *, unsigned long);
+	void (*write8r)(void __iomem *, const void *, unsigned long);
+	void (*write16r)(void __iomem *, const void *, unsigned long);
+	void (*write32r)(void __iomem *, const void *, unsigned long);
+};
+
+/* Generic ioport ops.  To be replaced later by specific dino/elroy/wax code */
+
+#define ADDR2PORT(addr) ((unsigned long __force)(addr) & 0xffffff)
+
+static unsigned int ioport_read8(void __iomem *addr)
+{
+	return inb(ADDR2PORT(addr));
+}
+
+static unsigned int ioport_read16(void __iomem *addr)
+{
+	return inw(ADDR2PORT(addr));
+}
+
+static unsigned int ioport_read32(void __iomem *addr)
+{
+	return inl(ADDR2PORT(addr));
+}
+
+static void ioport_write8(u8 datum, void __iomem *addr)
+{
+	outb(datum, ADDR2PORT(addr));
+}
+
+static void ioport_write16(u16 datum, void __iomem *addr)
+{
+	outw(datum, ADDR2PORT(addr));
+}
+
+static void ioport_write32(u32 datum, void __iomem *addr)
+{
+	outl(datum, ADDR2PORT(addr));
+}
+
+static void ioport_read8r(void __iomem *addr, void *dst, unsigned long count)
+{
+	insb(ADDR2PORT(addr), dst, count);
+}
+
+static void ioport_read16r(void __iomem *addr, void *dst, unsigned long count)
+{
+	insw(ADDR2PORT(addr), dst, count);
+}
+
+static void ioport_read32r(void __iomem *addr, void *dst, unsigned long count)
+{
+	insl(ADDR2PORT(addr), dst, count);
+}
+
+static void ioport_write8r(void __iomem *addr, const void *s, unsigned long n)
+{
+	outsb(ADDR2PORT(addr), s, n);
+}
+
+static void ioport_write16r(void __iomem *addr, const void *s, unsigned long n)
+{
+	outsw(ADDR2PORT(addr), s, n);
+}
+
+static void ioport_write32r(void __iomem *addr, const void *s, unsigned long n)
+{
+	outsl(ADDR2PORT(addr), s, n);
+}
+
+static const struct iomap_ops ioport_ops = {
+	.read8 = ioport_read8,
+	.read16 = ioport_read16,
+	.read16be = ioport_read16,
+	.read32 = ioport_read32,
+	.read32be = ioport_read32,
+	.write8 = ioport_write8,
+	.write16 = ioport_write16,
+	.write16be = ioport_write16,
+	.write32 = ioport_write32,
+	.write32be = ioport_write32,
+	.read8r = ioport_read8r,
+	.read16r = ioport_read16r,
+	.read32r = ioport_read32r,
+	.write8r = ioport_write8r,
+	.write16r = ioport_write16r,
+	.write32r = ioport_write32r,
+};
+
+/* Legacy I/O memory ops */
+
+static unsigned int iomem_read8(void __iomem *addr)
+{
+	return readb(addr);
+}
+
+static unsigned int iomem_read16(void __iomem *addr)
+{
+	return readw(addr);
+}
+
+static unsigned int iomem_read16be(void __iomem *addr)
+{
+	return __raw_readw(addr);
+}
+
+static unsigned int iomem_read32(void __iomem *addr)
+{
+	return readl(addr);
+}
+
+static unsigned int iomem_read32be(void __iomem *addr)
+{
+	return __raw_readl(addr);
+}
+
+static void iomem_write8(u8 datum, void __iomem *addr)
+{
+	writeb(datum, addr);
+}
+
+static void iomem_write16(u16 datum, void __iomem *addr)
+{
+	writew(datum, addr);
+}
+
+static void iomem_write16be(u16 datum, void __iomem *addr)
+{
+	__raw_writew(datum, addr);
+}
+
+static void iomem_write32(u32 datum, void __iomem *addr)
+{
+	writel(datum, addr);
+}
+
+static void iomem_write32be(u32 datum, void __iomem *addr)
+{
+	__raw_writel(datum, addr);
+}
+
+static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count)
+{
+	while (count--) {
+		*(u8 *)dst = __raw_readb(addr);
+		dst++;
+	}
+}
+
+static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count)
+{
+	while (count--) {
+		*(u16 *)dst = __raw_readw(addr);
+		dst += 2;
+	}
+}
+
+static void iomem_read32r(void __iomem *addr, void *dst, unsigned long count)
+{
+	while (count--) {
+		*(u32 *)dst = __raw_readl(addr);
+		dst += 4;
+	}
+}
+
+static void iomem_write8r(void __iomem *addr, const void *s, unsigned long n)
+{
+	while (n--) {
+		__raw_writeb(*(u8 *)s, addr);
+		s++;
+	}
+}
+
+static void iomem_write16r(void __iomem *addr, const void *s, unsigned long n)
+{
+	while (n--) {
+		__raw_writew(*(u16 *)s, addr);
+		s += 2;
+	}
+}
+
+static void iomem_write32r(void __iomem *addr, const void *s, unsigned long n)
+{
+	while (n--) {
+		__raw_writel(*(u32 *)s, addr);
+		s += 4;
+	}
+}
+
+static const struct iomap_ops iomem_ops = {
+	.read8 = iomem_read8,
+	.read16 = iomem_read16,
+	.read16be = iomem_read16be,
+	.read32 = iomem_read32,
+	.read32be = iomem_read32be,
+	.write8 = iomem_write8,
+	.write16 = iomem_write16,
+	.write16be = iomem_write16be,
+	.write32 = iomem_write32,
+	.write32be = iomem_write32be,
+	.read8r = iomem_read8r,
+	.read16r = iomem_read16r,
+	.read32r = iomem_read32r,
+	.write8r = iomem_write8r,
+	.write16r = iomem_write16r,
+	.write32r = iomem_write32r,
+};
+
+static const struct iomap_ops *iomap_ops[8] = {
+	[0] = &ioport_ops,
+	[7] = &iomem_ops
+};
+
+
+unsigned int ioread8(void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr)))
+		return iomap_ops[ADDR_TO_REGION(addr)]->read8(addr);
+	return *((u8 *)addr);
+}
+
+unsigned int ioread16(void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr)))
+		return iomap_ops[ADDR_TO_REGION(addr)]->read16(addr);
+	return le16_to_cpup((u16 *)addr);
+}
+
+unsigned int ioread16be(void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr)))
+		return iomap_ops[ADDR_TO_REGION(addr)]->read16be(addr);
+	return *((u16 *)addr);
+}
+
+unsigned int ioread32(void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr)))
+		return iomap_ops[ADDR_TO_REGION(addr)]->read32(addr);
+	return le32_to_cpup((u32 *)addr);
+}
+
+unsigned int ioread32be(void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr)))
+		return iomap_ops[ADDR_TO_REGION(addr)]->read32be(addr);
+	return *((u32 *)addr);
+}
+
+void iowrite8(u8 datum, void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write8(datum, addr);
+	} else {
+		*((u8 *)addr) = datum;
+	}
+}
+
+void iowrite16(u16 datum, void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write16(datum, addr);
+	} else {
+		*((u16 *)addr) = cpu_to_le16(datum);
+	}
+}
+
+void iowrite16be(u16 datum, void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write16be(datum, addr);
+	} else {
+		*((u16 *)addr) = datum;
+	}
+}
+
+void iowrite32(u32 datum, void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write32(datum, addr);
+	} else {
+		*((u32 *)addr) = cpu_to_le32(datum);
+	}
+}
+
+void iowrite32be(u32 datum, void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write32be(datum, addr);
+	} else {
+		*((u32 *)addr) = datum;
+	}
+}
+
+/* Repeating interfaces */
+
+void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->read8r(addr, dst, count);
+	} else {
+		while (count--) {
+			*(u8 *)dst = *(u8 *)addr;
+			dst++;
+		}
+	}
+}
+
+void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->read16r(addr, dst, count);
+	} else {
+		while (count--) {
+			*(u16 *)dst = *(u16 *)addr;
+			dst += 2;
+		}
+	}
+}
+
+void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->read32r(addr, dst, count);
+	} else {
+		while (count--) {
+			*(u32 *)dst = *(u32 *)addr;
+			dst += 4;
+		}
+	}
+}
+
+void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write8r(addr, src, count);
+	} else {
+		while (count--) {
+			*(u8 *)addr = *(u8 *)src;
+			src++;
+		}
+	}
+}
+
+void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write16r(addr, src, count);
+	} else {
+		while (count--) {
+			*(u16 *)addr = *(u16 *)src;
+			src += 2;
+		}
+	}
+}
+
+void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write32r(addr, src, count);
+	} else {
+		while (count--) {
+			*(u32 *)addr = *(u32 *)src;
+			src += 4;
+		}
+	}
+}
+
+/* Mapping interfaces */
+
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+	return (void __iomem *)(IOPORT_MAP_BASE | port);
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+	if (!INDIRECT_ADDR(addr)) {
+		iounmap(addr);
+	}
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+	if (!INDIRECT_ADDR(addr)) {
+		iounmap(addr);
+	}
+}
+
+EXPORT_SYMBOL(ioread8);
+EXPORT_SYMBOL(ioread16);
+EXPORT_SYMBOL(ioread16be);
+EXPORT_SYMBOL(ioread32);
+EXPORT_SYMBOL(ioread32be);
+EXPORT_SYMBOL(iowrite8);
+EXPORT_SYMBOL(iowrite16);
+EXPORT_SYMBOL(iowrite16be);
+EXPORT_SYMBOL(iowrite32);
+EXPORT_SYMBOL(iowrite32be);
+EXPORT_SYMBOL(ioread8_rep);
+EXPORT_SYMBOL(ioread16_rep);
+EXPORT_SYMBOL(ioread32_rep);
+EXPORT_SYMBOL(iowrite8_rep);
+EXPORT_SYMBOL(iowrite16_rep);
+EXPORT_SYMBOL(iowrite32_rep);
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+EXPORT_SYMBOL(pci_iounmap);
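
ioread8() and friends above treat a cookie with the top address bit set as indirect and dispatch through iomap_ops[], indexed by the top nibble; otherwise they perform a plain MMIO access. A simplified stand-alone sketch of that dispatch (sketch_ops, region_ops and the cookie layout are illustrative, not the kernel structures):

/* Address-cookie dispatch modelled on INDIRECT_ADDR()/ADDR_TO_REGION(). */
#include <stdint.h>
#include <stdio.h>

struct sketch_ops {
	unsigned int (*read8)(uintptr_t addr);
};

static unsigned int port_read8(uintptr_t addr)
{
	/* The real ioport_read8() would do inb(addr & 0xffffff) here. */
	printf("port read at 0x%lx\n", (unsigned long)(addr & 0xffffff));
	return 0;
}

static const struct sketch_ops port_ops = { .read8 = port_read8 };
static const struct sketch_ops *region_ops[8] = { [0] = &port_ops };

#define PTR_BITS	(sizeof(uintptr_t) * 8)
#define INDIRECT(addr)	(((addr) & ((uintptr_t)1 << (PTR_BITS - 1))) != 0)
#define REGION(addr)	(((addr) >> (PTR_BITS - 4)) & 7)

static unsigned int sketch_ioread8(uintptr_t addr)
{
	if (INDIRECT(addr))
		return region_ops[REGION(addr)]->read8(addr);
	return *(volatile uint8_t *)addr;	/* plain MMIO load */
}

int main(void)
{
	/* Region 8 | port 0x3f8: top bit set, so it dispatches to port_ops. */
	uintptr_t cookie = ((uintptr_t)8 << (PTR_BITS - 4)) | 0x3f8;

	sketch_ioread8(cookie);
	return 0;
}
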
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
new file mode 100644
index 0000000..b53fb6f
--- /dev/null
+++ b/arch/parisc/lib/lusercopy.S
@@ -0,0 +1,426 @@
+/*
+ *    User Space Access Routines
+ *
+ *    Copyright (C) 2000-2002 Hewlett-Packard (John Marvin)
+ *    Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
+ *    Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
+ *    Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
+ *    Copyright (C) 2017 Helge Deller <deller@gmx.de>
+ *    Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
+ *
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/*
+ * These routines still have plenty of room for optimization
+ * (word & doubleword load/store, dual issue, store hints, etc.).
+ */
+
+/*
+ * The following routines assume that space register 3 (sr3) contains
+ * the space id associated with the current user's address space.
+ */
+
+
+	.text
+	
+#include <asm/assembly.h>
+#include <asm/errno.h>
+#include <linux/linkage.h>
+
+	/*
+	 * get_sr gets the appropriate space value into
+	 * sr1 for kernel/user space access, depending
+	 * on the flag stored in the task structure.
+	 */
+
+	.macro  get_sr
+	mfctl       %cr30,%r1
+	ldw         TI_SEGMENT(%r1),%r22
+	mfsp        %sr3,%r1
+	or,<>       %r22,%r0,%r0
+	copy        %r0,%r1
+	mtsp        %r1,%sr1
+	.endm
+
+	/*
+	 * unsigned long lclear_user(void *to, unsigned long n)
+	 *
+	 * Returns 0 for success;
+	 * otherwise, returns the number of bytes not transferred.
+	 */
+
+ENTRY_CFI(lclear_user)
+	comib,=,n   0,%r25,$lclu_done
+	get_sr
+$lclu_loop:
+	addib,<>    -1,%r25,$lclu_loop
+1:      stbs,ma     %r0,1(%sr1,%r26)
+
+$lclu_done:
+	bv          %r0(%r2)
+	copy        %r25,%r28
+
+2:	b           $lclu_done
+	ldo         1(%r25),%r25
+
+	ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
+ENDPROC_CFI(lclear_user)
+
+
+	/*
+	 * long lstrnlen_user(char *s, long n)
+	 *
+	 * Returns 0 if exception before zero byte or reaching N,
+	 *         N+1 if N would be exceeded,
+	 *         else strlen + 1 (i.e. includes zero byte).
+	 */
+
+ENTRY_CFI(lstrnlen_user)
+	comib,=     0,%r25,$lslen_nzero
+	copy	    %r26,%r24
+	get_sr
+1:      ldbs,ma     1(%sr1,%r26),%r1
+$lslen_loop:
+	comib,=,n   0,%r1,$lslen_done
+	addib,<>    -1,%r25,$lslen_loop
+2:      ldbs,ma     1(%sr1,%r26),%r1
+$lslen_done:
+	bv          %r0(%r2)
+	sub	    %r26,%r24,%r28
+
+$lslen_nzero:
+	b           $lslen_done
+	ldo         1(%r26),%r26 /* special case for N == 0 */
+
+3:      b	    $lslen_done
+	copy        %r24,%r26    /* reset r26 so 0 is returned on fault */
+
+	ASM_EXCEPTIONTABLE_ENTRY(1b,3b)
+	ASM_EXCEPTIONTABLE_ENTRY(2b,3b)
+
+ENDPROC_CFI(lstrnlen_user)
+
+
+/*
+ * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+ *
+ * Inputs:
+ * - sr1 already contains space of source region
+ * - sr2 already contains space of destination region
+ *
+ * Returns:
+ * - number of bytes that could not be copied.
+ *   On success, this will be zero.
+ *
+ * This code is based on a C implementation of a copy routine written by
+ * Randolph Chung, which in turn was derived from glibc.
+ *
+ * Several strategies are tried to get the best performance for various
+ * conditions. In the optimal case, we copy by loops that copy 32 or 16 bytes
+ * at a time using general registers.  Unaligned copies are handled either by
+ * aligning the destination and then using a shift-and-write method, or in a
+ * few cases by falling back to a byte-at-a-time copy.
+ *
+ * Testing with various alignments and buffer sizes shows that this code is
+ * often >10x faster than a simple byte-at-a-time copy, even for strangely
+ * aligned operands. It is interesting to note that the glibc version of memcpy
+ * (written in C) is actually quite fast already. This routine is able to beat
+ * it by 30-40% for aligned copies because of the loop unrolling, but in some
+ * cases the glibc version is still slightly faster. This lends credibility to
+ * the idea that gcc can generate very good code as long as we are careful.
+ *
+ * Possible optimizations:
+ * - add cache prefetching
+ * - try not to use the post-increment address modifiers; they may create
+ *   additional interlocks. Assumption is that those were only efficient on old
+ *   machines (pre PA8000 processors)
+ */
+
+	dst = arg0
+	src = arg1
+	len = arg2
+	end = arg3
+	t1  = r19
+	t2  = r20
+	t3  = r21
+	t4  = r22
+	srcspc = sr1
+	dstspc = sr2
+
+	t0 = r1
+	a1 = t1
+	a2 = t2
+	a3 = t3
+	a0 = t4
+
+	save_src = ret0
+	save_dst = ret1
+	save_len = r31
+
+ENTRY_CFI(pa_memcpy)
+	/* Last destination address */
+	add	dst,len,end
+
+	/* short copy with less than 16 bytes? */
+	cmpib,COND(>>=),n 15,len,.Lbyte_loop
+
+	/* same alignment? */
+	xor	src,dst,t0
+	extru	t0,31,2,t1
+	cmpib,<>,n  0,t1,.Lunaligned_copy
+
+#ifdef CONFIG_64BIT
+	/* only do 64-bit copies if we can get aligned. */
+	extru	t0,31,3,t1
+	cmpib,<>,n  0,t1,.Lalign_loop32
+
+	/* loop until we are 64-bit aligned */
+.Lalign_loop64:
+	extru	dst,31,3,t1
+	cmpib,=,n	0,t1,.Lcopy_loop_16_start
+20:	ldb,ma	1(srcspc,src),t1
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lalign_loop64
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_loop_16_start:
+	ldi	31,t0
+.Lcopy_loop_16:
+	cmpb,COND(>>=),n t0,len,.Lword_loop
+
+10:	ldd	0(srcspc,src),t1
+11:	ldd	8(srcspc,src),t2
+	ldo	16(src),src
+12:	std,ma	t1,8(dstspc,dst)
+13:	std,ma	t2,8(dstspc,dst)
+14:	ldd	0(srcspc,src),t1
+15:	ldd	8(srcspc,src),t2
+	ldo	16(src),src
+16:	std,ma	t1,8(dstspc,dst)
+17:	std,ma	t2,8(dstspc,dst)
+
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+	b	.Lcopy_loop_16
+	ldo	-32(len),len
+
+.Lword_loop:
+	cmpib,COND(>>=),n 3,len,.Lbyte_loop
+20:	ldw,ma	4(srcspc,src),t1
+21:	stw,ma	t1,4(dstspc,dst)
+	b	.Lword_loop
+	ldo	-4(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+#endif /* CONFIG_64BIT */
+
+	/* loop until we are 32-bit aligned */
+.Lalign_loop32:
+	extru	dst,31,2,t1
+	cmpib,=,n	0,t1,.Lcopy_loop_8
+20:	ldb,ma	1(srcspc,src),t1
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lalign_loop32
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+
+.Lcopy_loop_8:
+	cmpib,COND(>>=),n 15,len,.Lbyte_loop
+
+10:	ldw	0(srcspc,src),t1
+11:	ldw	4(srcspc,src),t2
+12:	stw,ma	t1,4(dstspc,dst)
+13:	stw,ma	t2,4(dstspc,dst)
+14:	ldw	8(srcspc,src),t1
+15:	ldw	12(srcspc,src),t2
+	ldo	16(src),src
+16:	stw,ma	t1,4(dstspc,dst)
+17:	stw,ma	t2,4(dstspc,dst)
+
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+	b	.Lcopy_loop_8
+	ldo	-16(len),len
+
+.Lbyte_loop:
+	cmpclr,COND(<>) len,%r0,%r0
+	b,n	.Lcopy_done
+20:	ldb	0(srcspc,src),t1
+	ldo	1(src),src
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lbyte_loop
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_done:
+	bv	%r0(%r2)
+	sub	end,dst,ret0
+
+
+	/* src and dst are not aligned the same way. */
+	/* need to go the hard way */
+.Lunaligned_copy:
+	/* align until dst is 32bit-word-aligned */
+	extru	dst,31,2,t1
+	cmpib,=,n	0,t1,.Lcopy_dstaligned
+20:	ldb	0(srcspc,src),t1
+	ldo	1(src),src
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lunaligned_copy
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_dstaligned:
+
+	/* store src, dst and len in safe place */
+	copy	src,save_src
+	copy	dst,save_dst
+	copy	len,save_len
+
+	/* len now needs to give the number of words to copy */
+	SHRREG	len,2,len
+
+	/*
+	 * Copy from a not-aligned src to an aligned dst using shifts.
+	 * Handles 4 words per loop.
+	 */
+
+	depw,z src,28,2,t0
+	subi 32,t0,t0
+	mtsar t0
+	extru len,31,2,t0
+	cmpib,= 2,t0,.Lcase2
+	/* Make src aligned by rounding it down.  */
+	depi 0,31,2,src
+
+	cmpiclr,<> 3,t0,%r0
+	b,n .Lcase3
+	cmpiclr,<> 1,t0,%r0
+	b,n .Lcase1
+.Lcase0:
+	cmpb,COND(=) %r0,len,.Lcda_finish
+	nop
+
+1:	ldw,ma 4(srcspc,src), a3
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a0
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	b,n .Ldo3
+.Lcase1:
+1:	ldw,ma 4(srcspc,src), a2
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a3
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	ldo -1(len),len
+	cmpb,COND(=),n %r0,len,.Ldo0
+.Ldo4:
+1:	ldw,ma 4(srcspc,src), a0
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a2, a3, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo3:
+1:	ldw,ma 4(srcspc,src), a1
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a3, a0, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo2:
+1:	ldw,ma 4(srcspc,src), a2
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a0, a1, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo1:
+1:	ldw,ma 4(srcspc,src), a3
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a1, a2, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+	ldo -4(len),len
+	cmpb,COND(<>) %r0,len,.Ldo4
+	nop
+.Ldo0:
+	shrpw a2, a3, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+
+.Lcda_rdfault:
+.Lcda_finish:
+	/* calculate new src, dst and len and jump to byte-copy loop */
+	sub	dst,save_dst,t0
+	add	save_src,t0,src
+	b	.Lbyte_loop
+	sub	save_len,t0,len
+
+.Lcase3:
+1:	ldw,ma 4(srcspc,src), a0
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a1
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	b .Ldo2
+	ldo 1(len),len
+.Lcase2:
+1:	ldw,ma 4(srcspc,src), a1
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a2
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	b .Ldo1
+	ldo 2(len),len
+
+
+	/* fault exception fixup handlers: */
+#ifdef CONFIG_64BIT
+.Lcopy16_fault:
+	b	.Lcopy_done
+10:	std,ma	t1,8(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+#endif
+
+.Lcopy8_fault:
+	b	.Lcopy_done
+10:	stw,ma	t1,4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+ENDPROC_CFI(pa_memcpy)
+
+	.end
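
For operands with different alignments, pa_memcpy above aligns the destination and then builds each destination word from two source words with shrpw. A C sketch of that shift-and-merge idea (a little-endian variant, purely illustrative; the real routine additionally installs exception fixups for faulting user accesses):

/* Word-at-a-time copy from a misaligned source via shift-and-merge. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void sketch_shift_copy(uint8_t *dst, const uint8_t *src, size_t len)
{
	/* Byte-copy until dst is word aligned, as the .Lunaligned_copy loop does. */
	while (len && ((uintptr_t)dst & 3)) {
		*dst++ = *src++;
		len--;
	}

	size_t off = (uintptr_t)src & 3;	/* source misalignment in bytes */

	if (off == 0) {
		memcpy(dst, src, len);		/* same alignment: plain copy */
		return;
	}

	const uint32_t *wsrc = (const uint32_t *)(src - off);	/* rounded down */
	unsigned int shift = off * 8;
	uint32_t prev, cur;

	memcpy(&prev, wsrc++, 4);
	while (len >= 4) {
		memcpy(&cur, wsrc++, 4);
		/* Little-endian equivalent of shrpw: merge two source words. */
		uint32_t merged = (prev >> shift) | (cur << (32 - shift));

		memcpy(dst, &merged, 4);
		dst += 4;
		prev = cur;
		len -= 4;
	}

	/* Copy the tail bytes the slow way. */
	src = (const uint8_t *)(wsrc - 1) + off;
	while (len--)
		*dst++ = *src++;
}

int main(void)
{
	/* Word-aligned so the rounded-down word reads stay inside the array. */
	static _Alignas(4) const char msg[64] = "unaligned copy via shift-and-merge";
	char out[64] = { 0 };

	sketch_shift_copy((uint8_t *)out, (const uint8_t *)msg + 1, sizeof(msg) - 1);
	printf("%s\n", out);	/* prints the string minus its first character */
	return 0;
}
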
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
new file mode 100644
index 0000000..865a7f7
--- /dev/null
+++ b/arch/parisc/lib/memcpy.c
@@ -0,0 +1,84 @@
+/*
+ *    Optimized memory copy routines.
+ *
+ *    Copyright (C) 2004 Randolph Chung <tausq@debian.org>
+ *    Copyright (C) 2013-2017 Helge Deller <deller@gmx.de>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Portions derived from the GNU C Library
+ *    Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/uaccess.h>
+
+#define get_user_space() (uaccess_kernel() ? 0 : mfsp(3))
+#define get_kernel_space() (0)
+
+/* Returns 0 for success; otherwise, returns the number of bytes not transferred. */
+extern unsigned long pa_memcpy(void *dst, const void *src,
+				unsigned long len);
+
+unsigned long raw_copy_to_user(void __user *dst, const void *src,
+			       unsigned long len)
+{
+	mtsp(get_kernel_space(), 1);
+	mtsp(get_user_space(), 2);
+	return pa_memcpy((void __force *)dst, src, len);
+}
+EXPORT_SYMBOL(raw_copy_to_user);
+
+unsigned long raw_copy_from_user(void *dst, const void __user *src,
+			       unsigned long len)
+{
+	mtsp(get_user_space(), 1);
+	mtsp(get_kernel_space(), 2);
+	return pa_memcpy(dst, (void __force *)src, len);
+}
+EXPORT_SYMBOL(raw_copy_from_user);
+
+unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long len)
+{
+	mtsp(get_user_space(), 1);
+	mtsp(get_user_space(), 2);
+	return pa_memcpy((void __force *)dst, (void __force *)src, len);
+}
+
+
+void *memcpy(void *dst, const void *src, size_t count)
+{
+	mtsp(get_kernel_space(), 1);
+	mtsp(get_kernel_space(), 2);
+	pa_memcpy(dst, src, count);
+	return dst;
+}
+
+EXPORT_SYMBOL(raw_copy_in_user);
+EXPORT_SYMBOL(memcpy);
+
+long probe_kernel_read(void *dst, const void *src, size_t size)
+{
+	unsigned long addr = (unsigned long)src;
+
+	if (addr < PAGE_SIZE)
+		return -EFAULT;
+
+	/* check for I/O space F_EXTEND(0xfff00000) access as well? */
+
+	return __probe_kernel_read(dst, src, size);
+}
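
As with pa_memcpy, the raw_copy_* wrappers return the number of bytes that could not be transferred, and callers treat any nonzero return as a fault. A tiny user-space model of that convention (sketch_copy is illustrative, not kernel code):

/* Model of the "bytes not copied" return convention used above. */
#include <stdio.h>
#include <string.h>

/* Pretend the source becomes unreadable after `readable` bytes. */
static unsigned long sketch_copy(void *dst, const void *src,
				 unsigned long len, unsigned long readable)
{
	unsigned long n = len < readable ? len : readable;

	memcpy(dst, src, n);
	return len - n;		/* 0 on success, else bytes not transferred */
}

int main(void)
{
	char buf[16];
	unsigned long left = sketch_copy(buf, "hello, world", 13, 5);

	if (left)	/* a kernel caller would return -EFAULT here */
		printf("%lu bytes not copied\n", left);
	return 0;
}
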
diff --git a/arch/parisc/lib/memset.c b/arch/parisc/lib/memset.c
new file mode 100644
index 0000000..1d7929b
--- /dev/null
+++ b/arch/parisc/lib/memset.c
@@ -0,0 +1,91 @@
+/* Copyright (C) 1991, 1997 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Slight modifications for pa-risc linux - Paul Bame <bame@debian.org> */
+
+#include <linux/types.h>
+#include <asm/string.h>
+
+#define OPSIZ (BITS_PER_LONG/8)
+typedef unsigned long op_t;
+
+void *
+memset (void *dstpp, int sc, size_t len)
+{
+  unsigned int c = sc;
+  long int dstp = (long int) dstpp;
+
+  if (len >= 8)
+    {
+      size_t xlen;
+      op_t cccc;
+
+      cccc = (unsigned char) c;
+      cccc |= cccc << 8;
+      cccc |= cccc << 16;
+      if (OPSIZ > 4)
+	/* Do the shift in two steps to avoid warning if long has 32 bits.  */
+	cccc |= (cccc << 16) << 16;
+
+      /* There are at least some bytes to set.
+	 No need to test for LEN == 0 in this alignment loop.  */
+      while (dstp % OPSIZ != 0)
+	{
+	  ((unsigned char *) dstp)[0] = c;
+	  dstp += 1;
+	  len -= 1;
+	}
+
+      /* Write 8 `op_t' per iteration until less than 8 `op_t' remain.  */
+      xlen = len / (OPSIZ * 8);
+      while (xlen > 0)
+	{
+	  ((op_t *) dstp)[0] = cccc;
+	  ((op_t *) dstp)[1] = cccc;
+	  ((op_t *) dstp)[2] = cccc;
+	  ((op_t *) dstp)[3] = cccc;
+	  ((op_t *) dstp)[4] = cccc;
+	  ((op_t *) dstp)[5] = cccc;
+	  ((op_t *) dstp)[6] = cccc;
+	  ((op_t *) dstp)[7] = cccc;
+	  dstp += 8 * OPSIZ;
+	  xlen -= 1;
+	}
+      len %= OPSIZ * 8;
+
+      /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain.  */
+      xlen = len / OPSIZ;
+      while (xlen > 0)
+	{
+	  ((op_t *) dstp)[0] = cccc;
+	  dstp += OPSIZ;
+	  xlen -= 1;
+	}
+      len %= OPSIZ;
+    }
+
+  /* Write the last few bytes.  */
+  while (len > 0)
+    {
+      ((unsigned char *) dstp)[0] = c;
+      dstp += 1;
+      len -= 1;
+    }
+
+  return dstpp;
+}
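
The core trick in memset() above is replicating the fill byte across a full op_t so that the unrolled loop can store a whole word per iteration. The replication step in isolation:

/* Build a word whose every byte lane holds the fill value, as memset() does. */
#include <stdio.h>

int main(void)
{
	unsigned long cccc = (unsigned char)0xab;

	cccc |= cccc << 8;
	cccc |= cccc << 16;
	if (sizeof(cccc) > 4)
		cccc |= (cccc << 16) << 16;	/* two steps, as in the code above */

	printf("%#lx\n", cccc);	/* 0xabab... : one fill byte per byte lane */
	return 0;
}
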
diff --git a/arch/parisc/lib/ucmpdi2.c b/arch/parisc/lib/ucmpdi2.c
new file mode 100644
index 0000000..8e6014a
--- /dev/null
+++ b/arch/parisc/lib/ucmpdi2.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+
+union ull_union {
+	unsigned long long ull;
+	struct {
+		unsigned int high;
+		unsigned int low;
+	} ui;
+};
+
+int __ucmpdi2(unsigned long long a, unsigned long long b)
+{
+	union ull_union au = {.ull = a};
+	union ull_union bu = {.ull = b};
+
+	if (au.ui.high < bu.ui.high)
+		return 0;
+	else if (au.ui.high > bu.ui.high)
+		return 2;
+	if (au.ui.low < bu.ui.low)
+		return 0;
+	else if (au.ui.low > bu.ui.low)
+		return 2;
+	return 1;
+}