v4.19.13 snapshot.
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
new file mode 100644
index 0000000..4072849
--- /dev/null
+++ b/drivers/char/Kconfig
@@ -0,0 +1,570 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Character device configuration
+#
+
+menu "Character devices"
+
+source "drivers/tty/Kconfig"
+
+config DEVMEM
+	bool "/dev/mem virtual device support"
+	default y
+	help
+	  Say Y here if you want to support the /dev/mem device.
+	  The /dev/mem device is used to access areas of physical
+	  memory.
+	  When in doubt, say "Y".
+
+config DEVKMEM
+	bool "/dev/kmem virtual device support"
+	# On arm64, VMALLOC_START < PAGE_OFFSET, which confuses kmem read/write
+	depends on !ARM64
+	help
+	  Say Y here if you want to support the /dev/kmem device. The
+	  /dev/kmem device is rarely used, but can be used for certain
+	  kinds of kernel debugging operations.
+	  When in doubt, say "N".
+
+config SGI_SNSC
+	bool "SGI Altix system controller communication support"
+	depends on (IA64_SGI_SN2 || IA64_GENERIC)
+	help
+	  If you have an SGI Altix and you want to enable system
+	  controller communication from user space (you want this!),
+	  say Y.  Otherwise, say N.
+
+config SGI_TIOCX
+	bool "SGI TIO CX driver support"
+	depends on (IA64_SGI_SN2 || IA64_GENERIC)
+	help
+	  If you have an SGI Altix and you have FPGA devices attached
+	  to your TIO, say Y here, otherwise say N.
+
+config SGI_MBCS
+	tristate "SGI FPGA Core Services driver support"
+	depends on SGI_TIOCX
+	help
+	  If you have an SGI Altix with an attached SABrick,
+	  say Y or M here, otherwise say N.
+
+source "drivers/tty/serial/Kconfig"
+source "drivers/tty/serdev/Kconfig"
+
+config TTY_PRINTK
+	tristate "TTY driver to output user messages via printk"
+	depends on EXPERT && TTY
+	default n
+	---help---
+	  If you say Y here, support for writing user messages (i.e.
+	  console messages) via printk becomes available.
+
+	  This feature is useful for interleaving user messages with
+	  kernel messages.
+	  To use it, write user messages to /dev/ttyprintk or redirect
+	  the console to this TTY.
+
+	  If unsure, say N.
+
+config PRINTER
+	tristate "Parallel printer support"
+	depends on PARPORT
+	---help---
+	  If you intend to attach a printer to the parallel port of your Linux
+	  box (as opposed to using a serial printer; if the connector at the
+	  printer has 9 or 25 holes ["female"], then it's serial), say Y.
+	  Also read the Printing-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  It is possible to share one parallel port among several devices
+	  (e.g. printer and ZIP drive) and it is safe to compile the
+	  corresponding drivers into the kernel.
+
+	  To compile this driver as a module, choose M here and read
+	  <file:Documentation/admin-guide/parport.rst>.  The module will be called lp.
+
+	  If you have several parallel ports, you can specify which ports to
+	  use with the "lp" kernel command line option.  (Try "man bootparam"
+	  or see the documentation of your boot loader (lilo or loadlin) about
+	  how to pass options to the kernel at boot time.)  The syntax of the
+	  "lp" command line option can be found in <file:drivers/char/lp.c>.
+
+	  If you have more than 8 printers, you need to increase the LP_NO
+	  macro in lp.c and the PARPORT_MAX macro in parport.h.
+
+config LP_CONSOLE
+	bool "Support for console on line printer"
+	depends on PRINTER
+	---help---
+	  If you want kernel messages to be printed out as they occur, you
+	  can have a console on the printer. This option adds support for
+	  doing that; to actually get it to happen you need to pass the
+	  option "console=lp0" to the kernel at boot time.
+
+	  If the printer is out of paper (or off, or unplugged, or too
+	  busy..) the kernel will stall until the printer is ready again.
+	  By defining CONSOLE_LP_STRICT to 0 (at your own risk) you
+	  can make the kernel continue when this happens,
+	  but it'll lose the kernel messages.
+
+	  If unsure, say N.
+
+config PPDEV
+	tristate "Support for user-space parallel port device drivers"
+	depends on PARPORT
+	---help---
+	  Saying Y to this adds support for /dev/parport device nodes.  This
+	  is needed for programs that want portable access to the parallel
+	  port, for instance deviceid (which displays Plug-and-Play device
+	  IDs).
+
+	  This is the parallel port equivalent of SCSI generic support (sg).
+	  It is safe to say N to this -- it is not needed for normal printing
+	  or parallel port CD-ROM/disk support.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ppdev.
+
+	  If unsure, say N.
+
+source "drivers/tty/hvc/Kconfig"
+
+config VIRTIO_CONSOLE
+	tristate "Virtio console"
+	depends on VIRTIO && TTY
+	select HVC_DRIVER
+	help
+	  Virtio console for use with hypervisors.
+
+	  Also serves as a general-purpose serial device for data
+	  transfer between the guest and host.  Character devices at
+	  /dev/vportNpn will be created when corresponding ports are
+	  found, where N is the device number and n is the port number
+	  within that device.  If specified by the host, a sysfs
+	  attribute called 'name' will be populated with a name for
+	  the port which can be used by udev scripts to create a
+	  symlink to the device.
+
+config IBM_BSR
+	tristate "IBM POWER Barrier Synchronization Register support"
+	depends on PPC_PSERIES
+	help
+	  This device exposes a hardware mechanism for fast synchronization
+	  of threads across a large system, avoiding the bouncing of a
+	  cacheline between several cores.
+
+config POWERNV_OP_PANEL
+	tristate "IBM POWERNV Operator Panel Display support"
+	depends on PPC_POWERNV
+	default m
+	help
+	  If you say Y here, a special character device node, /dev/op_panel,
+	  will be created which exposes the operator panel display on IBM
+	  Power Systems machines with FSPs.
+
+	  If you don't require access to the operator panel display from user
+	  space, say N.
+
+	  If unsure, say M here to build it as a module called powernv-op-panel.
+
+source "drivers/char/ipmi/Kconfig"
+
+config DS1620
+	tristate "NetWinder thermometer support"
+	depends on ARCH_NETWINDER
+	help
+	  Say Y here to include support for the thermal management hardware
+	  found in the NetWinder. This driver allows the user to control the
+	  temperature set points and to read the current temperature.
+
+	  It is also possible to say M here to build it as a module (ds1620).
+	  It is recommended for use on a NetWinder, but it is not a
+	  necessity.
+
+config NWBUTTON
+	tristate "NetWinder Button"
+	depends on ARCH_NETWINDER
+	---help---
+	  If you say Y here and create a character device node /dev/nwbutton
+	  with major and minor numbers 10 and 158 ("man mknod"), then every
+	  time the orange button is pressed a number of times, the number of
+	  times the button was pressed will be written to that device.
+
+	  This is most useful for applications, as yet unwritten, which
+	  perform actions based on how many times the button is pressed in a
+	  row.
+
+	  Do not hold the button down for too long, as the driver does not
+	  alter the behaviour of the hardware reset circuitry attached to the
+	  button; it will still execute a hard reset if the button is held
+	  down for longer than approximately five seconds.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nwbutton.
+
+	  Most people will answer Y to this question and "Reboot Using Button"
+	  below to be able to initiate a system shutdown from the button.
+
+config NWBUTTON_REBOOT
+	bool "Reboot Using Button"
+	depends on NWBUTTON
+	help
+	  If you say Y here, then you will be able to initiate a system
+	  shutdown and reboot by pressing the orange button a number of times.
+	  The number of presses to initiate the shutdown is two by default,
+	  but this can be altered by modifying the value of NUM_PRESSES_REBOOT
+	  in nwbutton.h and recompiling the driver or, if you compile the
+	  driver as a module, you can specify the number of presses at load
+	  time with "insmod button reboot_count=<something>".
+
+config NWFLASH
+	tristate "NetWinder flash support"
+	depends on ARCH_NETWINDER
+	---help---
+	  If you say Y here and create a character device /dev/flash with
+	  major 10 and minor 160 you can manipulate the flash ROM containing
+	  the NetWinder firmware. Be careful as accidentally overwriting the
+	  flash contents can render your computer unbootable. On no account
+	  allow random users access to this device. :-)
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nwflash.
+
+	  If you're not sure, say N.
+
+source "drivers/char/hw_random/Kconfig"
+
+config NVRAM
+	tristate "/dev/nvram support"
+	depends on ATARI || X86 || GENERIC_NVRAM
+	---help---
+	  If you say Y here and create a character special file /dev/nvram
+	  with major number 10 and minor number 144 using mknod ("man mknod"),
+	  you get read and write access to the extra bytes of non-volatile
+	  memory in the real time clock (RTC), which is contained in every PC
+	  and most Ataris.  The actual number of bytes varies, depending on the
+	  nvram in the system, but is usually 114 (128-14 for the RTC).
+
+	  This memory is conventionally called "CMOS RAM" on PCs and "NVRAM"
+	  on Ataris. /dev/nvram may be used to view settings there, or to
+	  change them (with some utility). It could also be used to frequently
+	  save a few bits of very important data that may not be lost over
+	  power-off and for which writing to disk is too insecure. Note
+	  however that most NVRAM space in a PC belongs to the BIOS and you
+	  should NEVER idly tamper with it. See Ralf Brown's interrupt list
+	  for a guide to the use of CMOS bytes by your BIOS.
+
+	  On Atari machines, /dev/nvram is always configured and does not need
+	  to be selected.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nvram.
+
+#
+# These legacy RTC drivers just cause too many conflicts with the generic
+# RTC framework ... let's not even try to coexist any more.
+#
+if RTC_LIB=n
+
+config RTC
+	tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)"
+	depends on ALPHA || (MIPS && MACH_LOONGSON64)
+	---help---
+	  If you say Y here and create a character special file /dev/rtc with
+	  major number 10 and minor number 135 using mknod ("man mknod"), you
+	  will get access to the real time clock (or hardware clock) built
+	  into your computer.
+
+	  Every PC has such a clock built in. It can be used to generate
+	  signals from as low as 1Hz up to 8192Hz, and can also be used
+	  as a 24 hour alarm. It reports status information via the file
+	  /proc/driver/rtc and its behaviour is set by various ioctls on
+	  /dev/rtc.
+
+	  If you run Linux on a multiprocessor machine and said Y to
+	  "Symmetric Multi Processing" above, you should say Y here to read
+	  and set the RTC in an SMP compatible fashion.
+
+	  If you think you have a use for such a device (such as periodic data
+	  sampling), then say Y here, and read <file:Documentation/rtc.txt>
+	  for details.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called rtc.
+
+config JS_RTC
+	tristate "Enhanced Real Time Clock Support"
+	depends on SPARC32 && PCI
+	---help---
+	  If you say Y here and create a character special file /dev/rtc with
+	  major number 10 and minor number 135 using mknod ("man mknod"), you
+	  will get access to the real time clock (or hardware clock) built
+	  into your computer.
+
+	  Every PC has such a clock built in. It can be used to generate
+	  signals from as low as 1Hz up to 8192Hz, and can also be used
+	  as a 24 hour alarm. It reports status information via the file
+	  /proc/driver/rtc and its behaviour is set by various ioctls on
+	  /dev/rtc.
+
+	  If you think you have a use for such a device (such as periodic data
+	  sampling), then say Y here, and read <file:Documentation/rtc.txt>
+	  for details.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called js-rtc.
+
+config EFI_RTC
+	bool "EFI Real Time Clock Services"
+	depends on IA64
+
+endif # RTC_LIB
+
+config DTLK
+	tristate "Double Talk PC internal speech card support"
+	depends on ISA
+	help
+	  This driver is for the DoubleTalk PC, a speech synthesizer
+	  manufactured by RC Systems (<http://www.rcsys.com/>).  It is also
+	  called the `internal DoubleTalk'.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called dtlk.
+
+config XILINX_HWICAP
+	tristate "Xilinx HWICAP Support"
+	depends on XILINX_VIRTEX || MICROBLAZE
+	help
+	  This option enables support for Xilinx Internal Configuration
+	  Access Port (ICAP) driver.  The ICAP is used on Xilinx Virtex
+	  FPGA platforms to partially reconfigure the FPGA at runtime.
+
+	  If unsure, say N.
+
+config R3964
+	tristate "Siemens R3964 line discipline"
+	depends on TTY
+	---help---
+	  This driver allows synchronous communication with devices using the
+	  Siemens R3964 packet protocol. Unless you are dealing with special
+	  hardware like PLCs, you are unlikely to need this.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called n_r3964.
+
+	  If unsure, say N.
+
+config APPLICOM
+	tristate "Applicom intelligent fieldbus card support"
+	depends on PCI
+	---help---
+	  This driver provides the kernel-side support for the intelligent
+	  fieldbus cards made by Applicom International. More information
+	  about these cards can be found on the WWW at the address
+	  <http://www.applicom-int.com/>, or by email from David Woodhouse
+	  <dwmw2@infradead.org>.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called applicom.
+
+	  If unsure, say N.
+
+config SONYPI
+	tristate "Sony Vaio Programmable I/O Control Device support"
+	depends on X86_32 && PCI && INPUT
+	---help---
+	  This driver enables access to the Sony Programmable I/O Control
+	  Device which can be found in many (all?) Sony Vaio laptops.
+
+	  If you have one of those laptops, read
+	  <file:Documentation/laptops/sonypi.txt>, and say Y or M here.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sonypi.
+
+config GPIO_TB0219
+	tristate "TANBAC TB0219 GPIO support"
+	depends on TANBAC_TB022X
+	select GPIO_VR41XX
+
+source "drivers/char/pcmcia/Kconfig"
+
+config MWAVE
+	tristate "ACP Modem (Mwave) support"
+	depends on X86 && TTY
+	select SERIAL_8250
+	---help---
+	  The ACP modem (Mwave) for Linux is a WinModem. It is composed of a
+	  kernel driver and a user-level application. Together these components
+	  support direct attachment to public switched telephone networks (PSTNs)
+	  in selected countries worldwide.
+
+	  This version of the ACP Modem driver supports the IBM Thinkpad 600E,
+	  600, and 770, which include on-board ACP modem hardware.
+
+	  The modem also supports the standard communications port interface
+	  (ttySx) and is compatible with the Hayes AT Command Set.
+
+	  The user level application needed to use this driver can be found at
+	  the IBM Linux Technology Center (LTC) web site:
+	  <http://www.ibm.com/linux/ltc/>.
+
+	  If you own one of the above IBM Thinkpads which has the Mwave chipset
+	  in it, say Y.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mwave.
+
+config SCx200_GPIO
+	tristate "NatSemi SCx200 GPIO Support"
+	depends on SCx200
+	select NSC_GPIO
+	help
+	  Give userspace access to the GPIO pins on the National
+	  Semiconductor SCx200 processors.
+
+	  If compiled as a module, it will be called scx200_gpio.
+
+config PC8736x_GPIO
+	tristate "NatSemi PC8736x GPIO Support"
+	depends on X86_32 && !UML
+	default SCx200_GPIO	# mostly N
+	select NSC_GPIO		# needed for support routines
+	help
+	  Give userspace access to the GPIO pins on the National
+	  Semiconductor PC-8736x (x=[03456]) SuperIO chip.  The chip
+	  has multiple functional units, including several managed by
+	  the hwmon/pc87360 driver.  Tested with the PC-87366.
+
+	  If compiled as a module, it will be called pc8736x_gpio.
+
+config NSC_GPIO
+	tristate "NatSemi Base GPIO Support"
+	depends on X86_32
+	# selected by SCx200_GPIO and PC8736x_GPIO
+	# what about 2 selectors differing: m != y
+	help
+	  Common support used (and needed) by the scx200_gpio and
+	  pc8736x_gpio drivers.  If those drivers are built as
+	  modules, this one will be too, named nsc_gpio.
+
+config RAW_DRIVER
+	tristate "RAW driver (/dev/raw/rawN)"
+	depends on BLOCK
+	help
+	  The raw driver permits block devices to be bound to /dev/raw/rawN.
+	  Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O.
+	  See the raw(8) manpage for more details.
+
+	  Applications should preferably open the device (e.g. /dev/hda1)
+	  with the O_DIRECT flag.
+
+config MAX_RAW_DEVS
+	int "Maximum number of RAW devices to support (1-65536)"
+	depends on RAW_DRIVER
+	range 1 65536
+	default "256"
+	help
+	  The maximum number of RAW devices that are supported.
+	  Default is 256. Increase this number in case you need lots of
+	  raw devices.
+
+config HPET
+	bool "HPET - High Precision Event Timer" if (X86 || IA64)
+	default n
+	depends on ACPI
+	help
+	  If you say Y here, you will have a miscdevice named "/dev/hpet/".  Each
+	  open selects one of the timers supported by the HPET.  The timers are
+	  non-periodic and/or periodic.
+
+config HPET_MMAP
+	bool "Allow mmap of HPET"
+	default y
+	depends on HPET
+	help
+	  If you say Y here, user applications will be able to mmap
+	  the HPET registers.
+
+config HPET_MMAP_DEFAULT
+	bool "Enable HPET MMAP access by default"
+	default y
+	depends on HPET_MMAP
+	help
+	  In some hardware implementations, the page containing HPET
+	  registers may also contain other things that shouldn't be
+	  exposed to the user.  This option selects the default (if
+	  kernel parameter hpet_mmap is not set) user access to the
+	  registers for applications that require it.
+
+config HANGCHECK_TIMER
+	tristate "Hangcheck timer"
+	depends on X86 || IA64 || PPC64 || S390
+	help
+	  The hangcheck-timer module detects when the system has gone
+	  out to lunch past a certain margin.  It can reboot the system
+	  or merely print a warning.
+
+config UV_MMTIMER
+	tristate "UV_MMTIMER Memory mapped RTC for SGI UV"
+	depends on X86_UV
+	default m
+	help
+	  The uv_mmtimer device allows direct userspace access to the
+	  UV system timer.
+
+source "drivers/char/tpm/Kconfig"
+
+config TELCLOCK
+	tristate "Telecom clock driver for ATCA SBC"
+	depends on X86
+	default n
+	help
+	  The telecom clock device is specific to the MPCBL0010 and MPCBL0050
+	  ATCA computers and allows direct userspace access to the telecom
+	  clock configuration settings.  This device is used for hardware
+	  synchronization across the ATCA backplane
+	  fabric.  Upon loading, the driver exports a sysfs directory,
+	  /sys/devices/platform/telco_clock, with a number of files for
+	  controlling the behavior of this hardware.
+
+config DEVPORT
+	bool "/dev/port character device"
+	depends on ISA || PCI
+	default y
+	help
+	  Say Y here if you want to support the /dev/port device. The /dev/port
+	  device is similar to /dev/mem, but for I/O ports.
+
+source "drivers/s390/char/Kconfig"
+
+source "drivers/char/xillybus/Kconfig"
+
+config ADI
+	tristate "SPARC Privileged ADI driver"
+	depends on SPARC64
+	default m
+	help
+	  SPARC M7 and newer processors utilize ADI (Application Data
+	  Integrity) to version and protect memory.  This driver provides
+	  read/write access to the ADI versions for privileged processes.
+	  This feature is also known as MCD (Memory Corruption Detection)
+	  and SSM (Silicon Secured Memory).  Intended consumers of this
+	  driver include crash and makedumpfile.
+
+endmenu
+
+config RANDOM_TRUST_CPU
+	bool "Trust the CPU manufacturer to initialize Linux's CRNG"
+	depends on X86 || S390 || PPC
+	default n
+	help
+	  Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
+	  RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
+	  for the purposes of initializing Linux's CRNG.  Since this is not
+	  something that can be independently audited, this amounts to trusting
+	  that CPU manufacturer (perhaps with the insistence or mandate
+	  of a Nation State's intelligence or law enforcement agencies)
+	  has not installed a hidden back door to compromise the CPU's
+	  random number generation facilities. This can also be configured
+	  at boot with "random.trust_cpu=on/off".
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
new file mode 100644
index 0000000..b8d42b4
--- /dev/null
+++ b/drivers/char/Makefile
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the kernel character device drivers.
+#
+
+obj-y				+= mem.o random.o
+obj-$(CONFIG_TTY_PRINTK)	+= ttyprintk.o
+obj-y				+= misc.o
+obj-$(CONFIG_ATARI_DSP56K)	+= dsp56k.o
+obj-$(CONFIG_VIRTIO_CONSOLE)	+= virtio_console.o
+obj-$(CONFIG_RAW_DRIVER)	+= raw.o
+obj-$(CONFIG_SGI_SNSC)		+= snsc.o snsc_event.o
+obj-$(CONFIG_MSPEC)		+= mspec.o
+obj-$(CONFIG_UV_MMTIMER)	+= uv_mmtimer.o
+obj-$(CONFIG_IBM_BSR)		+= bsr.o
+obj-$(CONFIG_SGI_MBCS)		+= mbcs.o
+
+obj-$(CONFIG_PRINTER)		+= lp.o
+
+obj-$(CONFIG_APM_EMULATION)	+= apm-emulation.o
+
+obj-$(CONFIG_DTLK)		+= dtlk.o
+obj-$(CONFIG_APPLICOM)		+= applicom.o
+obj-$(CONFIG_SONYPI)		+= sonypi.o
+obj-$(CONFIG_RTC)		+= rtc.o
+obj-$(CONFIG_HPET)		+= hpet.o
+obj-$(CONFIG_EFI_RTC)		+= efirtc.o
+obj-$(CONFIG_XILINX_HWICAP)	+= xilinx_hwicap/
+ifeq ($(CONFIG_GENERIC_NVRAM),y)
+  obj-$(CONFIG_NVRAM)	+= generic_nvram.o
+else
+  obj-$(CONFIG_NVRAM)	+= nvram.o
+endif
+obj-$(CONFIG_TOSHIBA)		+= toshiba.o
+obj-$(CONFIG_DS1620)		+= ds1620.o
+obj-$(CONFIG_HW_RANDOM)		+= hw_random/
+obj-$(CONFIG_PPDEV)		+= ppdev.o
+obj-$(CONFIG_NWBUTTON)		+= nwbutton.o
+obj-$(CONFIG_NWFLASH)		+= nwflash.o
+obj-$(CONFIG_SCx200_GPIO)	+= scx200_gpio.o
+obj-$(CONFIG_PC8736x_GPIO)	+= pc8736x_gpio.o
+obj-$(CONFIG_NSC_GPIO)		+= nsc_gpio.o
+obj-$(CONFIG_GPIO_TB0219)	+= tb0219.o
+obj-$(CONFIG_TELCLOCK)		+= tlclk.o
+
+obj-$(CONFIG_MWAVE)		+= mwave/
+obj-y				+= agp/
+obj-$(CONFIG_PCMCIA)		+= pcmcia/
+
+obj-$(CONFIG_HANGCHECK_TIMER)	+= hangcheck-timer.o
+obj-$(CONFIG_TCG_TPM)		+= tpm/
+
+obj-$(CONFIG_PS3_FLASH)		+= ps3flash.o
+
+obj-$(CONFIG_JS_RTC)		+= js-rtc.o
+js-rtc-y = rtc.o
+
+obj-$(CONFIG_XILLYBUS)		+= xillybus/
+obj-$(CONFIG_POWERNV_OP_PANEL)	+= powernv-op-panel.o
+obj-$(CONFIG_ADI)		+= adi.o
diff --git a/drivers/char/adi.c b/drivers/char/adi.c
new file mode 100644
index 0000000..751d7cc
--- /dev/null
+++ b/drivers/char/adi.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Privileged ADI driver for sparc64
+ *
+ * Author: Tom Hromatka <tom.hromatka@oracle.com>
+ */
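+/*
+ * The device addresses ADI version tags by block: byte i at file
+ * offset B corresponds to the tag of ADI block B + i.  A privileged
+ * process would typically lseek() to the block index of interest and
+ * then read() or write() one tag byte per block (illustrative usage,
+ * derived from the read/write implementations below).
+ */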
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <asm/asi.h>
+
+#define MAX_BUF_SZ	PAGE_SIZE
+
+static int adi_open(struct inode *inode, struct file *file)
+{
+	file->f_mode |= FMODE_UNSIGNED_OFFSET;
+	return 0;
+}
+
+static int read_mcd_tag(unsigned long addr)
+{
+	long err;
+	int ver;
+
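+	/*
+	 * Load the version tag for @addr with a physically-indexed
+	 * ldxa from ASI_MCD_REAL.  If the load faults, the __ex_table
+	 * entry (1b -> 3b) sends execution to the fixup stub, which
+	 * sets err to EFAULT and branches back to label 2.
+	 */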
+	__asm__ __volatile__(
+		"1:	ldxa [%[addr]] %[asi], %[ver]\n"
+		"	mov 0, %[err]\n"
+		"2:\n"
+		"	.section .fixup,#alloc,#execinstr\n"
+		"	.align 4\n"
+		"3:	sethi %%hi(2b), %%g1\n"
+		"	jmpl  %%g1 + %%lo(2b), %%g0\n"
+		"	mov %[invalid], %[err]\n"
+		"	.previous\n"
+		"	.section __ex_table, \"a\"\n"
+		"	.align 4\n"
+		"	.word  1b, 3b\n"
+		"	.previous\n"
+		: [ver] "=r" (ver), [err] "=r" (err)
+		: [addr] "r"  (addr), [invalid] "i" (EFAULT),
+		  [asi] "i" (ASI_MCD_REAL)
+		: "memory", "g1"
+		);
+
+	if (err)
+		return -EFAULT;
+	else
+		return ver;
+}
+
+static ssize_t adi_read(struct file *file, char __user *buf,
+			size_t count, loff_t *offp)
+{
+	size_t ver_buf_sz, bytes_read = 0;
+	int ver_buf_idx = 0;
+	loff_t offset;
+	u8 *ver_buf;
+	ssize_t ret;
+
+	ver_buf_sz = min_t(size_t, count, MAX_BUF_SZ);
+	ver_buf = kmalloc(ver_buf_sz, GFP_KERNEL);
+	if (!ver_buf)
+		return -ENOMEM;
+
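+	/*
+	 * The file offset counts ADI blocks, not bytes: each byte read
+	 * back is the version tag of one ADI block.
+	 */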
+	offset = (*offp) * adi_blksize();
+
+	while (bytes_read < count) {
+		ret = read_mcd_tag(offset);
+		if (ret < 0)
+			goto out;
+
+		ver_buf[ver_buf_idx] = (u8)ret;
+		ver_buf_idx++;
+		offset += adi_blksize();
+
+		if (ver_buf_idx >= ver_buf_sz) {
+			if (copy_to_user(buf + bytes_read, ver_buf,
+					 ver_buf_sz)) {
+				ret = -EFAULT;
+				goto out;
+			}
+
+			bytes_read += ver_buf_sz;
+			ver_buf_idx = 0;
+
+			ver_buf_sz = min(count - bytes_read,
+					 (size_t)MAX_BUF_SZ);
+		}
+	}
+
+	(*offp) += bytes_read;
+	ret = bytes_read;
+out:
+	kfree(ver_buf);
+	return ret;
+}
+
+static int set_mcd_tag(unsigned long addr, u8 ver)
+{
+	long err;
+
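+	/*
+	 * Store the new version tag via ASI_MCD_REAL, using the same
+	 * exception-table fixup scheme as read_mcd_tag() above.
+	 */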
+	__asm__ __volatile__(
+		"1:	stxa %[ver], [%[addr]] %[asi]\n"
+		"	mov 0, %[err]\n"
+		"2:\n"
+		"	.section .fixup,#alloc,#execinstr\n"
+		"	.align 4\n"
+		"3:	sethi %%hi(2b), %%g1\n"
+		"	jmpl %%g1 + %%lo(2b), %%g0\n"
+		"	mov %[invalid], %[err]\n"
+		"	.previous\n"
+		"	.section __ex_table, \"a\"\n"
+		"	.align 4\n"
+		"	.word 1b, 3b\n"
+		"	.previous\n"
+		: [err] "=r" (err)
+		: [ver] "r" (ver), [addr] "r"  (addr),
+		  [invalid] "i" (EFAULT), [asi] "i" (ASI_MCD_REAL)
+		: "memory", "g1"
+		);
+
+	if (err)
+		return -EFAULT;
+	else
+		return ver;
+}
+
+static ssize_t adi_write(struct file *file, const char __user *buf,
+			 size_t count, loff_t *offp)
+{
+	size_t ver_buf_sz, bytes_written = 0;
+	loff_t offset;
+	u8 *ver_buf;
+	ssize_t ret;
+	int i;
+
+	if (count == 0)
+		return -EINVAL;
+
+	ver_buf_sz = min_t(size_t, count, MAX_BUF_SZ);
+	ver_buf = kmalloc(ver_buf_sz, GFP_KERNEL);
+	if (!ver_buf)
+		return -ENOMEM;
+
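+	/* As in adi_read(), the file offset is in ADI-block units. */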
+	offset = (*offp) * adi_blksize();
+
+	do {
+		if (copy_from_user(ver_buf, &buf[bytes_written],
+				   ver_buf_sz)) {
+			ret = -EFAULT;
+			goto out;
+		}
+
+		for (i = 0; i < ver_buf_sz; i++) {
+			ret = set_mcd_tag(offset, ver_buf[i]);
+			if (ret < 0)
+				goto out;
+
+			offset += adi_blksize();
+		}
+
+		bytes_written += ver_buf_sz;
+		ver_buf_sz = min(count - bytes_written, (size_t)MAX_BUF_SZ);
+	} while (bytes_written < count);
+
+	(*offp) += bytes_written;
+	ret = bytes_written;
+out:
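+	/* Make sure all MCD tag stores have completed before returning. */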
+	__asm__ __volatile__("membar #Sync");
+	kfree(ver_buf);
+	return ret;
+}
+
+static loff_t adi_llseek(struct file *file, loff_t offset, int whence)
+{
+	loff_t ret = -EINVAL;
+
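+	/*
+	 * Offsets index ADI blocks; only SEEK_SET and SEEK_CUR are
+	 * supported.
+	 */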
+	switch (whence) {
+	case SEEK_END:
+	case SEEK_DATA:
+	case SEEK_HOLE:
+		/* unsupported */
+		return -EINVAL;
+	case SEEK_CUR:
+		if (offset == 0)
+			return file->f_pos;
+
+		offset += file->f_pos;
+		break;
+	case SEEK_SET:
+		break;
+	}
+
+	if (offset != file->f_pos) {
+		file->f_pos = offset;
+		file->f_version = 0;
+		ret = offset;
+	}
+
+	return ret;
+}
+
+static const struct file_operations adi_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= adi_llseek,
+	.open		= adi_open,
+	.read		= adi_read,
+	.write		= adi_write,
+};
+
+static struct miscdevice adi_miscdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = KBUILD_MODNAME,
+	.fops = &adi_fops,
+};
+
+static int __init adi_init(void)
+{
+	if (!adi_capable())
+		return -EPERM;
+
+	return misc_register(&adi_miscdev);
+}
+
+static void __exit adi_exit(void)
+{
+	misc_deregister(&adi_miscdev);
+}
+
+module_init(adi_init);
+module_exit(adi_exit);
+
+MODULE_AUTHOR("Tom Hromatka <tom.hromatka@oracle.com>");
+MODULE_DESCRIPTION("Privileged interface to ADI");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
new file mode 100644
index 0000000..6231714
--- /dev/null
+++ b/drivers/char/agp/Kconfig
@@ -0,0 +1,163 @@
+# SPDX-License-Identifier: GPL-2.0
+menuconfig AGP
+	tristate "/dev/agpgart (AGP Support)"
+	depends on ALPHA || IA64 || PARISC || PPC || X86
+	depends on PCI
+	---help---
+	  AGP (Accelerated Graphics Port) is a bus system mainly used to
+	  connect graphics cards to the rest of the system.
+
+	  If you have an AGP system and you say Y here, it will be possible to
+	  use the AGP features of your 3D rendering video card. This code acts
+	  as a sort of "AGP driver" for the motherboard's chipset.
+
+	  If you need more texture memory than you can get with the AGP GART
+	  (theoretically up to 256 MB, but in practice usually 64 or 128 MB
+	  due to kernel allocation issues), you could use PCI accesses
+	  and have up to a couple of gigabytes of texture space.
+
+	  Note that this is the only means to have X/GLX use
+	  write-combining with MTRR support on the AGP bus. Without it, OpenGL
+	  direct rendering will be a lot slower but still faster than PIO.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called agpgart.
+
+	  You should say Y here if you want to use GLX or DRI.
+
+	  If unsure, say N.
+
+config AGP_ALI
+	tristate "ALI chipset support"
+	depends on AGP && X86_32
+	---help---
+	  This option gives you AGP support for the GLX component of
+	  X on the following ALi chipsets.  The supported chipsets
+	  include the M1541, M1621, M1631, M1632, M1641, M1647, and M1651.
+	  For the ALi-chipset question, ALi suggests you refer to
+	  <http://www.ali.com.tw/>.
+
+	  The M1541 chipset can do AGP 1x and 2x, but note that there is an
+	  acknowledged incompatibility with Matrox G200 cards. Due to
+	  timing issues, this chipset cannot do AGP 2x with the G200.
+	  This is a hardware limitation. AGP 1x seems to be fine, though.
+
+config AGP_ATI
+	tristate "ATI chipset support"
+	depends on AGP && X86_32
+	---help---
+	  This option gives you AGP support for the GLX component of
+	  X on the ATI RadeonIGP family of chipsets.
+
+config AGP_AMD
+	tristate "AMD Irongate, 761, and 762 chipset support"
+	depends on AGP && X86_32
+	help
+	  This option gives you AGP support for the GLX component of
+	  X on AMD Irongate, 761, and 762 chipsets.
+
+config AGP_AMD64
+	tristate "AMD Opteron/Athlon64 on-CPU GART support"
+	depends on AGP && X86 && AMD_NB
+	help
+	  This option gives you AGP support for the GLX component of
+	  X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
+	  You still need an external AGP bridge like the AMD 8151, VIA
+	  K8T400M, or SiS 755. It may also support other AGP bridges when
+	  loaded with agp_try_unsupported=1.
+
+config AGP_INTEL
+	tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
+	depends on AGP && X86
+	select INTEL_GTT
+	help
+	  This option gives you AGP support for the GLX component of X
+	  on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
+	  E7205 and E7505 chipsets and full support for the 810, 815, 830M,
+	  845G, 852GM, 855GM, 865G and I915 integrated graphics chipsets.
+
+config AGP_NVIDIA
+	tristate "NVIDIA nForce/nForce2 chipset support"
+	depends on AGP && X86_32
+	help
+	  This option gives you AGP support for the GLX component of
+	  X on NVIDIA chipsets including nForce and nForce2.
+
+config AGP_SIS
+	tristate "SiS chipset support"
+	depends on AGP && X86
+	help
+	  This option gives you AGP support for the GLX component of
+	  X on Silicon Integrated Systems [SiS] chipsets.
+
+	  Note that 5591/5592 AGP chipsets are NOT supported.
+
+config AGP_SWORKS
+	tristate "Serverworks LE/HE chipset support"
+	depends on AGP && X86_32
+	help
+	  Say Y here to support the Serverworks AGP card.  See
+	  <http://www.serverworks.com/> for product descriptions and images.
+
+config AGP_VIA
+	tristate "VIA chipset support"
+	depends on AGP && X86
+	help
+	  This option gives you AGP support for the GLX component of
+	  X on VIA MVP3/Apollo Pro chipsets.
+
+config AGP_I460
+	tristate "Intel 460GX chipset support"
+	depends on AGP && (IA64_DIG || IA64_GENERIC)
+	help
+	  This option gives you AGP GART support for the Intel 460GX chipset
+	  for IA64 processors.
+
+config AGP_HP_ZX1
+	tristate "HP ZX1 chipset AGP support"
+	depends on AGP && (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC)
+	help
+	  This option gives you AGP GART support for the HP ZX1 chipset
+	  for IA64 processors.
+
+config AGP_PARISC
+	tristate "HP Quicksilver AGP support"
+	depends on AGP && PARISC && 64BIT
+	help
+	  This option gives you AGP GART support for the HP Quicksilver
+	  AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000
+	  workstation...)
+
+config AGP_ALPHA_CORE
+	tristate "Alpha AGP support"
+	depends on AGP && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL)
+	default AGP
+
+config AGP_UNINORTH
+	tristate "Apple UniNorth & U3 AGP support"
+	depends on AGP && PPC_PMAC
+	help
+	  This option gives you AGP support for Apple machines with a
+	  UniNorth or U3 (Apple G5) bridge.
+
+config AGP_EFFICEON
+	tristate "Transmeta Efficeon support"
+	depends on AGP && X86_32
+	help
+	  This option gives you AGP support for the Transmeta Efficeon
+	  series processors with integrated northbridges.
+
+config AGP_SGI_TIOCA
+	tristate "SGI TIO chipset AGP support"
+	depends on AGP && (IA64_SGI_SN2 || IA64_GENERIC)
+	help
+	  This option gives you AGP GART support for the SGI TIO chipset
+	  for IA64 processors.
+
+config INTEL_GTT
+	tristate
+	depends on X86 && PCI
+
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
new file mode 100644
index 0000000..4a786ff
--- /dev/null
+++ b/drivers/char/agp/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+agpgart-y := backend.o frontend.o generic.o isoch.o
+
+agpgart-$(CONFIG_COMPAT)	+= compat_ioctl.o
+
+obj-$(CONFIG_AGP)		+= agpgart.o
+obj-$(CONFIG_AGP_ALI)		+= ali-agp.o
+obj-$(CONFIG_AGP_ATI)		+= ati-agp.o
+obj-$(CONFIG_AGP_AMD)		+= amd-k7-agp.o
+obj-$(CONFIG_AGP_AMD64)		+= amd64-agp.o
+obj-$(CONFIG_AGP_ALPHA_CORE)	+= alpha-agp.o
+obj-$(CONFIG_AGP_EFFICEON)	+= efficeon-agp.o
+obj-$(CONFIG_AGP_HP_ZX1)	+= hp-agp.o
+obj-$(CONFIG_AGP_PARISC)	+= parisc-agp.o
+obj-$(CONFIG_AGP_I460)		+= i460-agp.o
+obj-$(CONFIG_AGP_INTEL)		+= intel-agp.o
+obj-$(CONFIG_INTEL_GTT)		+= intel-gtt.o
+obj-$(CONFIG_AGP_NVIDIA)	+= nvidia-agp.o
+obj-$(CONFIG_AGP_SGI_TIOCA)	+= sgi-agp.o
+obj-$(CONFIG_AGP_SIS)		+= sis-agp.o
+obj-$(CONFIG_AGP_SWORKS)	+= sworks-agp.o
+obj-$(CONFIG_AGP_UNINORTH)	+= uninorth-agp.o
+obj-$(CONFIG_AGP_VIA)		+= via-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
new file mode 100644
index 0000000..4eb1c77
--- /dev/null
+++ b/drivers/char/agp/agp.h
@@ -0,0 +1,290 @@
+/*
+ * AGPGART
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2004 Dave Jones
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AGP_BACKEND_PRIV_H
+#define _AGP_BACKEND_PRIV_H 1
+
+#include <asm/agp.h>	/* for flush_agp_cache() */
+
+#define PFX "agpgart: "
+
+//#define AGP_DEBUG 1
+#ifdef AGP_DEBUG
+#define DBG(x,y...) printk (KERN_DEBUG PFX "%s: " x "\n", __func__ , ## y)
+#else
+#define DBG(x,y...) do { } while (0)
+#endif
+
+extern struct agp_bridge_data *agp_bridge;
+
+enum aper_size_type {
+	U8_APER_SIZE,
+	U16_APER_SIZE,
+	U32_APER_SIZE,
+	LVL2_APER_SIZE,
+	FIXED_APER_SIZE
+};
+
+struct gatt_mask {
+	unsigned long mask;
+	u32 type;
+	/* totally device specific, for integrated chipsets that
+	 * might have different types of memory masks.  For other
+	 * devices this will probably be ignored */
+};
+
+#define AGP_PAGE_DESTROY_UNMAP 1
+#define AGP_PAGE_DESTROY_FREE 2
+
+struct aper_size_info_8 {
+	int size;
+	int num_entries;
+	int page_order;
+	u8 size_value;
+};
+
+struct aper_size_info_16 {
+	int size;
+	int num_entries;
+	int page_order;
+	u16 size_value;
+};
+
+struct aper_size_info_32 {
+	int size;
+	int num_entries;
+	int page_order;
+	u32 size_value;
+};
+
+struct aper_size_info_lvl2 {
+	int size;
+	int num_entries;
+	u32 size_value;
+};
+
+struct aper_size_info_fixed {
+	int size;
+	int num_entries;
+	int page_order;
+};
+
+struct agp_bridge_driver {
+	struct module *owner;
+	const void *aperture_sizes;
+	int num_aperture_sizes;
+	enum aper_size_type size_type;
+	bool cant_use_aperture;
+	bool needs_scratch_page;
+	const struct gatt_mask *masks;
+	int (*fetch_size)(void);
+	int (*configure)(void);
+	void (*agp_enable)(struct agp_bridge_data *, u32);
+	void (*cleanup)(void);
+	void (*tlb_flush)(struct agp_memory *);
+	unsigned long (*mask_memory)(struct agp_bridge_data *, dma_addr_t, int);
+	void (*cache_flush)(void);
+	int (*create_gatt_table)(struct agp_bridge_data *);
+	int (*free_gatt_table)(struct agp_bridge_data *);
+	int (*insert_memory)(struct agp_memory *, off_t, int);
+	int (*remove_memory)(struct agp_memory *, off_t, int);
+	struct agp_memory *(*alloc_by_type) (size_t, int);
+	void (*free_by_type)(struct agp_memory *);
+	struct page *(*agp_alloc_page)(struct agp_bridge_data *);
+	int (*agp_alloc_pages)(struct agp_bridge_data *, struct agp_memory *, size_t);
+	void (*agp_destroy_page)(struct page *, int flags);
+	void (*agp_destroy_pages)(struct agp_memory *);
+	int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
+};
+
+struct agp_bridge_data {
+	const struct agp_version *version;
+	const struct agp_bridge_driver *driver;
+	const struct vm_operations_struct *vm_ops;
+	void *previous_size;
+	void *current_size;
+	void *dev_private_data;
+	struct pci_dev *dev;
+	u32 __iomem *gatt_table;
+	u32 *gatt_table_real;
+	unsigned long scratch_page;
+	struct page *scratch_page_page;
+	dma_addr_t scratch_page_dma;
+	unsigned long gart_bus_addr;
+	unsigned long gatt_bus_addr;
+	u32 mode;
+	enum chipset_type type;
+	unsigned long *key_list;
+	atomic_t current_memory_agp;
+	atomic_t agp_in_use;
+	int max_memory_agp;	/* in number of pages */
+	int aperture_size_idx;
+	int capndx;
+	int flags;
+	char major_version;
+	char minor_version;
+	struct list_head list;
+	u32 apbase_config;
+	/* list of agp_memory mapped to the aperture */
+	struct list_head mapped_list;
+	spinlock_t mapped_lock;
+};
+
+#define KB(x)	((x) * 1024)
+#define MB(x)	(KB (KB (x)))
+#define GB(x)	(MB (KB (x)))
+
+#define A_SIZE_8(x)	((struct aper_size_info_8 *) x)
+#define A_SIZE_16(x)	((struct aper_size_info_16 *) x)
+#define A_SIZE_32(x)	((struct aper_size_info_32 *) x)
+#define A_SIZE_LVL2(x)	((struct aper_size_info_lvl2 *) x)
+#define A_SIZE_FIX(x)	((struct aper_size_info_fixed *) x)
+#define A_IDX8(bridge)	(A_SIZE_8((bridge)->driver->aperture_sizes) + i)
+#define A_IDX16(bridge)	(A_SIZE_16((bridge)->driver->aperture_sizes) + i)
+#define A_IDX32(bridge)	(A_SIZE_32((bridge)->driver->aperture_sizes) + i)
+#define MAXKEY		(4096 * 32)
+
+#define PGE_EMPTY(b, p)	(!(p) || (p) == (unsigned long) (b)->scratch_page)
+
+
+struct agp_device_ids {
+	unsigned short device_id; /* first, to make table easier to read */
+	enum chipset_type chipset;
+	const char *chipset_name;
+	int (*chipset_setup) (struct pci_dev *pdev);	/* used to override generic */
+};
+
+/* Driver registration */
+struct agp_bridge_data *agp_alloc_bridge(void);
+void agp_put_bridge(struct agp_bridge_data *bridge);
+int agp_add_bridge(struct agp_bridge_data *bridge);
+void agp_remove_bridge(struct agp_bridge_data *bridge);
+
+/* Frontend routines. */
+int agp_frontend_initialize(void);
+void agp_frontend_cleanup(void);
+
+/* Generic routines. */
+void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
+int agp_generic_create_gatt_table(struct agp_bridge_data *bridge);
+int agp_generic_free_gatt_table(struct agp_bridge_data *bridge);
+struct agp_memory *agp_create_memory(int scratch_pages);
+int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type);
+int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type);
+struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
+void agp_generic_free_by_type(struct agp_memory *curr);
+struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge);
+int agp_generic_alloc_pages(struct agp_bridge_data *agp_bridge,
+			    struct agp_memory *memory, size_t page_count);
+void agp_generic_destroy_page(struct page *page, int flags);
+void agp_generic_destroy_pages(struct agp_memory *memory);
+void agp_free_key(int key);
+int agp_num_entries(void);
+u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 mode, u32 command);
+void agp_device_command(u32 command, bool agp_v3);
+int agp_3_5_enable(struct agp_bridge_data *bridge);
+void global_cache_flush(void);
+void get_agp_version(struct agp_bridge_data *bridge);
+unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
+				      dma_addr_t phys, int type);
+int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
+				  int type);
+struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
+
+/* generic functions for user-populated AGP memory types */
+struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);
+void agp_alloc_page_array(size_t size, struct agp_memory *mem);
+static inline void agp_free_page_array(struct agp_memory *mem)
+{
+	kvfree(mem->pages);
+}
+
+
+/* generic routines for agp>=3 */
+int agp3_generic_fetch_size(void);
+void agp3_generic_tlbflush(struct agp_memory *mem);
+int agp3_generic_configure(void);
+void agp3_generic_cleanup(void);
+
+/* aperture sizes have been standardised since v3 */
+#define AGP_GENERIC_SIZES_ENTRIES 11
+extern const struct aper_size_info_16 agp3_generic_sizes[];
+
+extern int agp_off;
+extern int agp_try_unsupported_boot;
+
+long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+/* Chipset independent registers (from AGP Spec) */
+#define AGP_APBASE	0x10
+#define AGP_APERTURE_BAR	0
+
+#define AGPSTAT		0x4
+#define AGPCMD		0x8
+#define AGPNISTAT	0xc
+#define AGPCTRL		0x10
+#define AGPAPSIZE	0x14
+#define AGPNEPG		0x16
+#define AGPGARTLO	0x18
+#define AGPGARTHI	0x1c
+#define AGPNICMD	0x20
+
+#define AGP_MAJOR_VERSION_SHIFT	(20)
+#define AGP_MINOR_VERSION_SHIFT	(16)
+
+#define AGPSTAT_RQ_DEPTH	(0xff000000)
+#define AGPSTAT_RQ_DEPTH_SHIFT	24
+
+#define AGPSTAT_CAL_MASK	(1<<12|1<<11|1<<10)
+#define AGPSTAT_ARQSZ		(1<<15|1<<14|1<<13)
+#define AGPSTAT_ARQSZ_SHIFT	13
+
+#define AGPSTAT_SBA		(1<<9)
+#define AGPSTAT_AGP_ENABLE	(1<<8)
+#define AGPSTAT_FW		(1<<4)
+#define AGPSTAT_MODE_3_0	(1<<3)
+
+#define AGPSTAT2_1X		(1<<0)
+#define AGPSTAT2_2X		(1<<1)
+#define AGPSTAT2_4X		(1<<2)
+
+#define AGPSTAT3_RSVD		(1<<2)
+#define AGPSTAT3_8X		(1<<1)
+#define AGPSTAT3_4X		(1)
+
+#define AGPCTRL_APERENB		(1<<8)
+#define AGPCTRL_GTLBEN		(1<<7)
+
+#define AGP2_RESERVED_MASK 0x00fffcc8
+#define AGP3_RESERVED_MASK 0x00ff00c4
+
+#define AGP_ERRATA_FASTWRITES 1<<0
+#define AGP_ERRATA_SBA	 1<<1
+#define AGP_ERRATA_1X 1<<2
+
+#endif	/* _AGP_BACKEND_PRIV_H */
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
new file mode 100644
index 0000000..89527ba
--- /dev/null
+++ b/drivers/char/agp/ali-agp.c
@@ -0,0 +1,422 @@
+/*
+ * ALi AGPGART routines.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <asm/page.h>		/* PAGE_SIZE */
+#include "agp.h"
+
+#define ALI_AGPCTRL	0xb8
+#define ALI_ATTBASE	0xbc
+#define ALI_TLBCTRL	0xc0
+#define ALI_TAGCTRL	0xc4
+#define ALI_CACHE_FLUSH_CTRL	0xD0
+#define ALI_CACHE_FLUSH_ADDR_MASK	0xFFFFF000
+#define ALI_CACHE_FLUSH_EN	0x100
+
+static int ali_fetch_size(void)
+{
+	int i;
+	u32 temp;
+	struct aper_size_info_32 *values;
+
+	pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
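+	/* the aperture size field lives in the low nibble of ATTBASE */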
+	temp &= ~(0xfffffff0);
+	values = A_SIZE_32(agp_bridge->driver->aperture_sizes);
+
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge->previous_size =
+			    agp_bridge->current_size = (void *) (values + i);
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+static void ali_tlbflush(struct agp_memory *mem)
+{
+	u32 temp;
+
+	pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+	temp &= 0xfffffff0;
+	temp |= (1<<0 | 1<<1);
+	pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, temp);
+}
+
+static void ali_cleanup(void)
+{
+	struct aper_size_info_32 *previous_size;
+	u32 temp;
+
+	previous_size = A_SIZE_32(agp_bridge->previous_size);
+
+	pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+	/* clear tag */
+	pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL,
+			((temp & 0xffffff00) | 0x00000001|0x00000002));
+
+	pci_read_config_dword(agp_bridge->dev,  ALI_ATTBASE, &temp);
+	pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE,
+			((temp & 0x00000ff0) | previous_size->size_value));
+}
+
+static int ali_configure(void)
+{
+	u32 temp;
+	struct aper_size_info_32 *current_size;
+
+	current_size = A_SIZE_32(agp_bridge->current_size);
+
+	/* aperture size and gatt addr */
+	pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
+	temp = (((temp & 0x00000ff0) | (agp_bridge->gatt_bus_addr & 0xfffff000))
+			| (current_size->size_value & 0xf));
+	pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, temp);
+
+	/* tlb control */
+	pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+	pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010));
+
+	/* address to map to */
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+#if 0
+	if (agp_bridge->type == ALI_M1541) {
+		u32 nlvm_addr = 0;
+
+		switch (current_size->size_value) {
+			case 0:  break;
+			case 1:  nlvm_addr = 0x100000;break;
+			case 2:  nlvm_addr = 0x200000;break;
+			case 3:  nlvm_addr = 0x400000;break;
+			case 4:  nlvm_addr = 0x800000;break;
+			case 6:  nlvm_addr = 0x1000000;break;
+			case 7:  nlvm_addr = 0x2000000;break;
+			case 8:  nlvm_addr = 0x4000000;break;
+			case 9:  nlvm_addr = 0x8000000;break;
+			case 10: nlvm_addr = 0x10000000;break;
+			default: break;
+		}
+		nlvm_addr--;
+		nlvm_addr&=0xfff00000;
+
+		nlvm_addr+= agp_bridge->gart_bus_addr;
+		nlvm_addr|=(agp_bridge->gart_bus_addr>>12);
+		dev_info(&agp_bridge->dev->dev, "nlvm top &base = %8x\n",
+			 nlvm_addr);
+	}
+#endif
+
+	pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+	temp &= 0xffffff7f;		/* enable TLB */
+	pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, temp);
+
+	return 0;
+}
+
+
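+/*
+ * In addition to the global CPU cache flush, the M1541 pushes every
+ * page of the GATT through the chipset's ALI_CACHE_FLUSH_CTRL
+ * register.
+ */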
+static void m1541_cache_flush(void)
+{
+	int i, page_count;
+	u32 temp;
+
+	global_cache_flush();
+
+	page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order;
+	for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {
+		pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+				&temp);
+		pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+				(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+				  (agp_bridge->gatt_bus_addr + i)) |
+				 ALI_CACHE_FLUSH_EN));
+	}
+}
+
+static struct page *m1541_alloc_page(struct agp_bridge_data *bridge)
+{
+	struct page *page = agp_generic_alloc_page(agp_bridge);
+	u32 temp;
+
+	if (!page)
+		return NULL;
+
+	pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
+	pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+			(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+			  page_to_phys(page)) | ALI_CACHE_FLUSH_EN ));
+	return page;
+}
+
+static void ali_destroy_page(struct page *page, int flags)
+{
+	if (page) {
+		if (flags & AGP_PAGE_DESTROY_UNMAP) {
+			global_cache_flush();	/* is this really needed?  --hch */
+			agp_generic_destroy_page(page, flags);
+		} else
+			agp_generic_destroy_page(page, flags);
+	}
+}
+
+static void m1541_destroy_page(struct page *page, int flags)
+{
+	u32 temp;
+
+	if (page == NULL)
+		return;
+
+	if (flags & AGP_PAGE_DESTROY_UNMAP) {
+		global_cache_flush();
+
+		pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
+		pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+				       (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+					 page_to_phys(page)) | ALI_CACHE_FLUSH_EN));
+	}
+	agp_generic_destroy_page(page, flags);
+}
+
+
+/* Setup function */
+
+static const struct aper_size_info_32 ali_generic_sizes[7] =
+{
+	{256, 65536, 6, 10},
+	{128, 32768, 5, 9},
+	{64, 16384, 4, 8},
+	{32, 8192, 3, 7},
+	{16, 4096, 2, 6},
+	{8, 2048, 1, 4},
+	{4, 1024, 0, 3}
+};
+
+static const struct agp_bridge_driver ali_generic_bridge = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= ali_generic_sizes,
+	.size_type		= U32_APER_SIZE,
+	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
+	.configure		= ali_configure,
+	.fetch_size		= ali_fetch_size,
+	.cleanup		= ali_cleanup,
+	.tlb_flush		= ali_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= NULL,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_destroy_page	= ali_destroy_page,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver ali_m1541_bridge = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= ali_generic_sizes,
+	.size_type		= U32_APER_SIZE,
+	.num_aperture_sizes	= 7,
+	.configure		= ali_configure,
+	.fetch_size		= ali_fetch_size,
+	.cleanup		= ali_cleanup,
+	.tlb_flush		= ali_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= NULL,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= m1541_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= m1541_alloc_page,
+	.agp_destroy_page	= m1541_destroy_page,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+
+static struct agp_device_ids ali_agp_device_ids[] =
+{
+	{
+		.device_id	= PCI_DEVICE_ID_AL_M1541,
+		.chipset_name	= "M1541",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_AL_M1621,
+		.chipset_name	= "M1621",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_AL_M1631,
+		.chipset_name	= "M1631",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_AL_M1632,
+		.chipset_name	= "M1632",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_AL_M1641,
+		.chipset_name	= "M1641",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_AL_M1644,
+		.chipset_name	= "M1644",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_AL_M1647,
+		.chipset_name	= "M1647",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_AL_M1651,
+		.chipset_name	= "M1651",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_AL_M1671,
+		.chipset_name	= "M1671",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_AL_M1681,
+		.chipset_name	= "M1681",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_AL_M1683,
+		.chipset_name	= "M1683",
+	},
+
+	{ }, /* dummy final entry, always present */
+};
+
+static int agp_ali_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct agp_device_ids *devs = ali_agp_device_ids;
+	struct agp_bridge_data *bridge;
+	u8 hidden_1621_id, cap_ptr;
+	int j;
+
+	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+	if (!cap_ptr)
+		return -ENODEV;
+
+	/* probe for known chipsets */
+	for (j = 0; devs[j].chipset_name; j++) {
+		if (pdev->device == devs[j].device_id)
+			goto found;
+	}
+
+	dev_err(&pdev->dev, "unsupported ALi chipset [%04x/%04x]\n",
+		pdev->vendor, pdev->device);
+	return -ENODEV;
+
+
+found:
+	bridge = agp_alloc_bridge();
+	if (!bridge)
+		return -ENOMEM;
+
+	bridge->dev = pdev;
+	bridge->capndx = cap_ptr;
+
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_AL_M1541:
+		bridge->driver = &ali_m1541_bridge;
+		break;
+	case PCI_DEVICE_ID_AL_M1621:
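+		/*
+		 * The M1621 ID is also used by later chips; the real
+		 * chipset is identified by the hidden ID in config
+		 * register 0xFB.
+		 */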
+		pci_read_config_byte(pdev, 0xFB, &hidden_1621_id);
+		switch (hidden_1621_id) {
+		case 0x31:
+			devs[j].chipset_name = "M1631";
+			break;
+		case 0x32:
+			devs[j].chipset_name = "M1632";
+			break;
+		case 0x41:
+			devs[j].chipset_name = "M1641";
+			break;
+		case 0x43:
+			devs[j].chipset_name = "M1621";
+			break;
+		case 0x47:
+			devs[j].chipset_name = "M1647";
+			break;
+		case 0x51:
+			devs[j].chipset_name = "M1651";
+			break;
+		default:
+			break;
+		}
+		/*FALLTHROUGH*/
+	default:
+		bridge->driver = &ali_generic_bridge;
+	}
+
+	dev_info(&pdev->dev, "ALi %s chipset\n", devs[j].chipset_name);
+
+	/* Fill in the mode register */
+	pci_read_config_dword(pdev,
+			bridge->capndx+PCI_AGP_STATUS,
+			&bridge->mode);
+
+	pci_set_drvdata(pdev, bridge);
+	return agp_add_bridge(bridge);
+}
+
+static void agp_ali_remove(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+	agp_remove_bridge(bridge);
+	agp_put_bridge(bridge);
+}
+
+static const struct pci_device_id agp_ali_pci_table[] = {
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_AL,
+	.device		= PCI_ANY_ID,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_ali_pci_table);
+
+static struct pci_driver agp_ali_pci_driver = {
+	.name		= "agpgart-ali",
+	.id_table	= agp_ali_pci_table,
+	.probe		= agp_ali_probe,
+	.remove		= agp_ali_remove,
+};
+
+static int __init agp_ali_init(void)
+{
+	if (agp_off)
+		return -EINVAL;
+	return pci_register_driver(&agp_ali_pci_driver);
+}
+
+static void __exit agp_ali_cleanup(void)
+{
+	pci_unregister_driver(&agp_ali_pci_driver);
+}
+
+module_init(agp_ali_init);
+module_exit(agp_ali_cleanup);
+
+MODULE_AUTHOR("Dave Jones");
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
new file mode 100644
index 0000000..c9bf2c2
--- /dev/null
+++ b/drivers/char/agp/alpha-agp.c
@@ -0,0 +1,220 @@
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/machvec.h>
+#include <asm/agp_backend.h>
+#include "../../../arch/alpha/kernel/pci_impl.h"
+
+#include "agp.h"
+
+static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf)
+{
+	alpha_agp_info *agp = agp_bridge->dev_private_data;
+	dma_addr_t dma_addr;
+	unsigned long pa;
+	struct page *page;
+
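+	/*
+	 * Turn the faulting user address into an offset within the
+	 * aperture, then let the platform's AGP ops translate that bus
+	 * address back to the physical page backing it.
+	 */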
+	dma_addr = vmf->address - vmf->vma->vm_start + agp->aperture.bus_base;
+	pa = agp->ops->translate(agp, dma_addr);
+
+	if (pa == (unsigned long)-EINVAL)
+		return VM_FAULT_SIGBUS;	/* no translation */
+
+	/*
+	 * Get the page, inc the use count, and return it
+	 */
+	page = virt_to_page(__va(pa));
+	get_page(page);
+	vmf->page = page;
+	return 0;
+}
+
+static struct aper_size_info_fixed alpha_core_agp_sizes[] =
+{
+	{ 0, 0, 0 }, /* filled in by alpha_core_agp_setup */
+};
+
+static const struct vm_operations_struct alpha_core_agp_vm_ops = {
+	.fault = alpha_core_agp_vm_fault,
+};
+
+
+static int alpha_core_agp_fetch_size(void)
+{
+	return alpha_core_agp_sizes[0].size;
+}
+
+static int alpha_core_agp_configure(void)
+{
+	alpha_agp_info *agp = agp_bridge->dev_private_data;
+	agp_bridge->gart_bus_addr = agp->aperture.bus_base;
+	return 0;
+}
+
+static void alpha_core_agp_cleanup(void)
+{
+	alpha_agp_info *agp = agp_bridge->dev_private_data;
+
+	agp->ops->cleanup(agp);
+}
+
+static void alpha_core_agp_tlbflush(struct agp_memory *mem)
+{
+	alpha_agp_info *agp = agp_bridge->dev_private_data;
+	alpha_mv.mv_pci_tbi(agp->hose, 0, -1);
+}
+
+static void alpha_core_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+	alpha_agp_info *agp = bridge->dev_private_data;
+
+	agp->mode.lw = agp_collect_device_status(bridge, mode,
+					agp->capability.lw);
+
+	agp->mode.bits.enable = 1;
+	agp->ops->configure(agp);
+
+	agp_device_command(agp->mode.lw, false);
+}
+
+static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start,
+					int type)
+{
+	alpha_agp_info *agp = agp_bridge->dev_private_data;
+	int num_entries, status;
+	void *temp;
+
+	if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+		return -EINVAL;
+
+	temp = agp_bridge->current_size;
+	num_entries = A_SIZE_FIX(temp)->num_entries;
+	if ((pg_start + mem->page_count) > num_entries)
+		return -EINVAL;
+
+	status = agp->ops->bind(agp, pg_start, mem);
+	mb();
+	alpha_core_agp_tlbflush(mem);
+
+	return status;
+}
+
+static int alpha_core_agp_remove_memory(struct agp_memory *mem, off_t pg_start,
+					int type)
+{
+	alpha_agp_info *agp = agp_bridge->dev_private_data;
+	int status;
+
+	status = agp->ops->unbind(agp, pg_start, mem);
+	alpha_core_agp_tlbflush(mem);
+	return status;
+}
+
+static int alpha_core_agp_create_free_gatt_table(struct agp_bridge_data *a)
+{
+	return 0;
+}
+
+struct agp_bridge_driver alpha_core_agp_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= alpha_core_agp_sizes,
+	.num_aperture_sizes	= 1,
+	.size_type		= FIXED_APER_SIZE,
+	.cant_use_aperture	= true,
+	.masks			= NULL,
+
+	.fetch_size		= alpha_core_agp_fetch_size,
+	.configure		= alpha_core_agp_configure,
+	.agp_enable		= alpha_core_agp_enable,
+	.cleanup		= alpha_core_agp_cleanup,
+	.tlb_flush		= alpha_core_agp_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= alpha_core_agp_create_free_gatt_table,
+	.free_gatt_table	= alpha_core_agp_create_free_gatt_table,
+	.insert_memory		= alpha_core_agp_insert_memory,
+	.remove_memory		= alpha_core_agp_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+struct agp_bridge_data *alpha_bridge;
+
+int __init
+alpha_core_agp_setup(void)
+{
+	alpha_agp_info *agp = alpha_mv.agp_info();
+	struct pci_dev *pdev;	/* faked */
+	struct aper_size_info_fixed *aper_size;
+
+	if (!agp)
+		return -ENODEV;
+	if (agp->ops->setup(agp))
+		return -ENODEV;
+
+	/*
+	 * Build the aperture size descriptor
+	 */
+	aper_size = alpha_core_agp_sizes;
+	aper_size->size = agp->aperture.size / (1024 * 1024);
+	aper_size->num_entries = agp->aperture.size / PAGE_SIZE;
+	aper_size->page_order = __ffs(aper_size->num_entries / 1024);
+
+	/*
+	 * Build a fake pci_dev struct
+	 */
+	pdev = pci_alloc_dev(NULL);
+	if (!pdev)
+		return -ENOMEM;
+	pdev->vendor = 0xffff;
+	pdev->device = 0xffff;
+	pdev->sysdata = agp->hose;
+
+	alpha_bridge = agp_alloc_bridge();
+	if (!alpha_bridge)
+		goto fail;
+
+	alpha_bridge->driver = &alpha_core_agp_driver;
+	alpha_bridge->vm_ops = &alpha_core_agp_vm_ops;
+	alpha_bridge->current_size = aper_size; /* only 1 size */
+	alpha_bridge->dev_private_data = agp;
+	alpha_bridge->dev = pdev;
+	alpha_bridge->mode = agp->capability.lw;
+
+	printk(KERN_INFO PFX "Detected AGP on hose %d\n", agp->hose->index);
+	return agp_add_bridge(alpha_bridge);
+
+ fail:
+	kfree(pdev);
+	return -ENOMEM;
+}
+
+static int __init agp_alpha_core_init(void)
+{
+	if (agp_off)
+		return -EINVAL;
+	if (alpha_mv.agp_info)
+		return alpha_core_agp_setup();
+	return -ENODEV;
+}
+
+static void __exit agp_alpha_core_cleanup(void)
+{
+	agp_remove_bridge(alpha_bridge);
+	agp_put_bridge(alpha_bridge);
+}
+
+module_init(agp_alpha_core_init);
+module_exit(agp_alpha_core_cleanup);
+
+MODULE_AUTHOR("Jeff Wiedemeier <Jeff.Wiedemeier@hp.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
new file mode 100644
index 0000000..6914e4f
--- /dev/null
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -0,0 +1,568 @@
+/*
+ * AMD K7 AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/set_memory.h>
+#include "agp.h"
+
+#define AMD_MMBASE_BAR	1
+#define AMD_APSIZE	0xac
+#define AMD_MODECNTL	0xb0
+#define AMD_MODECNTL2	0xb2
+#define AMD_GARTENABLE	0x02	/* In mmio region (16-bit register) */
+#define AMD_ATTBASE	0x04	/* In mmio region (32-bit register) */
+#define AMD_TLBFLUSH	0x0c	/* In mmio region (32-bit register) */
+#define AMD_CACHEENTRY	0x10	/* In mmio region (32-bit register) */
+
+static const struct pci_device_id agp_amdk7_pci_table[];
+
+struct amd_page_map {
+	unsigned long *real;
+	unsigned long __iomem *remapped;
+};
+
+static struct _amd_irongate_private {
+	volatile u8 __iomem *registers;
+	struct amd_page_map **gatt_pages;
+	int num_tables;
+} amd_irongate_private;
+
+static int amd_create_page_map(struct amd_page_map *page_map)
+{
+	int i;
+
+	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+	if (page_map->real == NULL)
+		return -ENOMEM;
+
+	set_memory_uc((unsigned long)page_map->real, 1);
+	page_map->remapped = page_map->real;
+
+	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
+		writel(agp_bridge->scratch_page, page_map->remapped+i);
+		readl(page_map->remapped+i);	/* PCI Posting. */
+	}
+
+	return 0;
+}
+
+static void amd_free_page_map(struct amd_page_map *page_map)
+{
+	set_memory_wb((unsigned long)page_map->real, 1);
+	free_page((unsigned long) page_map->real);
+}
+
+static void amd_free_gatt_pages(void)
+{
+	int i;
+	struct amd_page_map **tables;
+	struct amd_page_map *entry;
+
+	tables = amd_irongate_private.gatt_pages;
+	for (i = 0; i < amd_irongate_private.num_tables; i++) {
+		entry = tables[i];
+		if (entry != NULL) {
+			if (entry->real != NULL)
+				amd_free_page_map(entry);
+			kfree(entry);
+		}
+	}
+	kfree(tables);
+	amd_irongate_private.gatt_pages = NULL;
+}
+
+static int amd_create_gatt_pages(int nr_tables)
+{
+	struct amd_page_map **tables;
+	struct amd_page_map *entry;
+	int retval = 0;
+	int i;
+
+	tables = kcalloc(nr_tables + 1, sizeof(struct amd_page_map *),
+			 GFP_KERNEL);
+	if (tables == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < nr_tables; i++) {
+		entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL);
+		tables[i] = entry;
+		if (entry == NULL) {
+			retval = -ENOMEM;
+			break;
+		}
+		retval = amd_create_page_map(entry);
+		if (retval != 0)
+			break;
+	}
+	amd_irongate_private.num_tables = i;
+	amd_irongate_private.gatt_pages = tables;
+
+	if (retval != 0)
+		amd_free_gatt_pages();
+
+	return retval;
+}
+
+/* Since we don't need contiguous memory we just try
+ * to get the gatt table once, with no fallback to
+ * smaller aperture sizes.
+ */
+
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
+	GET_PAGE_DIR_IDX(addr)]->remapped)
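+
+/*
+ * Editor's sketch (not part of the original driver) of how the macros
+ * above decompose a GART bus address: each GATT page maps 4 MB
+ * (1024 entries x 4 KB), so bits 31:22 select the page-directory slot
+ * and bits 21:12 the entry within that GATT page.  The aperture base
+ * used here is a made-up example value.
+ */
+#if 0	/* illustrative example, never built */
+static void example_gatt_decompose(void)
+{
+	unsigned long base = 0xe0000000UL;	/* assumed aperture base */
+	unsigned long addr = base + 5*0x400000 + 7*0x1000;
+
+	/* GET_PAGE_DIR_OFF(addr) = addr >> 22              = 0x385 */
+	/* GET_PAGE_DIR_IDX(addr) = 0x385 - (base >> 22)    = 5     */
+	/* GET_GATT_OFF(addr)     = (addr & 0x3ff000) >> 12 = 7     */
+}
+#endif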
+
+static int amd_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	struct aper_size_info_lvl2 *value;
+	struct amd_page_map page_dir;
+	unsigned long __iomem *cur_gatt;
+	unsigned long addr;
+	int retval;
+	int i;
+
+	value = A_SIZE_LVL2(agp_bridge->current_size);
+	retval = amd_create_page_map(&page_dir);
+	if (retval != 0)
+		return retval;
+
+	retval = amd_create_gatt_pages(value->num_entries / 1024);
+	if (retval != 0) {
+		amd_free_page_map(&page_dir);
+		return retval;
+	}
+
+	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
+	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
+	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+
+	/* Get the address for the gart region.
+	 * This is a bus address even on the alpha, because it's
+	 * used to program the AGP master, not the CPU.
+	 */
+
+	addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
+	agp_bridge->gart_bus_addr = addr;
+
+	/* Calculate the agp offset */
+	for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
+		writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
+			page_dir.remapped+GET_PAGE_DIR_OFF(addr));
+		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
+	}
+
+	for (i = 0; i < value->num_entries; i++) {
+		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
+	}
+
+	return 0;
+}
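+
+/*
+ * Editor's note on the loops above: each page-directory slot holds the
+ * physical address of one GATT page with bit 0 set as the valid bit
+ * (virt_to_phys(...) | 1), and every GATT entry is then pointed at the
+ * scratch page so that stray GART accesses hit a harmless page.
+ */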
+
+static int amd_free_gatt_table(struct agp_bridge_data *bridge)
+{
+	struct amd_page_map page_dir;
+
+	page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
+	page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
+
+	amd_free_gatt_pages();
+	amd_free_page_map(&page_dir);
+	return 0;
+}
+
+static int amd_irongate_fetch_size(void)
+{
+	int i;
+	u32 temp;
+	struct aper_size_info_lvl2 *values;
+
+	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
+	temp = (temp & 0x0000000e);
+	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge->previous_size =
+			    agp_bridge->current_size = (void *) (values + i);
+
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
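+
+/*
+ * Editor's worked example for the lookup above: if APSIZE masked with
+ * 0xe reads back 0x06, the loop matches the {256, 65536, 0x00000006}
+ * entry, i.e. a 256 MB aperture with 256 MB / 4 KB = 65536 GATT
+ * entries.
+ */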
+
+static int amd_irongate_configure(void)
+{
+	struct aper_size_info_lvl2 *current_size;
+	phys_addr_t reg;
+	u32 temp;
+	u16 enable_reg;
+
+	current_size = A_SIZE_LVL2(agp_bridge->current_size);
+
+	if (!amd_irongate_private.registers) {
+		/* Get the memory mapped registers */
+		reg = pci_resource_start(agp_bridge->dev, AMD_MMBASE_BAR);
+		amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(reg, 4096);
+		if (!amd_irongate_private.registers)
+			return -ENOMEM;
+	}
+
+	/* Write out the address of the gatt table */
+	writel(agp_bridge->gatt_bus_addr, amd_irongate_private.registers+AMD_ATTBASE);
+	readl(amd_irongate_private.registers+AMD_ATTBASE);	/* PCI Posting. */
+
+	/* Write the Sync register */
+	pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80);
+
+	/* Set indexing mode */
+	pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00);
+
+	/* Write the enable register */
+	enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
+	enable_reg = (enable_reg | 0x0004);
+	writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
+	readw(amd_irongate_private.registers+AMD_GARTENABLE);	/* PCI Posting. */
+
+	/* Write out the size register */
+	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
+	temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 1);
+	pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
+
+	/* Flush the tlb */
+	writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
+	readl(amd_irongate_private.registers+AMD_TLBFLUSH);	/* PCI Posting.*/
+	return 0;
+}
+
+static void amd_irongate_cleanup(void)
+{
+	struct aper_size_info_lvl2 *previous_size;
+	u32 temp;
+	u16 enable_reg;
+
+	previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
+
+	enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
+	enable_reg = (enable_reg & ~(0x0004));
+	writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
+	readw(amd_irongate_private.registers+AMD_GARTENABLE);	/* PCI Posting. */
+
+	/* Write back the previous size and disable gart translation */
+	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
+	temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+	pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
+	iounmap((void __iomem *) amd_irongate_private.registers);
+}
+
+/*
+ * This routine could be implemented by taking the addresses
+ * written to the GATT and flushing them individually.  However,
+ * it currently just flushes the whole table, which is probably
+ * more efficient, since agp_memory blocks can span a large
+ * number of entries.
+ */
+
+static void amd_irongate_tlbflush(struct agp_memory *temp)
+{
+	writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
+	readl(amd_irongate_private.registers+AMD_TLBFLUSH);	/* PCI Posting. */
+}
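+
+/*
+ * Editor's note: the writel()/readl() pairs throughout this file are
+ * the usual PCI-posting idiom.  A minimal sketch of the pattern, with
+ * a made-up register offset:
+ */
+#if 0	/* illustrative example, never built */
+static void example_posted_write(volatile u8 __iomem *regs)
+{
+	writel(1, regs + EXAMPLE_REG);	/* write may sit in a posting buffer */
+	readl(regs + EXAMPLE_REG);	/* read-back forces it to the device */
+}
+#endif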
+
+static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+	int i, j, num_entries;
+	unsigned long __iomem *cur_gatt;
+	unsigned long addr;
+
+	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+	if (type != mem->type ||
+	    agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
+		return -EINVAL;
+
+	if ((pg_start + mem->page_count) > num_entries)
+		return -EINVAL;
+
+	j = pg_start;
+	while (j < (pg_start + mem->page_count)) {
+		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
+			return -EBUSY;
+		j++;
+	}
+
+	if (!mem->is_flushed) {
+		global_cache_flush();
+		mem->is_flushed = true;
+	}
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		writel(agp_generic_mask_memory(agp_bridge,
+					       page_to_phys(mem->pages[i]),
+					       mem->type),
+		       cur_gatt+GET_GATT_OFF(addr));
+		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
+	}
+	amd_irongate_tlbflush(mem);
+	return 0;
+}
+
+static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+	int i;
+	unsigned long __iomem *cur_gatt;
+	unsigned long addr;
+
+	if (type != mem->type ||
+	    agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
+		return -EINVAL;
+
+	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
+	}
+
+	amd_irongate_tlbflush(mem);
+	return 0;
+}
+
+static const struct aper_size_info_lvl2 amd_irongate_sizes[7] =
+{
+	{2048, 524288, 0x0000000c},
+	{1024, 262144, 0x0000000a},
+	{512, 131072, 0x00000008},
+	{256, 65536, 0x00000006},
+	{128, 32768, 0x00000004},
+	{64, 16384, 0x00000002},
+	{32, 8192, 0x00000000}
+};
+
+static const struct gatt_mask amd_irongate_masks[] =
+{
+	{.mask = 1, .type = 0}
+};
+
+static const struct agp_bridge_driver amd_irongate_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= amd_irongate_sizes,
+	.size_type		= LVL2_APER_SIZE,
+	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
+	.configure		= amd_irongate_configure,
+	.fetch_size		= amd_irongate_fetch_size,
+	.cleanup		= amd_irongate_cleanup,
+	.tlb_flush		= amd_irongate_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= amd_irongate_masks,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= amd_create_gatt_table,
+	.free_gatt_table	= amd_free_gatt_table,
+	.insert_memory		= amd_insert_memory,
+	.remove_memory		= amd_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static struct agp_device_ids amd_agp_device_ids[] =
+{
+	{
+		.device_id	= PCI_DEVICE_ID_AMD_FE_GATE_7006,
+		.chipset_name	= "Irongate",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_AMD_FE_GATE_700E,
+		.chipset_name	= "761",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_AMD_FE_GATE_700C,
+		.chipset_name	= "760MP",
+	},
+	{ }, /* dummy final entry, always present */
+};
+
+static int agp_amdk7_probe(struct pci_dev *pdev,
+			   const struct pci_device_id *ent)
+{
+	struct agp_bridge_data *bridge;
+	u8 cap_ptr;
+	int j;
+
+	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+	if (!cap_ptr)
+		return -ENODEV;
+
+	j = ent - agp_amdk7_pci_table;
+	dev_info(&pdev->dev, "AMD %s chipset\n",
+		 amd_agp_device_ids[j].chipset_name);
+
+	bridge = agp_alloc_bridge();
+	if (!bridge)
+		return -ENOMEM;
+
+	bridge->driver = &amd_irongate_driver;
+	bridge->dev_private_data = &amd_irongate_private;
+	bridge->dev = pdev;
+	bridge->capndx = cap_ptr;
+
+	/* 751 Errata (22564_B-1.PDF)
+	   erratum 20: strobe glitch with Nvidia NV10 GeForce cards.
+	   system controller may experience noise due to strong drive strengths
+	 */
+	if (pdev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
+		struct pci_dev *gfxcard=NULL;
+
+		cap_ptr = 0;
+		while (!cap_ptr) {
+			gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
+			if (!gfxcard) {
+				dev_info(&pdev->dev, "no AGP VGA controller\n");
+				return -ENODEV;
+			}
+			cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
+		}
+
+		/* With so many variants of NVidia cards, it's simpler just
+		   to blacklist them all, and then whitelist them as needed
+		   (if necessary at all). */
+		if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
+			bridge->flags |= AGP_ERRATA_1X;
+			dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n");
+		}
+		pci_dev_put(gfxcard);
+	}
+
+	/* 761 Errata (23613_F.pdf)
+	 * Revisions B0/B1 were a disaster.
+	 * erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X
+	 * erratum 45: Timing problem prevents fast writes -- Disable fast write.
+	 * erratum 46: Setup violation on AGP SBA pins - Disable side band addressing.
+	 * With this lot disabled, we should prevent lockups. */
+	if (pdev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
+		if (pdev->revision == 0x10 || pdev->revision == 0x11) {
+			bridge->flags = AGP_ERRATA_FASTWRITES;
+			bridge->flags |= AGP_ERRATA_SBA;
+			bridge->flags |= AGP_ERRATA_1X;
+			dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n");
+		}
+	}
+
+	/* Fill in the mode register */
+	pci_read_config_dword(pdev,
+			bridge->capndx+PCI_AGP_STATUS,
+			&bridge->mode);
+
+	pci_set_drvdata(pdev, bridge);
+	return agp_add_bridge(bridge);
+}
+
+static void agp_amdk7_remove(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+	agp_remove_bridge(bridge);
+	agp_put_bridge(bridge);
+}
+
+#ifdef CONFIG_PM
+
+static int agp_amdk7_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+static int agp_amdk7_resume(struct pci_dev *pdev)
+{
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	return amd_irongate_driver.configure();
+}
+
+#endif /* CONFIG_PM */
+
+/* must be the same order as name table above */
+static const struct pci_device_id agp_amdk7_pci_table[] = {
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_AMD,
+	.device		= PCI_DEVICE_ID_AMD_FE_GATE_7006,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_AMD,
+	.device		= PCI_DEVICE_ID_AMD_FE_GATE_700E,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_AMD,
+	.device		= PCI_DEVICE_ID_AMD_FE_GATE_700C,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table);
+
+static struct pci_driver agp_amdk7_pci_driver = {
+	.name		= "agpgart-amdk7",
+	.id_table	= agp_amdk7_pci_table,
+	.probe		= agp_amdk7_probe,
+	.remove		= agp_amdk7_remove,
+#ifdef CONFIG_PM
+	.suspend	= agp_amdk7_suspend,
+	.resume		= agp_amdk7_resume,
+#endif
+};
+
+static int __init agp_amdk7_init(void)
+{
+	if (agp_off)
+		return -EINVAL;
+	return pci_register_driver(&agp_amdk7_pci_driver);
+}
+
+static void __exit agp_amdk7_cleanup(void)
+{
+	pci_unregister_driver(&agp_amdk7_pci_driver);
+}
+
+module_init(agp_amdk7_init);
+module_exit(agp_amdk7_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
new file mode 100644
index 0000000..c69e39f
--- /dev/null
+++ b/drivers/char/agp/amd64-agp.c
@@ -0,0 +1,818 @@
+/*
+ * Copyright 2001-2003 SuSE Labs.
+ * Distributed under the GNU public license, v2.
+ *
+ * This is a GART driver for the AMD Opteron/Athlon64 on-CPU northbridge.
+ * It also includes support for the AMD 8151 AGP bridge,
+ * although it doesn't actually do much, as all the real
+ * work is done in the northbridge(s).
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/mmzone.h>
+#include <asm/page.h>		/* PAGE_SIZE */
+#include <asm/e820/api.h>
+#include <asm/amd_nb.h>
+#include <asm/gart.h>
+#include "agp.h"
+
+/* NVIDIA K8 registers */
+#define NVIDIA_X86_64_0_APBASE		0x10
+#define NVIDIA_X86_64_1_APBASE1		0x50
+#define NVIDIA_X86_64_1_APLIMIT1	0x54
+#define NVIDIA_X86_64_1_APSIZE		0xa8
+#define NVIDIA_X86_64_1_APBASE2		0xd8
+#define NVIDIA_X86_64_1_APLIMIT2	0xdc
+
+/* ULi K8 registers */
+#define ULI_X86_64_BASE_ADDR		0x10
+#define ULI_X86_64_HTT_FEA_REG		0x50
+#define ULI_X86_64_ENU_SCR_REG		0x54
+
+static struct resource *aperture_resource;
+static bool __initdata agp_try_unsupported = true;
+static int agp_bridges_found;
+
+static void amd64_tlbflush(struct agp_memory *temp)
+{
+	amd_flush_garts();
+}
+
+static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+	int i, j, num_entries;
+	long long tmp;
+	int mask_type;
+	struct agp_bridge_data *bridge = mem->bridge;
+	u32 pte;
+
+	num_entries = agp_num_entries();
+
+	if (type != mem->type)
+		return -EINVAL;
+	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+	if (mask_type != 0)
+		return -EINVAL;
+
+
+	/* Make sure we can fit the range in the gatt table. */
+	/* FIXME: could wrap */
+	if (((unsigned long)pg_start + mem->page_count) > num_entries)
+		return -EINVAL;
+
+	j = pg_start;
+
+	/* gatt table should be empty. */
+	while (j < (pg_start + mem->page_count)) {
+		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
+			return -EBUSY;
+		j++;
+	}
+
+	if (!mem->is_flushed) {
+		global_cache_flush();
+		mem->is_flushed = true;
+	}
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		tmp = agp_bridge->driver->mask_memory(agp_bridge,
+						      page_to_phys(mem->pages[i]),
+						      mask_type);
+
+		BUG_ON(tmp & 0xffffff0000000ffcULL);
+		pte = (tmp & 0x000000ff00000000ULL) >> 28;
+		pte |=(tmp & 0x00000000fffff000ULL);
+		pte |= GPTE_VALID | GPTE_COHERENT;
+
+		writel(pte, agp_bridge->gatt_table+j);
+		readl(agp_bridge->gatt_table+j);	/* PCI Posting. */
+	}
+	amd64_tlbflush(mem);
+	return 0;
+}
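+
+/*
+ * Editor's sketch of the PTE packing above: the GART PTE is a 32-bit
+ * word, so bits 39:32 of the 40-bit physical address are folded into
+ * PTE bits 11:4 while bits 31:12 stay in place.  GPTE_VALID and
+ * GPTE_COHERENT come from <asm/gart.h>.
+ */
+#if 0	/* illustrative example, never built */
+static u32 example_make_gart_pte(u64 phys)
+{
+	u32 pte;
+
+	pte  = (phys & 0x000000ff00000000ULL) >> 28;	/* addr[39:32] -> pte[11:4] */
+	pte |= (phys & 0x00000000fffff000ULL);		/* addr[31:12] -> pte[31:12] */
+	pte |= GPTE_VALID | GPTE_COHERENT;		/* present and cache-coherent */
+	return pte;
+}
+#endif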
+
+/*
+ * This hack alters the order element according
+ * to the size of a long. It sucks. I totally disown this, even
+ * though it does appear to work for the most part.
+ */
+static struct aper_size_info_32 amd64_aperture_sizes[7] =
+{
+	{32,   8192,   3+(sizeof(long)/8), 0 },
+	{64,   16384,  4+(sizeof(long)/8), 1<<1 },
+	{128,  32768,  5+(sizeof(long)/8), 1<<2 },
+	{256,  65536,  6+(sizeof(long)/8), 1<<1 | 1<<2 },
+	{512,  131072, 7+(sizeof(long)/8), 1<<3 },
+	{1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3},
+	{2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3}
+};
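+
+/*
+ * Editor's worked example for the table above: a 32 MB aperture holds
+ * 32 MB / 4 KB = 8192 entries; at 4 bytes per PTE the GATT needs
+ * 32 KB, i.e. order 3 (2^3 pages).  sizeof(long)/8 is 0 on 32-bit and
+ * 1 on 64-bit kernels, so 64-bit builds allocate one order more.
+ */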
+
+
+/*
+ * Get the current aperture size from the x86-64 northbridge.
+ * Note that there may be multiple x86-64 northbridges, but we just
+ * return the value from the first one we find.  The set_size
+ * functions keep the rest coherent anyway.  Or at least should do.
+ */
+static int amd64_fetch_size(void)
+{
+	struct pci_dev *dev;
+	int i;
+	u32 temp;
+	struct aper_size_info_32 *values;
+
+	dev = node_to_amd_nb(0)->misc;
+	if (dev == NULL)
+		return 0;
+
+	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &temp);
+	temp = (temp & 0xe);
+	values = A_SIZE_32(amd64_aperture_sizes);
+
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge->previous_size =
+			    agp_bridge->current_size = (void *) (values + i);
+
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+	return 0;
+}
+
+/*
+ * In a multiprocessor x86-64 system, this function gets
+ * called once for each CPU.
+ */
+static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
+{
+	u64 aperturebase;
+	u32 tmp;
+	u64 aper_base;
+
+	/* Address to map to */
+	pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
+	aperturebase = (u64)tmp << 25;
+	aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
+
+	enable_gart_translation(hammer, gatt_table);
+
+	return aper_base;
+}
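+
+/*
+ * Editor's note on the shift above: AMD64_GARTAPERTUREBASE stores the
+ * aperture base in 32 MB units, so the value is shifted left by 25
+ * bits (2^25 = 32 MB) to recover the physical address.  For example a
+ * register value of 0x40 gives 0x40 << 25 = 0x80000000, an aperture
+ * at 2 GB.
+ */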
+
+
+static const struct aper_size_info_32 amd_8151_sizes[7] =
+{
+	{2048, 524288, 9, 0x00000000 },	/* 0 0 0 0 0 0 */
+	{1024, 262144, 8, 0x00000400 },	/* 1 0 0 0 0 0 */
+	{512,  131072, 7, 0x00000600 },	/* 1 1 0 0 0 0 */
+	{256,  65536,  6, 0x00000700 },	/* 1 1 1 0 0 0 */
+	{128,  32768,  5, 0x00000720 },	/* 1 1 1 1 0 0 */
+	{64,   16384,  4, 0x00000730 },	/* 1 1 1 1 1 0 */
+	{32,   8192,   3, 0x00000738 }	/* 1 1 1 1 1 1 */
+};
+
+static int amd_8151_configure(void)
+{
+	unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
+	int i;
+
+	if (!amd_nb_has_feature(AMD_NB_GART))
+		return 0;
+
+	/* Configure AGP regs in each x86-64 host bridge. */
+	for (i = 0; i < amd_nb_num(); i++) {
+		agp_bridge->gart_bus_addr =
+			amd64_configure(node_to_amd_nb(i)->misc, gatt_bus);
+	}
+	amd_flush_garts();
+	return 0;
+}
+
+
+static void amd64_cleanup(void)
+{
+	u32 tmp;
+	int i;
+
+	if (!amd_nb_has_feature(AMD_NB_GART))
+		return;
+
+	for (i = 0; i < amd_nb_num(); i++) {
+		struct pci_dev *dev = node_to_amd_nb(i)->misc;
+		/* disable gart translation */
+		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
+		tmp &= ~GARTEN;
+		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
+	}
+}
+
+
+static const struct agp_bridge_driver amd_8151_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= amd_8151_sizes,
+	.size_type		= U32_APER_SIZE,
+	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
+	.configure		= amd_8151_configure,
+	.fetch_size		= amd64_fetch_size,
+	.cleanup		= amd64_cleanup,
+	.tlb_flush		= amd64_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= NULL,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= amd64_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+/* Some basic sanity checks for the aperture. */
+static int agp_aperture_valid(u64 aper, u32 size)
+{
+	if (!aperture_valid(aper, size, 32*1024*1024))
+		return 0;
+
+	/* Request the aperture.  This catches cases where someone else
+	   already put a mapping in there; this happens with some very
+	   broken BIOSes.
+
+	   Maybe better to use pci_assign_resource/pci_enable_device
+	   instead of trusting the bridges? */
+	if (!aperture_resource &&
+	    !(aperture_resource = request_mem_region(aper, size, "aperture"))) {
+		printk(KERN_ERR PFX "Aperture conflicts with PCI mapping.\n");
+		return 0;
+	}
+	return 1;
+}
+
+/*
+ * Windows-centric BIOSes sometimes set up the aperture only in the
+ * AGP bridge, not in the northbridge.  On AMD64 this is handled early
+ * in aperture.c, but when the IOMMU is not enabled or we run on a
+ * 32-bit kernel it needs to be redone here.
+ * Unfortunately it is impossible to fix the aperture at this point,
+ * because it is too late to allocate that much memory.  But at least
+ * we can error out cleanly instead of crashing.
+ */
+static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
+{
+	u64 aper, nb_aper;
+	int order = 0;
+	u32 nb_order, nb_base;
+	u16 apsize;
+
+	pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
+	nb_order = (nb_order >> 1) & 7;
+	pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
+	nb_aper = (u64)nb_base << 25;
+
+	/* Northbridge seems to contain crap. Try the AGP bridge. */
+
+	pci_read_config_word(agp, cap+0x14, &apsize);
+	if (apsize == 0xffff) {
+		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
+			return 0;
+		return -1;
+	}
+
+	apsize &= 0xfff;
+	/* Some BIOSes use weird encodings not in the AGPv3 table. */
+	if (apsize & 0xff)
+		apsize |= 0xf00;
+	order = 7 - hweight16(apsize);
+
+	aper = pci_bus_address(agp, AGP_APERTURE_BAR);
+
+	/*
+	 * On some sick chips APSIZE is 0.  This means it wants 4G,
+	 * so let's double-check the order, and trust the AMD NB
+	 * settings instead.
+	 */
+	if (order >= 0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
+		dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n",
+			 32 << order);
+		order = nb_order;
+	}
+
+	if (nb_order >= order) {
+		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
+			return 0;
+	}
+
+	dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
+		 aper, 32 << order);
+	if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
+		return -1;
+
+	gart_set_size_and_enable(nb, order);
+	pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);
+
+	return 0;
+}
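+
+/*
+ * Editor's worked example of the APSIZE decoding above: every cleared
+ * bit doubles the aperture, and order = 7 - hweight16(apsize).  An
+ * APSIZE of 0xf30 has 6 bits set, giving order 1 and a
+ * 32 MB << 1 = 64 MB aperture; an APSIZE of 0 has no bits set, giving
+ * order 7, the 4 GB case double-checked above.
+ */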
+
+static int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
+{
+	int i;
+
+	if (amd_cache_northbridges() < 0)
+		return -ENODEV;
+
+	if (!amd_nb_has_feature(AMD_NB_GART))
+		return -ENODEV;
+
+	for (i = 0; i < amd_nb_num(); i++) {
+		struct pci_dev *dev = node_to_amd_nb(i)->misc;
+		if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
+			dev_err(&dev->dev, "no usable aperture found\n");
+#ifdef __x86_64__
+			/* should port this to i386 */
+			dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n");
+#endif
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/* Handle AMD 8151 quirks */
+static void amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
+{
+	char *revstring;
+
+	switch (pdev->revision) {
+	case 0x01: revstring="A0"; break;
+	case 0x02: revstring="A1"; break;
+	case 0x11: revstring="B0"; break;
+	case 0x12: revstring="B1"; break;
+	case 0x13: revstring="B2"; break;
+	case 0x14: revstring="B3"; break;
+	default:   revstring="??"; break;
+	}
+
+	dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring);
+
+	/*
+	 * Work around errata:
+	 * chips before the B2 stepping incorrectly report v3.5.
+	 */
+	if (pdev->revision < 0x13) {
+		dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n");
+		bridge->major_version = 3;
+		bridge->minor_version = 0;
+	}
+}
+
+
+static const struct aper_size_info_32 uli_sizes[7] =
+{
+	{256, 65536, 6, 10},
+	{128, 32768, 5, 9},
+	{64, 16384, 4, 8},
+	{32, 8192, 3, 7},
+	{16, 4096, 2, 6},
+	{8, 2048, 1, 4},
+	{4, 1024, 0, 3}
+};
+static int uli_agp_init(struct pci_dev *pdev)
+{
+	u32 httfea,baseaddr,enuscr;
+	struct pci_dev *dev1;
+	int i, ret;
+	unsigned size = amd64_fetch_size();
+
+	dev_info(&pdev->dev, "setting up ULi AGP\n");
+	dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(0, 0));
+	if (dev1 == NULL) {
+		dev_info(&pdev->dev, "can't find ULi secondary device\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(uli_sizes); i++)
+		if (uli_sizes[i].size == size)
+			break;
+
+	if (i == ARRAY_SIZE(uli_sizes)) {
+		dev_info(&pdev->dev, "no ULi size found for %d\n", size);
+		ret = -ENODEV;
+		goto put;
+	}
+
+	/* shadow x86-64 registers into ULi registers */
+	pci_read_config_dword(node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
+			       &httfea);
+
+	/* if x86-64 aperture base is beyond 4G, exit here */
+	if ((httfea & 0x7fff) >> (32 - 25)) {
+		ret = -ENODEV;
+		goto put;
+	}
+
+	httfea = (httfea & 0x7fff) << 25;
+
+	pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr);
+	baseaddr &= ~PCI_BASE_ADDRESS_MEM_MASK;
+	baseaddr |= httfea;
+	pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr);
+
+	enuscr = httfea + (size * 1024 * 1024) - 1;
+	pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
+	pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
+	ret = 0;
+put:
+	pci_dev_put(dev1);
+	return ret;
+}
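+
+/*
+ * Editor's note on the arithmetic above: the northbridge keeps the
+ * aperture base in 32 MB units in the low 15 bits, so any base at or
+ * above 4 GB needs more than 7 of those bits ((httfea & 0x7fff) >> 7
+ * != 0) and cannot be expressed in the 32-bit ULi shadow registers,
+ * hence the early exit.  The base/limit pair then brackets the
+ * aperture: limit = base + size - 1.
+ */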
+
+
+static const struct aper_size_info_32 nforce3_sizes[5] =
+{
+	{512,  131072, 7, 0x00000000 },
+	{256,  65536,  6, 0x00000008 },
+	{128,  32768,  5, 0x0000000C },
+	{64,   16384,  4, 0x0000000E },
+	{32,   8192,   3, 0x0000000F }
+};
+
+/* Handle shadow device of the Nvidia NForce3 */
+/* CHECK-ME: the original 2.4 version set up some IORRs.  Check whether that is needed. */
+static int nforce3_agp_init(struct pci_dev *pdev)
+{
+	u32 tmp, apbase, apbar, aplimit;
+	struct pci_dev *dev1;
+	int i, ret;
+	unsigned size = amd64_fetch_size();
+
+	dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
+
+	dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0));
+	if (dev1 == NULL) {
+		dev_info(&pdev->dev, "can't find Nforce3 secondary device\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(nforce3_sizes); i++)
+		if (nforce3_sizes[i].size == size)
+			break;
+
+	if (i == ARRAY_SIZE(nforce3_sizes)) {
+		dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
+		ret = -ENODEV;
+		goto put;
+	}
+
+	pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
+	tmp &= ~(0xf);
+	tmp |= nforce3_sizes[i].size_value;
+	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
+
+	/* shadow x86-64 registers into NVIDIA registers */
+	pci_read_config_dword(node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
+			       &apbase);
+
+	/* if x86-64 aperture base is beyond 4G, exit here */
+	if ((apbase & 0x7fff) >> (32 - 25)) {
+		dev_info(&pdev->dev, "aperture base > 4G\n");
+		ret = -ENODEV;
+		goto put;
+	}
+
+	apbase = (apbase & 0x7fff) << 25;
+
+	pci_read_config_dword(pdev, NVIDIA_X86_64_0_APBASE, &apbar);
+	apbar &= ~PCI_BASE_ADDRESS_MEM_MASK;
+	apbar |= apbase;
+	pci_write_config_dword(pdev, NVIDIA_X86_64_0_APBASE, apbar);
+
+	aplimit = apbase + (size * 1024 * 1024) - 1;
+	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE1, apbase);
+	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT1, aplimit);
+	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
+	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
+
+	ret = 0;
+put:
+	pci_dev_put(dev1);
+
+	return ret;
+}
+
+static int agp_amd64_probe(struct pci_dev *pdev,
+			   const struct pci_device_id *ent)
+{
+	struct agp_bridge_data *bridge;
+	u8 cap_ptr;
+	int err;
+
+	/* The Highlander principle: there can be only one. */
+	if (agp_bridges_found)
+		return -ENODEV;
+
+	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+	if (!cap_ptr)
+		return -ENODEV;
+
+	/* Could check for AGPv3 here */
+
+	bridge = agp_alloc_bridge();
+	if (!bridge)
+		return -ENOMEM;
+
+	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+	    pdev->device == PCI_DEVICE_ID_AMD_8151_0) {
+		amd8151_init(pdev, bridge);
+	} else {
+		dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n",
+			 pdev->vendor, pdev->device);
+	}
+
+	bridge->driver = &amd_8151_driver;
+	bridge->dev = pdev;
+	bridge->capndx = cap_ptr;
+
+	/* Fill in the mode register */
+	pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
+
+	if (cache_nbs(pdev, cap_ptr) == -1) {
+		agp_put_bridge(bridge);
+		return -ENODEV;
+	}
+
+	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
+		int ret = nforce3_agp_init(pdev);
+		if (ret) {
+			agp_put_bridge(bridge);
+			return ret;
+		}
+	}
+
+	if (pdev->vendor == PCI_VENDOR_ID_AL) {
+		int ret = uli_agp_init(pdev);
+		if (ret) {
+			agp_put_bridge(bridge);
+			return ret;
+		}
+	}
+
+	pci_set_drvdata(pdev, bridge);
+	err = agp_add_bridge(bridge);
+	if (err < 0)
+		return err;
+
+	agp_bridges_found++;
+	return 0;
+}
+
+static void agp_amd64_remove(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+	release_mem_region(virt_to_phys(bridge->gatt_table_real),
+			   amd64_aperture_sizes[bridge->aperture_size_idx].size);
+	agp_remove_bridge(bridge);
+	agp_put_bridge(bridge);
+
+	agp_bridges_found--;
+}
+
+#ifdef CONFIG_PM
+
+static int agp_amd64_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+static int agp_amd64_resume(struct pci_dev *pdev)
+{
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA)
+		nforce3_agp_init(pdev);
+
+	return amd_8151_configure();
+}
+
+#endif /* CONFIG_PM */
+
+static const struct pci_device_id agp_amd64_pci_table[] = {
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_AMD,
+	.device		= PCI_DEVICE_ID_AMD_8151_0,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	/* ULi M1689 */
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_AL,
+	.device		= PCI_DEVICE_ID_AL_M1689,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	/* VIA K8T800Pro */
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_VIA,
+	.device		= PCI_DEVICE_ID_VIA_K8T800PRO_0,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	/* VIA K8T800 */
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_VIA,
+	.device		= PCI_DEVICE_ID_VIA_8385_0,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	/* VIA K8M800 / K8N800 */
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_VIA,
+	.device		= PCI_DEVICE_ID_VIA_8380_0,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	/* VIA K8M890 / K8N890 */
+	{
+	.class          = (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask     = ~0,
+	.vendor         = PCI_VENDOR_ID_VIA,
+	.device         = PCI_DEVICE_ID_VIA_VT3336,
+	.subvendor      = PCI_ANY_ID,
+	.subdevice      = PCI_ANY_ID,
+	},
+	/* VIA K8T890 */
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_VIA,
+	.device		= PCI_DEVICE_ID_VIA_3238_0,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	/* VIA K8T800/K8M800/K8N800 */
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_VIA,
+	.device		= PCI_DEVICE_ID_VIA_838X_1,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	/* NForce3 */
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_NVIDIA,
+	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE3,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_NVIDIA,
+	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE3S,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	/* SIS 755 */
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_SI,
+	.device		= PCI_DEVICE_ID_SI_755,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	/* SIS 760 */
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_SI,
+	.device		= PCI_DEVICE_ID_SI_760,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	/* ALI/ULI M1695 */
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_AL,
+	.device		= 0x1695,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
+
+static const struct pci_device_id agp_amd64_pci_promisc_table[] = {
+	{ PCI_DEVICE_CLASS(0, 0) },
+	{ }
+};
+
+static struct pci_driver agp_amd64_pci_driver = {
+	.name		= "agpgart-amd64",
+	.id_table	= agp_amd64_pci_table,
+	.probe		= agp_amd64_probe,
+	.remove		= agp_amd64_remove,
+#ifdef CONFIG_PM
+	.suspend	= agp_amd64_suspend,
+	.resume		= agp_amd64_resume,
+#endif
+};
+
+
+/* Not static due to IOMMU code calling it early. */
+int __init agp_amd64_init(void)
+{
+	int err = 0;
+
+	if (agp_off)
+		return -EINVAL;
+
+	err = pci_register_driver(&agp_amd64_pci_driver);
+	if (err < 0)
+		return err;
+
+	if (agp_bridges_found == 0) {
+		if (!agp_try_unsupported && !agp_try_unsupported_boot) {
+			printk(KERN_INFO PFX "No supported AGP bridge found.\n");
+#ifdef MODULE
+			printk(KERN_INFO PFX "You can try agp_try_unsupported=1\n");
+#else
+			printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
+#endif
+			pci_unregister_driver(&agp_amd64_pci_driver);
+			return -ENODEV;
+		}
+
+		/* First check that we have at least one AMD64 NB */
+		if (!pci_dev_present(amd_nb_misc_ids)) {
+			pci_unregister_driver(&agp_amd64_pci_driver);
+			return -ENODEV;
+		}
+
+		/* Look for any AGP bridge */
+		agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
+		err = driver_attach(&agp_amd64_pci_driver.driver);
+		if (err == 0 && agp_bridges_found == 0) {
+			pci_unregister_driver(&agp_amd64_pci_driver);
+			err = -ENODEV;
+		}
+	}
+	return err;
+}
+
+static int __init agp_amd64_mod_init(void)
+{
+#ifndef MODULE
+	if (gart_iommu_aperture)
+		return agp_bridges_found ? 0 : -ENODEV;
+#endif
+	return agp_amd64_init();
+}
+
+static void __exit agp_amd64_cleanup(void)
+{
+#ifndef MODULE
+	if (gart_iommu_aperture)
+		return;
+#endif
+	if (aperture_resource)
+		release_resource(aperture_resource);
+	pci_unregister_driver(&agp_amd64_pci_driver);
+}
+
+module_init(agp_amd64_mod_init);
+module_exit(agp_amd64_cleanup);
+
+MODULE_AUTHOR("Dave Jones, Andi Kleen");
+module_param(agp_try_unsupported, bool, 0);
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
new file mode 100644
index 0000000..20bf5f7
--- /dev/null
+++ b/drivers/char/agp/ati-agp.c
@@ -0,0 +1,586 @@
+/*
+ * ATi AGPGART routines.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/agp_backend.h>
+#include <asm/agp.h>
+#include <asm/set_memory.h>
+#include "agp.h"
+
+#define ATI_GART_MMBASE_BAR	1
+#define ATI_RS100_APSIZE	0xac
+#define ATI_RS100_IG_AGPMODE	0xb0
+#define ATI_RS300_APSIZE	0xf8
+#define ATI_RS300_IG_AGPMODE	0xfc
+#define ATI_GART_FEATURE_ID		0x00
+#define ATI_GART_BASE			0x04
+#define ATI_GART_CACHE_SZBASE		0x08
+#define ATI_GART_CACHE_CNTRL		0x0c
+#define ATI_GART_CACHE_ENTRY_CNTRL	0x10
+
+
+static const struct aper_size_info_lvl2 ati_generic_sizes[7] =
+{
+	{2048, 524288, 0x0000000c},
+	{1024, 262144, 0x0000000a},
+	{512, 131072, 0x00000008},
+	{256, 65536, 0x00000006},
+	{128, 32768, 0x00000004},
+	{64, 16384, 0x00000002},
+	{32, 8192, 0x00000000}
+};
+
+static struct gatt_mask ati_generic_masks[] =
+{
+	{ .mask = 1, .type = 0}
+};
+
+
+struct ati_page_map {
+	unsigned long *real;
+	unsigned long __iomem *remapped;
+};
+
+static struct _ati_generic_private {
+	volatile u8 __iomem *registers;
+	struct ati_page_map **gatt_pages;
+	int num_tables;
+} ati_generic_private;
+
+static int ati_create_page_map(struct ati_page_map *page_map)
+{
+	int i, err = 0;
+
+	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+	if (page_map->real == NULL)
+		return -ENOMEM;
+
+	set_memory_uc((unsigned long)page_map->real, 1);
+	err = map_page_into_agp(virt_to_page(page_map->real));
+	if (err) {
+		set_memory_wb((unsigned long)page_map->real, 1);
+		free_page((unsigned long)page_map->real);
+		return err;
+	}
+	page_map->remapped = page_map->real;
+
+	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
+		writel(agp_bridge->scratch_page, page_map->remapped+i);
+		readl(page_map->remapped+i);	/* PCI Posting. */
+	}
+
+	return 0;
+}
+
+
+static void ati_free_page_map(struct ati_page_map *page_map)
+{
+	unmap_page_from_agp(virt_to_page(page_map->real));
+	set_memory_wb((unsigned long)page_map->real, 1);
+	free_page((unsigned long) page_map->real);
+}
+
+
+static void ati_free_gatt_pages(void)
+{
+	int i;
+	struct ati_page_map **tables;
+	struct ati_page_map *entry;
+
+	tables = ati_generic_private.gatt_pages;
+	for (i = 0; i < ati_generic_private.num_tables; i++) {
+		entry = tables[i];
+		if (entry != NULL) {
+			if (entry->real != NULL)
+				ati_free_page_map(entry);
+			kfree(entry);
+		}
+	}
+	kfree(tables);
+}
+
+
+static int ati_create_gatt_pages(int nr_tables)
+{
+	struct ati_page_map **tables;
+	struct ati_page_map *entry;
+	int retval = 0;
+	int i;
+
+	tables = kcalloc(nr_tables + 1, sizeof(struct ati_page_map *),
+			 GFP_KERNEL);
+	if (tables == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < nr_tables; i++) {
+		entry = kzalloc(sizeof(struct ati_page_map), GFP_KERNEL);
+		tables[i] = entry;
+		if (entry == NULL) {
+			retval = -ENOMEM;
+			break;
+		}
+		retval = ati_create_page_map(entry);
+		if (retval != 0)
+			break;
+	}
+	ati_generic_private.num_tables = i;
+	ati_generic_private.gatt_pages = tables;
+
+	if (retval != 0)
+		ati_free_gatt_pages();
+
+	return retval;
+}
+
+static int is_r200(void)
+{
+	if ((agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS100) ||
+	    (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200) ||
+	    (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200_B) ||
+	    (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS250))
+		return 1;
+	return 0;
+}
+
+static int ati_fetch_size(void)
+{
+	int i;
+	u32 temp;
+	struct aper_size_info_lvl2 *values;
+
+	if (is_r200())
+		pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+	else
+		pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+
+	temp = (temp & 0x0000000e);
+	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge->previous_size =
+			    agp_bridge->current_size = (void *) (values + i);
+
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+static void ati_tlbflush(struct agp_memory * mem)
+{
+	writel(1, ati_generic_private.registers+ATI_GART_CACHE_CNTRL);
+	readl(ati_generic_private.registers+ATI_GART_CACHE_CNTRL);	/* PCI Posting. */
+}
+
+static void ati_cleanup(void)
+{
+	struct aper_size_info_lvl2 *previous_size;
+	u32 temp;
+
+	previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
+
+	/* Write back the previous size and disable gart translation */
+	if (is_r200()) {
+		pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+		temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+		pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
+	} else {
+		pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+		temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+		pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
+	}
+	iounmap((volatile u8 __iomem *)ati_generic_private.registers);
+}
+
+
+static int ati_configure(void)
+{
+	phys_addr_t reg;
+	u32 temp;
+
+	/* Get the memory mapped registers */
+	reg = pci_resource_start(agp_bridge->dev, ATI_GART_MMBASE_BAR);
+	ati_generic_private.registers = (volatile u8 __iomem *) ioremap(reg, 4096);
+
+	if (!ati_generic_private.registers)
+		return -ENOMEM;
+
+	if (is_r200())
+		pci_write_config_dword(agp_bridge->dev, ATI_RS100_IG_AGPMODE, 0x20000);
+	else
+		pci_write_config_dword(agp_bridge->dev, ATI_RS300_IG_AGPMODE, 0x20000);
+
+	/* address to map to */
+	/*
+	agp_bridge.gart_bus_addr = pci_bus_address(agp_bridge.dev,
+						   AGP_APERTURE_BAR);
+	printk(KERN_INFO PFX "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr);
+	*/
+	writel(0x60000, ati_generic_private.registers+ATI_GART_FEATURE_ID);
+	readl(ati_generic_private.registers+ATI_GART_FEATURE_ID);	/* PCI Posting.*/
+
+	/* SIGNALED_SYSTEM_ERROR @ NB_STATUS */
+	pci_read_config_dword(agp_bridge->dev, PCI_COMMAND, &temp);
+	pci_write_config_dword(agp_bridge->dev, PCI_COMMAND, temp | (1<<14));
+
+	/* Write out the address of the gatt table */
+	writel(agp_bridge->gatt_bus_addr, ati_generic_private.registers+ATI_GART_BASE);
+	readl(ati_generic_private.registers+ATI_GART_BASE);	/* PCI Posting. */
+
+	return 0;
+}
+
+
+#ifdef CONFIG_PM
+static int agp_ati_suspend(struct pci_dev *dev, pm_message_t state)
+{
+	pci_save_state(dev);
+	pci_set_power_state(dev, PCI_D3hot);
+
+	return 0;
+}
+
+static int agp_ati_resume(struct pci_dev *dev)
+{
+	pci_set_power_state(dev, PCI_D0);
+	pci_restore_state(dev);
+
+	return ati_configure();
+}
+#endif
+
+/*
+ * Since we don't need contiguous memory we just try
+ * to get the gatt table once, with no fallback to
+ * smaller aperture sizes.
+ */
+
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#undef  GET_GATT
+#define GET_GATT(addr) (ati_generic_private.gatt_pages[\
+	GET_PAGE_DIR_IDX(addr)]->remapped)
+
+static int ati_insert_memory(struct agp_memory * mem,
+			     off_t pg_start, int type)
+{
+	int i, j, num_entries;
+	unsigned long __iomem *cur_gatt;
+	unsigned long addr;
+	int mask_type;
+
+	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+	mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
+	if (mask_type != 0 || type != mem->type)
+		return -EINVAL;
+
+	if (mem->page_count == 0)
+		return 0;
+
+	if ((pg_start + mem->page_count) > num_entries)
+		return -EINVAL;
+
+	j = pg_start;
+	while (j < (pg_start + mem->page_count)) {
+		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		if (!PGE_EMPTY(agp_bridge,readl(cur_gatt+GET_GATT_OFF(addr))))
+			return -EBUSY;
+		j++;
+	}
+
+	if (!mem->is_flushed) {
+		/*CACHE_FLUSH(); */
+		global_cache_flush();
+		mem->is_flushed = true;
+	}
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		writel(agp_bridge->driver->mask_memory(agp_bridge,
+						       page_to_phys(mem->pages[i]),
+						       mem->type),
+		       cur_gatt+GET_GATT_OFF(addr));
+	}
+	readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */
+	agp_bridge->driver->tlb_flush(mem);
+	return 0;
+}
+
+static int ati_remove_memory(struct agp_memory * mem, off_t pg_start,
+			     int type)
+{
+	int i;
+	unsigned long __iomem *cur_gatt;
+	unsigned long addr;
+	int mask_type;
+
+	mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
+	if (mask_type != 0 || type != mem->type)
+		return -EINVAL;
+
+	if (mem->page_count == 0)
+		return 0;
+
+	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+	}
+
+	readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */
+	agp_bridge->driver->tlb_flush(mem);
+	return 0;
+}
+
+static int ati_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	struct aper_size_info_lvl2 *value;
+	struct ati_page_map page_dir;
+	unsigned long __iomem *cur_gatt;
+	unsigned long addr;
+	int retval;
+	u32 temp;
+	int i;
+	struct aper_size_info_lvl2 *current_size;
+
+	value = A_SIZE_LVL2(agp_bridge->current_size);
+	retval = ati_create_page_map(&page_dir);
+	if (retval != 0)
+		return retval;
+
+	retval = ati_create_gatt_pages(value->num_entries / 1024);
+	if (retval != 0) {
+		ati_free_page_map(&page_dir);
+		return retval;
+	}
+
+	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
+	agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
+	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+
+	/* Write out the size register */
+	current_size = A_SIZE_LVL2(agp_bridge->current_size);
+
+	if (is_r200()) {
+		pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+		temp = (((temp & ~(0x0000000e)) | current_size->size_value)
+			| 0x00000001);
+		pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
+		pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+	} else {
+		pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+		temp = (((temp & ~(0x0000000e)) | current_size->size_value)
+			| 0x00000001);
+		pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
+		pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+	}
+
+	/*
+	 * Get the address for the gart region.
+	 * This is a bus address even on the alpha, b/c its
+	 * used to program the agp master not the cpu
+	 */
+	addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
+	agp_bridge->gart_bus_addr = addr;
+
+	/* Calculate the agp offset */
+	for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
+		writel(virt_to_phys(ati_generic_private.gatt_pages[i]->real) | 1,
+			page_dir.remapped+GET_PAGE_DIR_OFF(addr));
+		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
+	}
+
+	for (i = 0; i < value->num_entries; i++) {
+		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+	}
+
+	return 0;
+}
+
+static int ati_free_gatt_table(struct agp_bridge_data *bridge)
+{
+	struct ati_page_map page_dir;
+
+	page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
+	page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
+
+	ati_free_gatt_pages();
+	ati_free_page_map(&page_dir);
+	return 0;
+}
+
+static const struct agp_bridge_driver ati_generic_bridge = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= ati_generic_sizes,
+	.size_type		= LVL2_APER_SIZE,
+	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
+	.configure		= ati_configure,
+	.fetch_size		= ati_fetch_size,
+	.cleanup		= ati_cleanup,
+	.tlb_flush		= ati_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= ati_generic_masks,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= ati_create_gatt_table,
+	.free_gatt_table	= ati_free_gatt_table,
+	.insert_memory		= ati_insert_memory,
+	.remove_memory		= ati_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+
+static struct agp_device_ids ati_agp_device_ids[] =
+{
+	{
+		.device_id	= PCI_DEVICE_ID_ATI_RS100,
+		.chipset_name	= "IGP320/M",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_ATI_RS200,
+		.chipset_name	= "IGP330/340/345/350/M",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_ATI_RS200_B,
+		.chipset_name	= "IGP345M",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_ATI_RS250,
+		.chipset_name	= "IGP7000/M",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_ATI_RS300_100,
+		.chipset_name	= "IGP9100/M",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_ATI_RS300_133,
+		.chipset_name	= "IGP9100/M",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_ATI_RS300_166,
+		.chipset_name	= "IGP9100/M",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_ATI_RS300_200,
+		.chipset_name	= "IGP9100/M",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_ATI_RS350_133,
+		.chipset_name	= "IGP9000/M",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_ATI_RS350_200,
+		.chipset_name	= "IGP9100/M",
+	},
+	{ }, /* dummy final entry, always present */
+};
+
+static int agp_ati_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct agp_device_ids *devs = ati_agp_device_ids;
+	struct agp_bridge_data *bridge;
+	u8 cap_ptr;
+	int j;
+
+	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+	if (!cap_ptr)
+		return -ENODEV;
+
+	/* probe for known chipsets */
+	for (j = 0; devs[j].chipset_name; j++) {
+		if (pdev->device == devs[j].device_id)
+			goto found;
+	}
+
+	dev_err(&pdev->dev, "unsupported Ati chipset [%04x/%04x]\n",
+		pdev->vendor, pdev->device);
+	return -ENODEV;
+
+found:
+	bridge = agp_alloc_bridge();
+	if (!bridge)
+		return -ENOMEM;
+
+	bridge->dev = pdev;
+	bridge->capndx = cap_ptr;
+
+	bridge->driver = &ati_generic_bridge;
+
+	dev_info(&pdev->dev, "Ati %s chipset\n", devs[j].chipset_name);
+
+	/* Fill in the mode register */
+	pci_read_config_dword(pdev,
+			bridge->capndx+PCI_AGP_STATUS,
+			&bridge->mode);
+
+	pci_set_drvdata(pdev, bridge);
+	return agp_add_bridge(bridge);
+}
+
+static void agp_ati_remove(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+	agp_remove_bridge(bridge);
+	agp_put_bridge(bridge);
+}
+
+static const struct pci_device_id agp_ati_pci_table[] = {
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_ATI,
+	.device		= PCI_ANY_ID,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_ati_pci_table);
+
+static struct pci_driver agp_ati_pci_driver = {
+	.name		= "agpgart-ati",
+	.id_table	= agp_ati_pci_table,
+	.probe		= agp_ati_probe,
+	.remove		= agp_ati_remove,
+#ifdef CONFIG_PM
+	.suspend	= agp_ati_suspend,
+	.resume		= agp_ati_resume,
+#endif
+};
+
+static int __init agp_ati_init(void)
+{
+	if (agp_off)
+		return -EINVAL;
+	return pci_register_driver(&agp_ati_pci_driver);
+}
+
+static void __exit agp_ati_cleanup(void)
+{
+	pci_unregister_driver(&agp_ati_pci_driver);
+}
+
+module_init(agp_ati_init);
+module_exit(agp_ati_cleanup);
+
+MODULE_AUTHOR("Dave Jones");
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
new file mode 100644
index 0000000..38ffb28
--- /dev/null
+++ b/drivers/char/agp/backend.c
@@ -0,0 +1,366 @@
+/*
+ * AGPGART driver backend routines.
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2003 Dave Jones.
+ * Copyright (C) 1999 Jeff Hartmann.
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, DAVE JONES, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO:
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/pm.h>
+#include <linux/agp_backend.h>
+#include <linux/agpgart.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include "agp.h"
+
+/* Due to XFree86 brain-damage, we can't go to 1.0 until they
+ * fix some real stupidity. It's only by chance we can bump
+ * past 0.99 at all due to some boolean logic error. */
+#define AGPGART_VERSION_MAJOR 0
+#define AGPGART_VERSION_MINOR 103
+static const struct agp_version agp_current_version =
+{
+	.major = AGPGART_VERSION_MAJOR,
+	.minor = AGPGART_VERSION_MINOR,
+};
+
+struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *) =
+	&agp_generic_find_bridge;
+
+struct agp_bridge_data *agp_bridge;
+LIST_HEAD(agp_bridges);
+EXPORT_SYMBOL(agp_bridge);
+EXPORT_SYMBOL(agp_bridges);
+EXPORT_SYMBOL(agp_find_bridge);
+
+/**
+ *	agp_backend_acquire  -  attempt to acquire an agp backend.
+ *	@pdev: pci device of the bridge to acquire
+ */
+struct agp_bridge_data *agp_backend_acquire(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge;
+
+	bridge = agp_find_bridge(pdev);
+
+	if (!bridge)
+		return NULL;
+
+	if (atomic_read(&bridge->agp_in_use))
+		return NULL;
+	atomic_inc(&bridge->agp_in_use);
+	return bridge;
+}
+EXPORT_SYMBOL(agp_backend_acquire);
+
+
+/**
+ *	agp_backend_release  -  release the lock on the agp backend.
+ *	@bridge: the bridge to release
+ *
+ *	The caller must ensure that the graphics aperture translation table
+ *	is ready for use by another entity.
+ *
+ *	(Ensure that all memory it bound is unbound.)
+ */
+void agp_backend_release(struct agp_bridge_data *bridge)
+{
+
+	if (bridge)
+		atomic_dec(&bridge->agp_in_use);
+}
+EXPORT_SYMBOL(agp_backend_release);
+
+
+static const struct { int mem, agp; } maxes_table[] = {
+	{0, 0},
+	{32, 4},
+	{64, 28},
+	{128, 96},
+	{256, 204},
+	{512, 440},
+	{1024, 942},
+	{2048, 1920},
+	{4096, 3932}
+};
+
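+/*
+ * agp_find_max() interpolates linearly between the two maxes_table rows
+ * that bracket the amount of installed RAM.  A hypothetical worked
+ * example, assuming 384 MiB of RAM: the bracketing rows are {256, 204}
+ * and {512, 440}, so
+ *
+ *     result = 204 + (384 - 256) * (440 - 204) / (512 - 256) = 322 MiB,
+ *
+ * which the final shift then converts from MiB to pages.
+ */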
+static int agp_find_max(void)
+{
+	long memory, index, result;
+
+#if PAGE_SHIFT < 20
+	memory = totalram_pages >> (20 - PAGE_SHIFT);
+#else
+	memory = totalram_pages << (PAGE_SHIFT - 20);
+#endif
+	index = 1;
+
+	while ((memory > maxes_table[index].mem) && (index < 8))
+		index++;
+
+	result = maxes_table[index - 1].agp +
+	   ( (memory - maxes_table[index - 1].mem)  *
+	     (maxes_table[index].agp - maxes_table[index - 1].agp)) /
+	   (maxes_table[index].mem - maxes_table[index - 1].mem);
+
+	result = result << (20 - PAGE_SHIFT);
+	return result;
+}
+
+
+static int agp_backend_initialize(struct agp_bridge_data *bridge)
+{
+	int size_value, rc, got_gatt=0, got_keylist=0;
+
+	bridge->max_memory_agp = agp_find_max();
+	bridge->version = &agp_current_version;
+
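+	/*
+	 * Chipsets that cannot tolerate invalid GATT entries point every
+	 * unused entry at a single harmless "scratch" page instead.
+	 */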
+	if (bridge->driver->needs_scratch_page) {
+		struct page *page = bridge->driver->agp_alloc_page(bridge);
+
+		if (!page) {
+			dev_err(&bridge->dev->dev,
+				"can't get memory for scratch page\n");
+			return -ENOMEM;
+		}
+
+		bridge->scratch_page_page = page;
+		bridge->scratch_page_dma = page_to_phys(page);
+
+		bridge->scratch_page = bridge->driver->mask_memory(bridge,
+						   bridge->scratch_page_dma, 0);
+	}
+
+	size_value = bridge->driver->fetch_size();
+	if (size_value == 0) {
+		dev_err(&bridge->dev->dev, "can't determine aperture size\n");
+		rc = -EINVAL;
+		goto err_out;
+	}
+	if (bridge->driver->create_gatt_table(bridge)) {
+		dev_err(&bridge->dev->dev,
+			"can't get memory for graphics translation table\n");
+		rc = -ENOMEM;
+		goto err_out;
+	}
+	got_gatt = 1;
+
+	bridge->key_list = vzalloc(PAGE_SIZE * 4);
+	if (bridge->key_list == NULL) {
+		dev_err(&bridge->dev->dev,
+			"can't allocate memory for key lists\n");
+		rc = -ENOMEM;
+		goto err_out;
+	}
+	got_keylist = 1;
+
+	/* FIXME vmalloc'd memory not guaranteed contiguous */
+
+	if (bridge->driver->configure()) {
+		dev_err(&bridge->dev->dev, "error configuring host chipset\n");
+		rc = -EINVAL;
+		goto err_out;
+	}
+	INIT_LIST_HEAD(&bridge->mapped_list);
+	spin_lock_init(&bridge->mapped_lock);
+
+	return 0;
+
+err_out:
+	if (bridge->driver->needs_scratch_page) {
+		struct page *page = bridge->scratch_page_page;
+
+		bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP);
+		bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE);
+	}
+	if (got_gatt)
+		bridge->driver->free_gatt_table(bridge);
+	if (got_keylist) {
+		vfree(bridge->key_list);
+		bridge->key_list = NULL;
+	}
+	return rc;
+}
+
+/* cannot be __exit because it could be called from __init code */
+static void agp_backend_cleanup(struct agp_bridge_data *bridge)
+{
+	if (bridge->driver->cleanup)
+		bridge->driver->cleanup();
+	if (bridge->driver->free_gatt_table)
+		bridge->driver->free_gatt_table(bridge);
+
+	vfree(bridge->key_list);
+	bridge->key_list = NULL;
+
+	if (bridge->driver->agp_destroy_page &&
+	    bridge->driver->needs_scratch_page) {
+		struct page *page = bridge->scratch_page_page;
+
+		bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP);
+		bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE);
+	}
+}
+
+/* When we remove the global variable agp_bridge from all drivers
+ * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
+ */
+
+struct agp_bridge_data *agp_alloc_bridge(void)
+{
+	struct agp_bridge_data *bridge;
+
+	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+	if (!bridge)
+		return NULL;
+
+	atomic_set(&bridge->agp_in_use, 0);
+	atomic_set(&bridge->current_memory_agp, 0);
+
+	if (list_empty(&agp_bridges))
+		agp_bridge = bridge;
+
+	return bridge;
+}
+EXPORT_SYMBOL(agp_alloc_bridge);
+
+
+void agp_put_bridge(struct agp_bridge_data *bridge)
+{
+	kfree(bridge);
+
+	if (list_empty(&agp_bridges))
+		agp_bridge = NULL;
+}
+EXPORT_SYMBOL(agp_put_bridge);
+
+
+int agp_add_bridge(struct agp_bridge_data *bridge)
+{
+	int error;
+
+	if (agp_off) {
+		error = -ENODEV;
+		goto err_put_bridge;
+	}
+
+	if (!bridge->dev) {
+		printk(KERN_DEBUG PFX "Erk, registering with no pci_dev!\n");
+		error = -EINVAL;
+		goto err_put_bridge;
+	}
+
+	/* Grab reference on the chipset driver. */
+	if (!try_module_get(bridge->driver->owner)) {
+		dev_info(&bridge->dev->dev, "can't lock chipset driver\n");
+		error = -EINVAL;
+		goto err_put_bridge;
+	}
+
+	error = agp_backend_initialize(bridge);
+	if (error) {
+		dev_info(&bridge->dev->dev,
+			 "agp_backend_initialize() failed\n");
+		goto err_out;
+	}
+
+	if (list_empty(&agp_bridges)) {
+		error = agp_frontend_initialize();
+		if (error) {
+			dev_info(&bridge->dev->dev,
+				 "agp_frontend_initialize() failed\n");
+			goto frontend_err;
+		}
+
+		dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n",
+			 bridge->driver->fetch_size(), bridge->gart_bus_addr);
+
+	}
+
+	list_add(&bridge->list, &agp_bridges);
+	return 0;
+
+frontend_err:
+	agp_backend_cleanup(bridge);
+err_out:
+	module_put(bridge->driver->owner);
+err_put_bridge:
+	agp_put_bridge(bridge);
+	return error;
+}
+EXPORT_SYMBOL_GPL(agp_add_bridge);
+
+
+void agp_remove_bridge(struct agp_bridge_data *bridge)
+{
+	agp_backend_cleanup(bridge);
+	list_del(&bridge->list);
+	if (list_empty(&agp_bridges))
+		agp_frontend_cleanup();
+	module_put(bridge->driver->owner);
+}
+EXPORT_SYMBOL_GPL(agp_remove_bridge);
+
+int agp_off;
+int agp_try_unsupported_boot;
+EXPORT_SYMBOL(agp_off);
+EXPORT_SYMBOL(agp_try_unsupported_boot);
+
+static int __init agp_init(void)
+{
+	if (!agp_off)
+		printk(KERN_INFO "Linux agpgart interface v%d.%d\n",
+			AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
+	return 0;
+}
+
+static void __exit agp_exit(void)
+{
+}
+
+#ifndef MODULE
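+/* Parses "agp=off" / "agp=try_unsupported" from the kernel command line */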
+static __init int agp_setup(char *s)
+{
+	if (!strcmp(s,"off"))
+		agp_off = 1;
+	if (!strcmp(s,"try_unsupported"))
+		agp_try_unsupported_boot = 1;
+	return 1;
+}
+__setup("agp=", agp_setup);
+#endif
+
+MODULE_AUTHOR("Dave Jones, Jeff Hartmann");
+MODULE_DESCRIPTION("AGP GART driver");
+MODULE_LICENSE("GPL and additional rights");
+MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
+
+module_init(agp_init);
+module_exit(agp_exit);
+
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
new file mode 100644
index 0000000..52ffe17
--- /dev/null
+++ b/drivers/char/agp/compat_ioctl.c
@@ -0,0 +1,291 @@
+/*
+ * AGPGART driver frontend compatibility ioctls
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2003 Dave Jones
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/agpgart.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "agp.h"
+#include "compat_ioctl.h"
+
+static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
+{
+	struct agp_info32 userinfo;
+	struct agp_kern_info kerninfo;
+
+	agp_copy_info(agp_bridge, &kerninfo);
+
+	userinfo.version.major = kerninfo.version.major;
+	userinfo.version.minor = kerninfo.version.minor;
+	userinfo.bridge_id = kerninfo.device->vendor |
+	    (kerninfo.device->device << 16);
+	userinfo.agp_mode = kerninfo.mode;
+	userinfo.aper_base = (compat_long_t)kerninfo.aper_base;
+	userinfo.aper_size = kerninfo.aper_size;
+	userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
+	userinfo.pg_used = kerninfo.current_memory;
+
+	if (copy_to_user(arg, &userinfo, sizeof(userinfo)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
+{
+	struct agp_region32 ureserve;
+	struct agp_region kreserve;
+	struct agp_client *client;
+	struct agp_file_private *client_priv;
+
+	DBG("");
+	if (copy_from_user(&ureserve, arg, sizeof(ureserve)))
+		return -EFAULT;
+
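+	/* reject counts that would overflow the copy size computed below */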
+	if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32))
+		return -EFAULT;
+
+	kreserve.pid = ureserve.pid;
+	kreserve.seg_count = ureserve.seg_count;
+
+	client = agp_find_client_by_pid(kreserve.pid);
+
+	if (kreserve.seg_count == 0) {
+		/* remove a client */
+		client_priv = agp_find_private(kreserve.pid);
+
+		if (client_priv != NULL) {
+			set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+			set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+		}
+		if (client == NULL) {
+			/* client is already removed */
+			return 0;
+		}
+		return agp_remove_client(kreserve.pid);
+	} else {
+		struct agp_segment32 *usegment;
+		struct agp_segment *ksegment;
+		int seg;
+
+		if (ureserve.seg_count >= 16384)
+			return -EINVAL;
+
+		usegment = kmalloc_array(ureserve.seg_count,
+					 sizeof(*usegment),
+					 GFP_KERNEL);
+		if (!usegment)
+			return -ENOMEM;
+
+		ksegment = kmalloc_array(kreserve.seg_count,
+					 sizeof(*ksegment),
+					 GFP_KERNEL);
+		if (!ksegment) {
+			kfree(usegment);
+			return -ENOMEM;
+		}
+
+		if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
+				   sizeof(*usegment) * ureserve.seg_count)) {
+			kfree(usegment);
+			kfree(ksegment);
+			return -EFAULT;
+		}
+
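+		/*
+		 * Copy field by field: the compat structs use 32-bit
+		 * off_t/size_t, so their layout differs from the native
+		 * struct agp_segment on 64-bit kernels and a plain
+		 * memcpy would be wrong.
+		 */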
+		for (seg = 0; seg < ureserve.seg_count; seg++) {
+			ksegment[seg].pg_start = usegment[seg].pg_start;
+			ksegment[seg].pg_count = usegment[seg].pg_count;
+			ksegment[seg].prot = usegment[seg].prot;
+		}
+
+		kfree(usegment);
+		kreserve.seg_list = ksegment;
+
+		if (client == NULL) {
+			/* Create the client and add the segment */
+			client = agp_create_client(kreserve.pid);
+
+			if (client == NULL) {
+				kfree(ksegment);
+				return -ENOMEM;
+			}
+			client_priv = agp_find_private(kreserve.pid);
+
+			if (client_priv != NULL) {
+				set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+				set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+			}
+		}
+		return agp_create_segment(client, &kreserve);
+	}
+	/* Will never really happen */
+	return -EINVAL;
+}
+
+static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
+{
+	struct agp_memory *memory;
+	struct agp_allocate32 alloc;
+
+	DBG("");
+	if (copy_from_user(&alloc, arg, sizeof(alloc)))
+		return -EFAULT;
+
+	memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
+
+	if (memory == NULL)
+		return -ENOMEM;
+
+	alloc.key = memory->key;
+	alloc.physical = memory->physical;
+
+	if (copy_to_user(arg, &alloc, sizeof(alloc))) {
+		agp_free_memory_wrap(memory);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+	struct agp_bind32 bind_info;
+	struct agp_memory *memory;
+
+	DBG("");
+	if (copy_from_user(&bind_info, arg, sizeof(bind_info)))
+		return -EFAULT;
+
+	memory = agp_find_mem_by_key(bind_info.key);
+
+	if (memory == NULL)
+		return -EINVAL;
+
+	return agp_bind_memory(memory, bind_info.pg_start);
+}
+
+static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+	struct agp_memory *memory;
+	struct agp_unbind32 unbind;
+
+	DBG("");
+	if (copy_from_user(&unbind, arg, sizeof(unbind)))
+		return -EFAULT;
+
+	memory = agp_find_mem_by_key(unbind.key);
+
+	if (memory == NULL)
+		return -EINVAL;
+
+	return agp_unbind_memory(memory);
+}
+
+long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct agp_file_private *curr_priv = file->private_data;
+	int ret_val = -ENOTTY;
+
+	mutex_lock(&(agp_fe.agp_mutex));
+
+	if ((agp_fe.current_controller == NULL) &&
+	    (cmd != AGPIOC_ACQUIRE32)) {
+		ret_val = -EINVAL;
+		goto ioctl_out;
+	}
+	if ((agp_fe.backend_acquired != true) &&
+	    (cmd != AGPIOC_ACQUIRE32)) {
+		ret_val = -EBUSY;
+		goto ioctl_out;
+	}
+	if (cmd != AGPIOC_ACQUIRE32) {
+		if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
+			ret_val = -EPERM;
+			goto ioctl_out;
+		}
+		/* Use the original pid of the controller,
+		 * in case it's threaded */
+
+		if (agp_fe.current_controller->pid != curr_priv->my_pid) {
+			ret_val = -EBUSY;
+			goto ioctl_out;
+		}
+	}
+
+	switch (cmd) {
+	case AGPIOC_INFO32:
+		ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg);
+		break;
+
+	case AGPIOC_ACQUIRE32:
+		ret_val = agpioc_acquire_wrap(curr_priv);
+		break;
+
+	case AGPIOC_RELEASE32:
+		ret_val = agpioc_release_wrap(curr_priv);
+		break;
+
+	case AGPIOC_SETUP32:
+		ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
+		break;
+
+	case AGPIOC_RESERVE32:
+		ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg);
+		break;
+
+	case AGPIOC_PROTECT32:
+		ret_val = agpioc_protect_wrap(curr_priv);
+		break;
+
+	case AGPIOC_ALLOCATE32:
+		ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg);
+		break;
+
+	case AGPIOC_DEALLOCATE32:
+		ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
+		break;
+
+	case AGPIOC_BIND32:
+		ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg);
+		break;
+
+	case AGPIOC_UNBIND32:
+		ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg);
+		break;
+
+	case AGPIOC_CHIPSET_FLUSH32:
+		break;
+	}
+
+ioctl_out:
+	DBG("ioctl returns %d\n", ret_val);
+	mutex_unlock(&(agp_fe.agp_mutex));
+	return ret_val;
+}
+
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
new file mode 100644
index 0000000..f30e0fd
--- /dev/null
+++ b/drivers/char/agp/compat_ioctl.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AGP_COMPAT_IOCTL_H
+#define _AGP_COMPAT_IOCTL_H
+
+#include <linux/compat.h>
+#include <linux/agpgart.h>
+
+#define AGPIOC_INFO32       _IOR (AGPIOC_BASE, 0, compat_uptr_t)
+#define AGPIOC_ACQUIRE32    _IO  (AGPIOC_BASE, 1)
+#define AGPIOC_RELEASE32    _IO  (AGPIOC_BASE, 2)
+#define AGPIOC_SETUP32      _IOW (AGPIOC_BASE, 3, compat_uptr_t)
+#define AGPIOC_RESERVE32    _IOW (AGPIOC_BASE, 4, compat_uptr_t)
+#define AGPIOC_PROTECT32    _IOW (AGPIOC_BASE, 5, compat_uptr_t)
+#define AGPIOC_ALLOCATE32   _IOWR(AGPIOC_BASE, 6, compat_uptr_t)
+#define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t)
+#define AGPIOC_BIND32       _IOW (AGPIOC_BASE, 8, compat_uptr_t)
+#define AGPIOC_UNBIND32     _IOW (AGPIOC_BASE, 9, compat_uptr_t)
+#define AGPIOC_CHIPSET_FLUSH32 _IO (AGPIOC_BASE, 10)
+
+struct agp_info32 {
+	struct agp_version version;	/* version of the driver        */
+	u32 bridge_id;		/* bridge vendor/device         */
+	u32 agp_mode;		/* mode info of bridge          */
+	compat_long_t aper_base;	/* base of aperture             */
+	compat_size_t aper_size;	/* size of aperture             */
+	compat_size_t pg_total;	/* max pages (swap + system)    */
+	compat_size_t pg_system;	/* max pages (system)           */
+	compat_size_t pg_used;		/* current pages used           */
+};
+
+/*
+ * The "prot" down below needs still a "sleep" flag somehow ...
+ */
+struct agp_segment32 {
+	compat_off_t pg_start;		/* starting page to populate    */
+	compat_size_t pg_count;	/* number of pages              */
+	compat_int_t prot;		/* prot flags for mmap          */
+};
+
+struct agp_region32 {
+	compat_pid_t pid;		/* pid of process               */
+	compat_size_t seg_count;	/* number of segments           */
+	struct agp_segment32 *seg_list;
+};
+
+struct agp_allocate32 {
+	compat_int_t key;		/* tag of allocation            */
+	compat_size_t pg_count;	/* number of pages              */
+	u32 type;		/* 0 == normal, other devspec   */
+	u32 physical;           /* device specific (some devices
+				 * need a phys address of the
+				 * actual page behind the gatt
+				 * table)                        */
+};
+
+struct agp_bind32 {
+	compat_int_t key;		/* tag of allocation            */
+	compat_off_t pg_start;		/* starting page to populate    */
+};
+
+struct agp_unbind32 {
+	compat_int_t key;		/* tag of allocation            */
+	u32 priority;		/* priority for paging out      */
+};
+
+extern struct agp_front_data agp_fe;
+
+int agpioc_acquire_wrap(struct agp_file_private *priv);
+int agpioc_release_wrap(struct agp_file_private *priv);
+int agpioc_protect_wrap(struct agp_file_private *priv);
+int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg);
+int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg);
+struct agp_file_private *agp_find_private(pid_t pid);
+struct agp_client *agp_create_client(pid_t id);
+int agp_remove_client(pid_t id);
+int agp_create_segment(struct agp_client *client, struct agp_region *region);
+void agp_free_memory_wrap(struct agp_memory *memory);
+struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
+struct agp_memory *agp_find_mem_by_key(int key);
+struct agp_client *agp_find_client_by_pid(pid_t id);
+
+#endif /* _AGP_COMPAT_IOCTL_H */
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
new file mode 100644
index 0000000..7f88490
--- /dev/null
+++ b/drivers/char/agp/efficeon-agp.c
@@ -0,0 +1,478 @@
+/*
+ * Transmeta's Efficeon AGPGART driver.
+ *
+ * Based upon a diff by Linus around November '02.
+ *
+ * Ported to the 2.6 kernel by Carlos Puchol <cpglinux@puchol.com>
+ * and H. Peter Anvin <hpa@transmeta.com>.
+ */
+
+/*
+ * NOTE-cpg-040217:
+ *
+ *   - when compiled as a module, it will refuse to unload after
+ *     loading, indicating it is in use when it is not.
+ *   - no S3 (suspend-to-RAM) testing.
+ *   - tested on the efficeon integrated northbridge for tens
+ *     of iterations of starting X and glxgears.
+ *   - tested with radeon 9000 and radeon mobility m9 cards
+ *   - tested with c3/c4 enabled (with the mobility m9 card)
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/gfp.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+#include "agp.h"
+#include "intel-agp.h"
+
+/*
+ * The real difference from the generic AGP code is
+ * in the GART mappings - a two-level setup with the
+ * first level being an on-chip 64-entry table.
+ *
+ * The page array is filled through the ATTPAGE register
+ * (Aperture Translation Table Page Register) at 0xB8. Bits:
+ *  31:20: physical page address
+ *   11:9: Page Attribute Table Index (PATI)
+ *	   must match the PAT index for the
+ *	   mapped pages (the 2nd level page table pages
+ *	   themselves should be just regular WB-cacheable,
+ *	   so this is normally zero.)
+ *      8: Present
+ *    7:6: reserved, write as zero
+ *    5:0: GATT directory index: which 1st-level entry
+ *
+ * The Efficeon AGP spec requires pages to be WB-cacheable
+ * but to be explicitly CLFLUSH'd after any changes.
+ */
+#define EFFICEON_ATTPAGE	0xb8
+#define EFFICEON_L1_SIZE	64	/* Number of PDE pages */
+
+#define EFFICEON_PATI		(0 << 9)
+#define EFFICEON_PRESENT	(1 << 8)
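+
+/*
+ * Illustrative (hypothetical) ATTPAGE value following the layout above:
+ * installing a 2nd-level table page at physical 0x12300000 as directory
+ * index 5 would write
+ *
+ *     0x12300000 | EFFICEON_PATI | EFFICEON_PRESENT | 5 == 0x12300105
+ */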
+
+static struct _efficeon_private {
+	unsigned long l1_table[EFFICEON_L1_SIZE];
+} efficeon_private;
+
+static const struct gatt_mask efficeon_generic_masks[] =
+{
+	{.mask = 0x00000001, .type = 0}
+};
+
+/* This function does the same thing as mask_memory() for this chipset... */
+static inline unsigned long efficeon_mask_memory(struct page *page)
+{
+	unsigned long addr = page_to_phys(page);
+	return addr | 0x00000001;
+}
+
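+/* {aperture size in MiB, number of GATT entries, APSIZE register value} */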
+static const struct aper_size_info_lvl2 efficeon_generic_sizes[4] =
+{
+	{256, 65536, 0},
+	{128, 32768, 32},
+	{64, 16384, 48},
+	{32, 8192, 56}
+};
+
+/*
+ * Control interfaces are largely identical to
+ * those of the legacy Intel 440BX.
+ */
+
+static int efficeon_fetch_size(void)
+{
+	int i;
+	u16 temp;
+	struct aper_size_info_lvl2 *values;
+
+	pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
+	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
+
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge->previous_size =
+			    agp_bridge->current_size = (void *) (values + i);
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+static void efficeon_tlbflush(struct agp_memory * mem)
+{
+	printk(KERN_DEBUG PFX "efficeon_tlbflush()\n");
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+}
+
+static void efficeon_cleanup(void)
+{
+	u16 temp;
+	struct aper_size_info_lvl2 *previous_size;
+
+	printk(KERN_DEBUG PFX "efficeon_cleanup()\n");
+	previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
+	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
+	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
+	pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
+			      previous_size->size_value);
+}
+
+static int efficeon_configure(void)
+{
+	u16 temp2;
+	struct aper_size_info_lvl2 *current_size;
+
+	printk(KERN_DEBUG PFX "efficeon_configure()\n");
+
+	current_size = A_SIZE_LVL2(agp_bridge->current_size);
+
+	/* aperture size */
+	pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
+			      current_size->size_value);
+
+	/* address to map to */
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+
+	/* paccfg/nbxcfg */
+	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
+	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
+			      (temp2 & ~(1 << 10)) | (1 << 9) | (1 << 11));
+	/* clear any possible error conditions */
+	pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
+	return 0;
+}
+
+static int efficeon_free_gatt_table(struct agp_bridge_data *bridge)
+{
+	int index, freed = 0;
+
+	for (index = 0; index < EFFICEON_L1_SIZE; index++) {
+		unsigned long page = efficeon_private.l1_table[index];
+		if (page) {
+			efficeon_private.l1_table[index] = 0;
+			ClearPageReserved(virt_to_page((char *)page));
+			free_page(page);
+			freed++;
+		}
+		printk(KERN_DEBUG PFX "efficeon_free_gatt_table(%p, %02x, %08x)\n",
+			agp_bridge->dev, EFFICEON_ATTPAGE, index);
+		pci_write_config_dword(agp_bridge->dev,
+			EFFICEON_ATTPAGE, index);
+	}
+	printk(KERN_DEBUG PFX "efficeon_free_gatt_table() freed %d pages\n", freed);
+	return 0;
+}
+
+
+/*
+ * Since we don't need contiguous memory we just try
+ * to get the gatt table once
+ */
+
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#undef  GET_GATT
+#define GET_GATT(addr) (efficeon_private.gatt_pages[\
+	GET_PAGE_DIR_IDX(addr)]->remapped)
+
+static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	int index;
+	const int pati    = EFFICEON_PATI;
+	const int present = EFFICEON_PRESENT;
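+	/* CPUID leaf 1, EBX[15:8] = CLFLUSH line size in 8-byte units */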
+	const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
+	int num_entries, l1_pages;
+
+	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+	printk(KERN_DEBUG PFX "efficeon_create_gatt_table(%d)\n", num_entries);
+
+	/* Each 2nd-level page holds 2^10 GATT entries */
+	BUG_ON(num_entries & 0x3ff);
+	l1_pages = num_entries >> 10;
+
+	for (index = 0 ; index < l1_pages ; index++) {
+		int offset;
+		unsigned long page;
+		unsigned long value;
+
+		page = efficeon_private.l1_table[index];
+		BUG_ON(page);
+
+		page = get_zeroed_page(GFP_KERNEL);
+		if (!page) {
+			efficeon_free_gatt_table(agp_bridge);
+			return -ENOMEM;
+		}
+		SetPageReserved(virt_to_page((char *)page));
+
+		for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
+			clflush((char *)page+offset);
+
+		efficeon_private.l1_table[index] = page;
+
+		value = virt_to_phys((unsigned long *)page) | pati | present | index;
+
+		pci_write_config_dword(agp_bridge->dev,
+			EFFICEON_ATTPAGE, value);
+	}
+
+	return 0;
+}
+
+static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
+{
+	int i, count = mem->page_count, num_entries;
+	unsigned int *page, *last_page;
+	const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
+	const unsigned long clflush_mask = ~(clflush_chunk-1);
+
+	printk(KERN_DEBUG PFX "efficeon_insert_memory(%lx, %d)\n", pg_start, count);
+
+	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+	if ((pg_start + mem->page_count) > num_entries)
+		return -EINVAL;
+	if (type != 0 || mem->type != 0)
+		return -EINVAL;
+
+	if (!mem->is_flushed) {
+		global_cache_flush();
+		mem->is_flushed = true;
+	}
+
+	last_page = NULL;
+	for (i = 0; i < count; i++) {
+		int index = pg_start + i;
+		unsigned long insert = efficeon_mask_memory(mem->pages[i]);
+
+		page = (unsigned int *) efficeon_private.l1_table[index >> 10];
+
+		if (!page)
+			continue;
+
+		page += (index & 0x3ff);
+		*page = insert;
+
+		/* clflush is slow, so don't clflush until we have to */
+		if (last_page &&
+		    (((unsigned long)page^(unsigned long)last_page) &
+		     clflush_mask))
+			clflush(last_page);
+
+		last_page = page;
+	}
+
+	if (last_page)
+		clflush(last_page);
+
+	agp_bridge->driver->tlb_flush(mem);
+	return 0;
+}
+
+static int efficeon_remove_memory(struct agp_memory * mem, off_t pg_start, int type)
+{
+	int i, count = mem->page_count, num_entries;
+
+	printk(KERN_DEBUG PFX "efficeon_remove_memory(%lx, %d)\n", pg_start, count);
+
+	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+	if ((pg_start + mem->page_count) > num_entries)
+		return -EINVAL;
+	if (type != 0 || mem->type != 0)
+		return -EINVAL;
+
+	for (i = 0; i < count; i++) {
+		int index = pg_start + i;
+		unsigned int *page = (unsigned int *) efficeon_private.l1_table[index >> 10];
+
+		if (!page)
+			continue;
+		page += (index & 0x3ff);
+		*page = 0;
+	}
+	agp_bridge->driver->tlb_flush(mem);
+	return 0;
+}
+
+
+static const struct agp_bridge_driver efficeon_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= efficeon_generic_sizes,
+	.size_type		= LVL2_APER_SIZE,
+	.num_aperture_sizes	= 4,
+	.configure		= efficeon_configure,
+	.fetch_size		= efficeon_fetch_size,
+	.cleanup		= efficeon_cleanup,
+	.tlb_flush		= efficeon_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= efficeon_generic_masks,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+
+	// Efficeon-specific GATT table setup / populate / teardown
+	.create_gatt_table	= efficeon_create_gatt_table,
+	.free_gatt_table	= efficeon_free_gatt_table,
+	.insert_memory		= efficeon_insert_memory,
+	.remove_memory		= efficeon_remove_memory,
+	.cant_use_aperture	= false,	// true might be faster?
+
+	// Generic
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static int agp_efficeon_probe(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+	struct agp_bridge_data *bridge;
+	u8 cap_ptr;
+	struct resource *r;
+
+	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+	if (!cap_ptr)
+		return -ENODEV;
+
+	/* Probe for Efficeon controller */
+	if (pdev->device != PCI_DEVICE_ID_EFFICEON) {
+		printk(KERN_ERR PFX "Unsupported Efficeon chipset (device id: %04x)\n",
+		    pdev->device);
+		return -ENODEV;
+	}
+
+	printk(KERN_INFO PFX "Detected Transmeta Efficeon TM8000 series chipset\n");
+
+	bridge = agp_alloc_bridge();
+	if (!bridge)
+		return -ENOMEM;
+
+	bridge->driver = &efficeon_driver;
+	bridge->dev = pdev;
+	bridge->capndx = cap_ptr;
+
+	/*
+	* If the device has not been properly set up, the following will catch
+	* the problem and should stop the system from crashing.
+	* 20030610 - hamish@zot.org
+	*/
+	if (pci_enable_device(pdev)) {
+		printk(KERN_ERR PFX "Unable to enable PCI device\n");
+		agp_put_bridge(bridge);
+		return -ENODEV;
+	}
+
+	/*
+	* The following fixes the case where the BIOS has "forgotten" to
+	* provide an address range for the GART.
+	* 20030610 - hamish@zot.org
+	*/
+	r = &pdev->resource[0];
+	if (!r->start && r->end) {
+		if (pci_assign_resource(pdev, 0)) {
+			printk(KERN_ERR PFX "could not assign resource 0\n");
+			agp_put_bridge(bridge);
+			return -ENODEV;
+		}
+	}
+
+	/* Fill in the mode register */
+	if (cap_ptr) {
+		pci_read_config_dword(pdev,
+				bridge->capndx+PCI_AGP_STATUS,
+				&bridge->mode);
+	}
+
+	pci_set_drvdata(pdev, bridge);
+	return agp_add_bridge(bridge);
+}
+
+static void agp_efficeon_remove(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+	agp_remove_bridge(bridge);
+	agp_put_bridge(bridge);
+}
+
+#ifdef CONFIG_PM
+static int agp_efficeon_suspend(struct pci_dev *dev, pm_message_t state)
+{
+	return 0;
+}
+
+static int agp_efficeon_resume(struct pci_dev *pdev)
+{
+	printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
+	return efficeon_configure();
+}
+#endif
+
+static const struct pci_device_id agp_efficeon_pci_table[] = {
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_TRANSMETA,
+	.device		= PCI_ANY_ID,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table);
+
+static struct pci_driver agp_efficeon_pci_driver = {
+	.name		= "agpgart-efficeon",
+	.id_table	= agp_efficeon_pci_table,
+	.probe		= agp_efficeon_probe,
+	.remove		= agp_efficeon_remove,
+#ifdef CONFIG_PM
+	.suspend	= agp_efficeon_suspend,
+	.resume		= agp_efficeon_resume,
+#endif
+};
+
+static int __init agp_efficeon_init(void)
+{
+	static int agp_initialised;
+
+	if (agp_off)
+		return -EINVAL;
+
+	if (agp_initialised)
+		return 0;
+	agp_initialised = 1;
+
+	return pci_register_driver(&agp_efficeon_pci_driver);
+}
+
+static void __exit agp_efficeon_cleanup(void)
+{
+	pci_unregister_driver(&agp_efficeon_pci_driver);
+}
+
+module_init(agp_efficeon_init);
+module_exit(agp_efficeon_cleanup);
+
+MODULE_AUTHOR("Carlos Puchol <cpglinux@puchol.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
new file mode 100644
index 0000000..f695588
--- /dev/null
+++ b/drivers/char/agp/frontend.c
@@ -0,0 +1,1068 @@
+/*
+ * AGPGART driver frontend
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2003 Dave Jones
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pci.h>
+#include <linux/miscdevice.h>
+#include <linux/agp_backend.h>
+#include <linux/agpgart.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/pgtable.h>
+#include "agp.h"
+
+struct agp_front_data agp_fe;
+
+struct agp_memory *agp_find_mem_by_key(int key)
+{
+	struct agp_memory *curr;
+
+	if (agp_fe.current_controller == NULL)
+		return NULL;
+
+	curr = agp_fe.current_controller->pool;
+
+	while (curr != NULL) {
+		if (curr->key == key)
+			break;
+		curr = curr->next;
+	}
+
+	DBG("key=%d -> mem=%p", key, curr);
+	return curr;
+}
+
+static void agp_remove_from_pool(struct agp_memory *temp)
+{
+	struct agp_memory *prev;
+	struct agp_memory *next;
+
+	/* Check to see if this is even in the memory pool */
+
+	DBG("mem=%p", temp);
+	if (agp_find_mem_by_key(temp->key) != NULL) {
+		next = temp->next;
+		prev = temp->prev;
+
+		if (prev != NULL) {
+			prev->next = next;
+			if (next != NULL)
+				next->prev = prev;
+
+		} else {
+			/* This is the first item on the list */
+			if (next != NULL)
+				next->prev = NULL;
+
+			agp_fe.current_controller->pool = next;
+		}
+	}
+}
+
+/*
+ * Routines for managing each client's segment list -
+ * These routines handle adding and removing segments
+ * to each auth'ed client.
+ */
+
+static struct
+agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
+						unsigned long offset,
+					    int size, pgprot_t page_prot)
+{
+	struct agp_segment_priv *seg;
+	int num_segments, i;
+	off_t pg_start;
+	size_t pg_count;
+
+	pg_start = offset / 4096;
+	pg_count = size / 4096;
+	seg = *(client->segments);
+	num_segments = client->num_segments;
+
+	for (i = 0; i < client->num_segments; i++) {
+		if ((seg[i].pg_start == pg_start) &&
+		    (seg[i].pg_count == pg_count) &&
+		    (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
+			return seg + i;
+		}
+	}
+
+	return NULL;
+}
+
+static void agp_remove_seg_from_client(struct agp_client *client)
+{
+	DBG("client=%p", client);
+
+	if (client->segments != NULL) {
+		if (*(client->segments) != NULL) {
+			DBG("Freeing %p from client %p", *(client->segments), client);
+			kfree(*(client->segments));
+		}
+		DBG("Freeing %p from client %p", client->segments, client);
+		kfree(client->segments);
+		client->segments = NULL;
+	}
+}
+
+static void agp_add_seg_to_client(struct agp_client *client,
+			       struct agp_segment_priv ** seg, int num_segments)
+{
+	struct agp_segment_priv **prev_seg;
+
+	prev_seg = client->segments;
+
+	if (prev_seg != NULL)
+		agp_remove_seg_from_client(client);
+
+	DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client);
+	client->num_segments = num_segments;
+	client->segments = seg;
+}
+
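+/* Translate the user's PROT_* mmap flags into a pgprot_t for the segment */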
+static pgprot_t agp_convert_mmap_flags(int prot)
+{
+	unsigned long prot_bits;
+
+	prot_bits = calc_vm_prot_bits(prot, 0) | VM_SHARED;
+	return vm_get_page_prot(prot_bits);
+}
+
+int agp_create_segment(struct agp_client *client, struct agp_region *region)
+{
+	struct agp_segment_priv **ret_seg;
+	struct agp_segment_priv *seg;
+	struct agp_segment *user_seg;
+	size_t i;
+
+	seg = kzalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL);
+	if (seg == NULL) {
+		kfree(region->seg_list);
+		region->seg_list = NULL;
+		return -ENOMEM;
+	}
+	user_seg = region->seg_list;
+
+	for (i = 0; i < region->seg_count; i++) {
+		seg[i].pg_start = user_seg[i].pg_start;
+		seg[i].pg_count = user_seg[i].pg_count;
+		seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot);
+	}
+	kfree(region->seg_list);
+	region->seg_list = NULL;
+
+	ret_seg = kmalloc(sizeof(void *), GFP_KERNEL);
+	if (ret_seg == NULL) {
+		kfree(seg);
+		return -ENOMEM;
+	}
+	*ret_seg = seg;
+	agp_add_seg_to_client(client, ret_seg, region->seg_count);
+	return 0;
+}
+
+/* End - Routines for managing each client's segment list */
+
+/* This function must only be called when current_controller != NULL */
+static void agp_insert_into_pool(struct agp_memory * temp)
+{
+	struct agp_memory *prev;
+
+	prev = agp_fe.current_controller->pool;
+
+	if (prev != NULL) {
+		prev->prev = temp;
+		temp->next = prev;
+	}
+	agp_fe.current_controller->pool = temp;
+}
+
+
+/* File private list routines */
+
+struct agp_file_private *agp_find_private(pid_t pid)
+{
+	struct agp_file_private *curr;
+
+	curr = agp_fe.file_priv_list;
+
+	while (curr != NULL) {
+		if (curr->my_pid == pid)
+			return curr;
+		curr = curr->next;
+	}
+
+	return NULL;
+}
+
+static void agp_insert_file_private(struct agp_file_private * priv)
+{
+	struct agp_file_private *prev;
+
+	prev = agp_fe.file_priv_list;
+
+	if (prev != NULL)
+		prev->prev = priv;
+	priv->next = prev;
+	agp_fe.file_priv_list = priv;
+}
+
+static void agp_remove_file_private(struct agp_file_private * priv)
+{
+	struct agp_file_private *next;
+	struct agp_file_private *prev;
+
+	next = priv->next;
+	prev = priv->prev;
+
+	if (prev != NULL) {
+		prev->next = next;
+
+		if (next != NULL)
+			next->prev = prev;
+
+	} else {
+		if (next != NULL)
+			next->prev = NULL;
+
+		agp_fe.file_priv_list = next;
+	}
+}
+
+/* End - File flag list routines */
+
+/*
+ * Wrappers for agp_free_memory & agp_allocate_memory
+ * These make sure that internal lists are kept updated.
+ */
+void agp_free_memory_wrap(struct agp_memory *memory)
+{
+	agp_remove_from_pool(memory);
+	agp_free_memory(memory);
+}
+
+struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
+{
+	struct agp_memory *memory;
+
+	memory = agp_allocate_memory(agp_bridge, pg_count, type);
+	if (memory == NULL)
+		return NULL;
+
+	agp_insert_into_pool(memory);
+	return memory;
+}
+
+/* Routines for managing the list of controllers -
+ * These routines manage the current controller, and the list of
+ * controllers
+ */
+
+static struct agp_controller *agp_find_controller_by_pid(pid_t id)
+{
+	struct agp_controller *controller;
+
+	controller = agp_fe.controllers;
+
+	while (controller != NULL) {
+		if (controller->pid == id)
+			return controller;
+		controller = controller->next;
+	}
+
+	return NULL;
+}
+
+static struct agp_controller *agp_create_controller(pid_t id)
+{
+	struct agp_controller *controller;
+
+	controller = kzalloc(sizeof(struct agp_controller), GFP_KERNEL);
+	if (controller == NULL)
+		return NULL;
+
+	controller->pid = id;
+	return controller;
+}
+
+static int agp_insert_controller(struct agp_controller *controller)
+{
+	struct agp_controller *prev_controller;
+
+	prev_controller = agp_fe.controllers;
+	controller->next = prev_controller;
+
+	if (prev_controller != NULL)
+		prev_controller->prev = controller;
+
+	agp_fe.controllers = controller;
+
+	return 0;
+}
+
+static void agp_remove_all_clients(struct agp_controller *controller)
+{
+	struct agp_client *client;
+	struct agp_client *temp;
+
+	client = controller->clients;
+
+	while (client) {
+		struct agp_file_private *priv;
+
+		temp = client;
+		agp_remove_seg_from_client(temp);
+		priv = agp_find_private(temp->pid);
+
+		if (priv != NULL) {
+			clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
+			clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+		}
+		client = client->next;
+		kfree(temp);
+	}
+}
+
+static void agp_remove_all_memory(struct agp_controller *controller)
+{
+	struct agp_memory *memory;
+	struct agp_memory *temp;
+
+	memory = controller->pool;
+
+	while (memory) {
+		temp = memory;
+		memory = memory->next;
+		agp_free_memory_wrap(temp);
+	}
+}
+
+static int agp_remove_controller(struct agp_controller *controller)
+{
+	struct agp_controller *prev_controller;
+	struct agp_controller *next_controller;
+
+	prev_controller = controller->prev;
+	next_controller = controller->next;
+
+	if (prev_controller != NULL) {
+		prev_controller->next = next_controller;
+		if (next_controller != NULL)
+			next_controller->prev = prev_controller;
+
+	} else {
+		if (next_controller != NULL)
+			next_controller->prev = NULL;
+
+		agp_fe.controllers = next_controller;
+	}
+
+	agp_remove_all_memory(controller);
+	agp_remove_all_clients(controller);
+
+	if (agp_fe.current_controller == controller) {
+		agp_fe.current_controller = NULL;
+		agp_fe.backend_acquired = false;
+		agp_backend_release(agp_bridge);
+	}
+	kfree(controller);
+	return 0;
+}
+
+static void agp_controller_make_current(struct agp_controller *controller)
+{
+	struct agp_client *clients;
+
+	clients = controller->clients;
+
+	while (clients != NULL) {
+		struct agp_file_private *priv;
+
+		priv = agp_find_private(clients->pid);
+
+		if (priv != NULL) {
+			set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+			set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+		}
+		clients = clients->next;
+	}
+
+	agp_fe.current_controller = controller;
+}
+
+static void agp_controller_release_current(struct agp_controller *controller,
+				      struct agp_file_private *controller_priv)
+{
+	struct agp_client *clients;
+
+	clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags);
+	clients = controller->clients;
+
+	while (clients != NULL) {
+		struct agp_file_private *priv;
+
+		priv = agp_find_private(clients->pid);
+
+		if (priv != NULL)
+			clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
+
+		clients = clients->next;
+	}
+
+	agp_fe.current_controller = NULL;
+	agp_fe.used_by_controller = false;
+	agp_backend_release(agp_bridge);
+}
+
+/*
+ * Routines for managing client lists -
+ * These routines are for managing the list of auth'ed clients.
+ */
+
+static struct agp_client
+*agp_find_client_in_controller(struct agp_controller *controller, pid_t id)
+{
+	struct agp_client *client;
+
+	if (controller == NULL)
+		return NULL;
+
+	client = controller->clients;
+
+	while (client != NULL) {
+		if (client->pid == id)
+			return client;
+		client = client->next;
+	}
+
+	return NULL;
+}
+
+static struct agp_controller *agp_find_controller_for_client(pid_t id)
+{
+	struct agp_controller *controller;
+
+	controller = agp_fe.controllers;
+
+	while (controller != NULL) {
+		if ((agp_find_client_in_controller(controller, id)) != NULL)
+			return controller;
+		controller = controller->next;
+	}
+
+	return NULL;
+}
+
+struct agp_client *agp_find_client_by_pid(pid_t id)
+{
+	struct agp_client *temp;
+
+	if (agp_fe.current_controller == NULL)
+		return NULL;
+
+	temp = agp_find_client_in_controller(agp_fe.current_controller, id);
+	return temp;
+}
+
+static void agp_insert_client(struct agp_client *client)
+{
+	struct agp_client *prev_client;
+
+	prev_client = agp_fe.current_controller->clients;
+	client->next = prev_client;
+
+	if (prev_client != NULL)
+		prev_client->prev = client;
+
+	agp_fe.current_controller->clients = client;
+	agp_fe.current_controller->num_clients++;
+}
+
+struct agp_client *agp_create_client(pid_t id)
+{
+	struct agp_client *new_client;
+
+	new_client = kzalloc(sizeof(struct agp_client), GFP_KERNEL);
+	if (new_client == NULL)
+		return NULL;
+
+	new_client->pid = id;
+	agp_insert_client(new_client);
+	return new_client;
+}
+
+int agp_remove_client(pid_t id)
+{
+	struct agp_client *client;
+	struct agp_client *prev_client;
+	struct agp_client *next_client;
+	struct agp_controller *controller;
+
+	controller = agp_find_controller_for_client(id);
+	if (controller == NULL)
+		return -EINVAL;
+
+	client = agp_find_client_in_controller(controller, id);
+	if (client == NULL)
+		return -EINVAL;
+
+	prev_client = client->prev;
+	next_client = client->next;
+
+	if (prev_client != NULL) {
+		prev_client->next = next_client;
+		if (next_client != NULL)
+			next_client->prev = prev_client;
+
+	} else {
+		if (next_client != NULL)
+			next_client->prev = NULL;
+		controller->clients = next_client;
+	}
+
+	controller->num_clients--;
+	agp_remove_seg_from_client(client);
+	kfree(client);
+	return 0;
+}
+
+/* End - Routines for managing client lists */
+
+/* File Operations */
+
+static int agp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned int size, current_size;
+	unsigned long offset;
+	struct agp_client *client;
+	struct agp_file_private *priv = file->private_data;
+	struct agp_kern_info kerninfo;
+
+	mutex_lock(&(agp_fe.agp_mutex));
+
+	if (agp_fe.backend_acquired != true)
+		goto out_eperm;
+
+	if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags)))
+		goto out_eperm;
+
+	agp_copy_info(agp_bridge, &kerninfo);
+	size = vma->vm_end - vma->vm_start;
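+	/* aper_size is reported in MiB; 0x100000 converts it to bytes */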
+	current_size = kerninfo.aper_size;
+	current_size = current_size * 0x100000;
+	offset = vma->vm_pgoff << PAGE_SHIFT;
+	DBG("%lx:%lx", offset, offset+size);
+
+	if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
+		if ((size + offset) > current_size)
+			goto out_inval;
+
+		client = agp_find_client_by_pid(current->pid);
+
+		if (client == NULL)
+			goto out_eperm;
+
+		if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot))
+			goto out_inval;
+
+		DBG("client vm_ops=%p", kerninfo.vm_ops);
+		if (kerninfo.vm_ops) {
+			vma->vm_ops = kerninfo.vm_ops;
+		} else if (io_remap_pfn_range(vma, vma->vm_start,
+				(kerninfo.aper_base + offset) >> PAGE_SHIFT,
+				size,
+				pgprot_writecombine(vma->vm_page_prot))) {
+			goto out_again;
+		}
+		mutex_unlock(&(agp_fe.agp_mutex));
+		return 0;
+	}
+
+	if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
+		if (size != current_size)
+			goto out_inval;
+
+		DBG("controller vm_ops=%p", kerninfo.vm_ops);
+		if (kerninfo.vm_ops) {
+			vma->vm_ops = kerninfo.vm_ops;
+		} else if (io_remap_pfn_range(vma, vma->vm_start,
+				kerninfo.aper_base >> PAGE_SHIFT,
+				size,
+				pgprot_writecombine(vma->vm_page_prot))) {
+			goto out_again;
+		}
+		mutex_unlock(&(agp_fe.agp_mutex));
+		return 0;
+	}
+
+out_eperm:
+	mutex_unlock(&(agp_fe.agp_mutex));
+	return -EPERM;
+
+out_inval:
+	mutex_unlock(&(agp_fe.agp_mutex));
+	return -EINVAL;
+
+out_again:
+	mutex_unlock(&(agp_fe.agp_mutex));
+	return -EAGAIN;
+}
+
+static int agp_release(struct inode *inode, struct file *file)
+{
+	struct agp_file_private *priv = file->private_data;
+
+	mutex_lock(&(agp_fe.agp_mutex));
+
+	DBG("priv=%p", priv);
+
+	if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
+		struct agp_controller *controller;
+
+		controller = agp_find_controller_by_pid(priv->my_pid);
+
+		if (controller != NULL) {
+			if (controller == agp_fe.current_controller)
+				agp_controller_release_current(controller, priv);
+			agp_remove_controller(controller);
+			controller = NULL;
+		}
+	}
+
+	if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags))
+		agp_remove_client(priv->my_pid);
+
+	agp_remove_file_private(priv);
+	kfree(priv);
+	file->private_data = NULL;
+	mutex_unlock(&(agp_fe.agp_mutex));
+	return 0;
+}
+
+static int agp_open(struct inode *inode, struct file *file)
+{
+	int minor = iminor(inode);
+	struct agp_file_private *priv;
+	struct agp_client *client;
+
+	if (minor != AGPGART_MINOR)
+		return -ENXIO;
+
+	mutex_lock(&(agp_fe.agp_mutex));
+
+	priv = kzalloc(sizeof(struct agp_file_private), GFP_KERNEL);
+	if (priv == NULL) {
+		mutex_unlock(&(agp_fe.agp_mutex));
+		return -ENOMEM;
+	}
+
+	set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
+	priv->my_pid = current->pid;
+
+	if (capable(CAP_SYS_RAWIO))
+		/* Root priv, can be controller */
+		set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
+
+	client = agp_find_client_by_pid(current->pid);
+
+	if (client != NULL) {
+		set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+		set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+	}
+	file->private_data = (void *) priv;
+	agp_insert_file_private(priv);
+	DBG("private=%p, client=%p", priv, client);
+
+	mutex_unlock(&(agp_fe.agp_mutex));
+
+	return 0;
+}
+
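+/*
+ * A minimal, hypothetical sketch of the user-space ioctl flow served
+ * below, assuming the standard /dev/agpgart node and the AGPIOC_*
+ * definitions from <linux/agpgart.h>:
+ *
+ *	int fd = open("/dev/agpgart", O_RDWR);
+ *	struct agp_info info;
+ *
+ *	ioctl(fd, AGPIOC_ACQUIRE);	// become the controller (needs
+ *					// CAP_SYS_RAWIO, see agp_open())
+ *	ioctl(fd, AGPIOC_INFO, &info);	// query aperture base/size
+ *	...				// SETUP/ALLOCATE/BIND as needed
+ *	ioctl(fd, AGPIOC_RELEASE);
+ *	close(fd);
+ */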
+static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
+{
+	struct agp_info userinfo;
+	struct agp_kern_info kerninfo;
+
+	agp_copy_info(agp_bridge, &kerninfo);
+
+	memset(&userinfo, 0, sizeof(userinfo));
+	userinfo.version.major = kerninfo.version.major;
+	userinfo.version.minor = kerninfo.version.minor;
+	userinfo.bridge_id = kerninfo.device->vendor |
+	    (kerninfo.device->device << 16);
+	userinfo.agp_mode = kerninfo.mode;
+	userinfo.aper_base = kerninfo.aper_base;
+	userinfo.aper_size = kerninfo.aper_size;
+	userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
+	userinfo.pg_used = kerninfo.current_memory;
+
+	if (copy_to_user(arg, &userinfo, sizeof(struct agp_info)))
+		return -EFAULT;
+
+	return 0;
+}
+
+int agpioc_acquire_wrap(struct agp_file_private *priv)
+{
+	struct agp_controller *controller;
+
+	DBG("");
+
+	if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags)))
+		return -EPERM;
+
+	if (agp_fe.current_controller != NULL)
+		return -EBUSY;
+
+	if (!agp_bridge)
+		return -ENODEV;
+
+	if (atomic_read(&agp_bridge->agp_in_use))
+		return -EBUSY;
+
+	atomic_inc(&agp_bridge->agp_in_use);
+
+	agp_fe.backend_acquired = true;
+
+	controller = agp_find_controller_by_pid(priv->my_pid);
+
+	if (controller != NULL) {
+		agp_controller_make_current(controller);
+	} else {
+		controller = agp_create_controller(priv->my_pid);
+
+		if (controller == NULL) {
+			agp_fe.backend_acquired = false;
+			agp_backend_release(agp_bridge);
+			return -ENOMEM;
+		}
+		agp_insert_controller(controller);
+		agp_controller_make_current(controller);
+	}
+
+	set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags);
+	set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+	return 0;
+}
+
+int agpioc_release_wrap(struct agp_file_private *priv)
+{
+	DBG("");
+	agp_controller_release_current(agp_fe.current_controller, priv);
+	return 0;
+}
+
+int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
+{
+	struct agp_setup mode;
+
+	DBG("");
+	if (copy_from_user(&mode, arg, sizeof(struct agp_setup)))
+		return -EFAULT;
+
+	agp_enable(agp_bridge, mode.agp_mode);
+	return 0;
+}
+
+static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
+{
+	struct agp_region reserve;
+	struct agp_client *client;
+	struct agp_file_private *client_priv;
+
+	DBG("");
+	if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
+		return -EFAULT;
+
+	if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
+		return -EFAULT;
+
+	client = agp_find_client_by_pid(reserve.pid);
+
+	if (reserve.seg_count == 0) {
+		/* remove a client */
+		client_priv = agp_find_private(reserve.pid);
+
+		if (client_priv != NULL) {
+			set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+			set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+		}
+		if (client == NULL) {
+			/* client is already removed */
+			return 0;
+		}
+		return agp_remove_client(reserve.pid);
+	} else {
+		struct agp_segment *segment;
+
+		if (reserve.seg_count >= 16384)
+			return -EINVAL;
+
+		segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count),
+				  GFP_KERNEL);
+
+		if (segment == NULL)
+			return -ENOMEM;
+
+		if (copy_from_user(segment, (void __user *) reserve.seg_list,
+				   sizeof(struct agp_segment) * reserve.seg_count)) {
+			kfree(segment);
+			return -EFAULT;
+		}
+		reserve.seg_list = segment;
+
+		if (client == NULL) {
+			/* Create the client and add the segment */
+			client = agp_create_client(reserve.pid);
+
+			if (client == NULL) {
+				kfree(segment);
+				return -ENOMEM;
+			}
+			client_priv = agp_find_private(reserve.pid);
+
+			if (client_priv != NULL) {
+				set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+				set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+			}
+		}
+		return agp_create_segment(client, &reserve);
+	}
+	/* Will never really happen */
+	return -EINVAL;
+}
+
+int agpioc_protect_wrap(struct agp_file_private *priv)
+{
+	DBG("");
+	/* This function is not currently implemented */
+	return -EINVAL;
+}
+
+static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
+{
+	struct agp_memory *memory;
+	struct agp_allocate alloc;
+
+	DBG("");
+	if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate)))
+		return -EFAULT;
+
+	if (alloc.type >= AGP_USER_TYPES)
+		return -EINVAL;
+
+	memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
+
+	if (memory == NULL)
+		return -ENOMEM;
+
+	alloc.key = memory->key;
+	alloc.physical = memory->physical;
+
+	if (copy_to_user(arg, &alloc, sizeof(struct agp_allocate))) {
+		agp_free_memory_wrap(memory);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg)
+{
+	struct agp_memory *memory;
+
+	DBG("");
+	memory = agp_find_mem_by_key(arg);
+
+	if (memory == NULL)
+		return -EINVAL;
+
+	agp_free_memory_wrap(memory);
+	return 0;
+}
+
+static int agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+	struct agp_bind bind_info;
+	struct agp_memory *memory;
+
+	DBG("");
+	if (copy_from_user(&bind_info, arg, sizeof(struct agp_bind)))
+		return -EFAULT;
+
+	memory = agp_find_mem_by_key(bind_info.key);
+
+	if (memory == NULL)
+		return -EINVAL;
+
+	return agp_bind_memory(memory, bind_info.pg_start);
+}
+
+static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+	struct agp_memory *memory;
+	struct agp_unbind unbind;
+
+	DBG("");
+	if (copy_from_user(&unbind, arg, sizeof(struct agp_unbind)))
+		return -EFAULT;
+
+	memory = agp_find_mem_by_key(unbind.key);
+
+	if (memory == NULL)
+		return -EINVAL;
+
+	return agp_unbind_memory(memory);
+}
+
+static long agp_ioctl(struct file *file,
+		     unsigned int cmd, unsigned long arg)
+{
+	struct agp_file_private *curr_priv = file->private_data;
+	int ret_val = -ENOTTY;
+
+	DBG("priv=%p, cmd=%x", curr_priv, cmd);
+	mutex_lock(&(agp_fe.agp_mutex));
+
+	if ((agp_fe.current_controller == NULL) &&
+	    (cmd != AGPIOC_ACQUIRE)) {
+		ret_val = -EINVAL;
+		goto ioctl_out;
+	}
+	if ((agp_fe.backend_acquired != true) &&
+	    (cmd != AGPIOC_ACQUIRE)) {
+		ret_val = -EBUSY;
+		goto ioctl_out;
+	}
+	if (cmd != AGPIOC_ACQUIRE) {
+		if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
+			ret_val = -EPERM;
+			goto ioctl_out;
+		}
+		/* Use the original pid of the controller,
+		 * in case it's threaded */
+
+		if (agp_fe.current_controller->pid != curr_priv->my_pid) {
+			ret_val = -EBUSY;
+			goto ioctl_out;
+		}
+	}
+
+	switch (cmd) {
+	case AGPIOC_INFO:
+		ret_val = agpioc_info_wrap(curr_priv, (void __user *) arg);
+		break;
+
+	case AGPIOC_ACQUIRE:
+		ret_val = agpioc_acquire_wrap(curr_priv);
+		break;
+
+	case AGPIOC_RELEASE:
+		ret_val = agpioc_release_wrap(curr_priv);
+		break;
+
+	case AGPIOC_SETUP:
+		ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
+		break;
+
+	case AGPIOC_RESERVE:
+		ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg);
+		break;
+
+	case AGPIOC_PROTECT:
+		ret_val = agpioc_protect_wrap(curr_priv);
+		break;
+
+	case AGPIOC_ALLOCATE:
+		ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg);
+		break;
+
+	case AGPIOC_DEALLOCATE:
+		ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
+		break;
+
+	case AGPIOC_BIND:
+		ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg);
+		break;
+
+	case AGPIOC_UNBIND:
+		ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
+		break;
+
+	case AGPIOC_CHIPSET_FLUSH:
+		break;
+	}
+
+ioctl_out:
+	DBG("ioctl returns %d\n", ret_val);
+	mutex_unlock(&(agp_fe.agp_mutex));
+	return ret_val;
+}
+
+static const struct file_operations agp_fops =
+{
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.unlocked_ioctl	= agp_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= compat_agp_ioctl,
+#endif
+	.mmap		= agp_mmap,
+	.open		= agp_open,
+	.release	= agp_release,
+};
+
+static struct miscdevice agp_miscdev =
+{
+	.minor	= AGPGART_MINOR,
+	.name	= "agpgart",
+	.fops	= &agp_fops
+};
+
+int agp_frontend_initialize(void)
+{
+	memset(&agp_fe, 0, sizeof(struct agp_front_data));
+	mutex_init(&(agp_fe.agp_mutex));
+
+	if (misc_register(&agp_miscdev)) {
+		printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR);
+		return -EIO;
+	}
+	return 0;
+}
+
+void agp_frontend_cleanup(void)
+{
+	misc_deregister(&agp_miscdev);
+}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
new file mode 100644
index 0000000..658664a
--- /dev/null
+++ b/drivers/char/agp/generic.c
@@ -0,0 +1,1420 @@
+/*
+ * AGPGART driver.
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2005 Dave Jones.
+ * Copyright (C) 1999 Jeff Hartmann.
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO:
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/pm.h>
+#include <linux/agp_backend.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+#include <asm/pgtable.h>
+#include "agp.h"
+
+__u32 *agp_gatt_table;
+int agp_memory_reserved;
+
+/*
+ * Needed by the Nforce GART driver for the time being. Would be
+ * nice to do this some other way instead of needing this export.
+ */
+EXPORT_SYMBOL_GPL(agp_memory_reserved);
+
+/*
+ * Generic routines for handling agp_memory structures -
+ * They use the basic page allocation routines to do the brunt of the work.
+ */
+
+void agp_free_key(int key)
+{
+	if (key < 0)
+		return;
+
+	if (key < MAXKEY)
+		clear_bit(key, agp_bridge->key_list);
+}
+EXPORT_SYMBOL(agp_free_key);
+
+
+static int agp_get_key(void)
+{
+	int bit;
+
+	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
+	if (bit < MAXKEY) {
+		set_bit(bit, agp_bridge->key_list);
+		return bit;
+	}
+	return -1;
+}
+
+/*
+ * Use kmalloc if possible for the page list. Otherwise fall back to
+ * vmalloc. This speeds things up and also saves memory for small AGP
+ * regions.
+ */
+
+void agp_alloc_page_array(size_t size, struct agp_memory *mem)
+{
+	mem->pages = kvmalloc(size, GFP_KERNEL);
+}
+EXPORT_SYMBOL(agp_alloc_page_array);
+
+static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
+{
+	struct agp_memory *new;
+	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
+
+	if (INT_MAX/sizeof(struct page *) < num_agp_pages)
+		return NULL;
+
+	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
+	if (new == NULL)
+		return NULL;
+
+	new->key = agp_get_key();
+
+	if (new->key < 0) {
+		kfree(new);
+		return NULL;
+	}
+
+	agp_alloc_page_array(alloc_size, new);
+
+	if (new->pages == NULL) {
+		agp_free_key(new->key);
+		kfree(new);
+		return NULL;
+	}
+	new->num_scratch_pages = 0;
+	return new;
+}
+
+struct agp_memory *agp_create_memory(int scratch_pages)
+{
+	struct agp_memory *new;
+
+	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
+	if (new == NULL)
+		return NULL;
+
+	new->key = agp_get_key();
+
+	if (new->key < 0) {
+		kfree(new);
+		return NULL;
+	}
+
+	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
+
+	if (new->pages == NULL) {
+		agp_free_key(new->key);
+		kfree(new);
+		return NULL;
+	}
+	new->num_scratch_pages = scratch_pages;
+	new->type = AGP_NORMAL_MEMORY;
+	return new;
+}
+EXPORT_SYMBOL(agp_create_memory);
+
+/**
+ *	agp_free_memory - free memory associated with an agp_memory pointer.
+ *
+ *	@curr:		agp_memory pointer to be freed.
+ *
+ *	This is the only function that can be called when the backend is not
+ *	owned by the caller, so that memory can be freed on client death.
+ */
+void agp_free_memory(struct agp_memory *curr)
+{
+	size_t i;
+
+	if (curr == NULL)
+		return;
+
+	if (curr->is_bound)
+		agp_unbind_memory(curr);
+
+	if (curr->type >= AGP_USER_TYPES) {
+		agp_generic_free_by_type(curr);
+		return;
+	}
+
+	if (curr->type != 0) {
+		curr->bridge->driver->free_by_type(curr);
+		return;
+	}
+	if (curr->page_count != 0) {
+		if (curr->bridge->driver->agp_destroy_pages) {
+			curr->bridge->driver->agp_destroy_pages(curr);
+		} else {
+
+			for (i = 0; i < curr->page_count; i++) {
+				curr->bridge->driver->agp_destroy_page(
+					curr->pages[i],
+					AGP_PAGE_DESTROY_UNMAP);
+			}
+			for (i = 0; i < curr->page_count; i++) {
+				curr->bridge->driver->agp_destroy_page(
+					curr->pages[i],
+					AGP_PAGE_DESTROY_FREE);
+			}
+		}
+	}
+	agp_free_key(curr->key);
+	agp_free_page_array(curr);
+	kfree(curr);
+}
+EXPORT_SYMBOL(agp_free_memory);
+
+#define ENTRIES_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
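+
+/*
+ * For example (illustrative arithmetic): with 4KB pages and 8-byte
+ * entries, ENTRIES_PER_PAGE is 512, so a 1000-page allocation below
+ * rounds up to two scratch pages.
+ */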
+
+/**
+ *	agp_allocate_memory  -  allocate a group of pages of a certain type.
+ *
+ *	@page_count:	size_t argument of the number of pages
+ *	@type:	u32 argument of the type of memory to be allocated.
+ *
+ *	Every AGP bridge device allows you to allocate AGP_NORMAL_MEMORY, which
+ *	maps to physical RAM.  Any other type is device-dependent.
+ *
+ *	It returns NULL whenever memory is unavailable.
+ */
+struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
+					size_t page_count, u32 type)
+{
+	int scratch_pages;
+	struct agp_memory *new;
+	size_t i;
+	int cur_memory;
+
+	if (!bridge)
+		return NULL;
+
+	cur_memory = atomic_read(&bridge->current_memory_agp);
+	if ((cur_memory + page_count > bridge->max_memory_agp) ||
+	    (cur_memory + page_count < page_count))
+		return NULL;
+
+	if (type >= AGP_USER_TYPES) {
+		new = agp_generic_alloc_user(page_count, type);
+		if (new)
+			new->bridge = bridge;
+		return new;
+	}
+
+	if (type != 0) {
+		new = bridge->driver->alloc_by_type(page_count, type);
+		if (new)
+			new->bridge = bridge;
+		return new;
+	}
+
+	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+
+	new = agp_create_memory(scratch_pages);
+
+	if (new == NULL)
+		return NULL;
+
+	if (bridge->driver->agp_alloc_pages) {
+		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
+			agp_free_memory(new);
+			return NULL;
+		}
+		new->bridge = bridge;
+		return new;
+	}
+
+	for (i = 0; i < page_count; i++) {
+		struct page *page = bridge->driver->agp_alloc_page(bridge);
+
+		if (page == NULL) {
+			agp_free_memory(new);
+			return NULL;
+		}
+		new->pages[i] = page;
+		new->page_count++;
+	}
+	new->bridge = bridge;
+
+	return new;
+}
+EXPORT_SYMBOL(agp_allocate_memory);
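+
+/*
+ * Illustrative sketch (an assumption, not taken from this file): a kernel
+ * client such as a DRM driver might allocate sixteen pages of normal AGP
+ * memory like so, checking for failure:
+ *
+ *	struct agp_memory *mem;
+ *
+ *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
+ *	if (mem == NULL)
+ *		return -ENOMEM;
+ */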
+
+
+/* End - Generic routines for handling agp_memory structures */
+
+
+static int agp_return_size(void)
+{
+	int current_size;
+	void *temp;
+
+	temp = agp_bridge->current_size;
+
+	switch (agp_bridge->driver->size_type) {
+	case U8_APER_SIZE:
+		current_size = A_SIZE_8(temp)->size;
+		break;
+	case U16_APER_SIZE:
+		current_size = A_SIZE_16(temp)->size;
+		break;
+	case U32_APER_SIZE:
+		current_size = A_SIZE_32(temp)->size;
+		break;
+	case LVL2_APER_SIZE:
+		current_size = A_SIZE_LVL2(temp)->size;
+		break;
+	case FIXED_APER_SIZE:
+		current_size = A_SIZE_FIX(temp)->size;
+		break;
+	default:
+		current_size = 0;
+		break;
+	}
+
+	current_size -= (agp_memory_reserved / (1024*1024));
+	if (current_size < 0)
+		current_size = 0;
+	return current_size;
+}
+
+
+int agp_num_entries(void)
+{
+	int num_entries;
+	void *temp;
+
+	temp = agp_bridge->current_size;
+
+	switch (agp_bridge->driver->size_type) {
+	case U8_APER_SIZE:
+		num_entries = A_SIZE_8(temp)->num_entries;
+		break;
+	case U16_APER_SIZE:
+		num_entries = A_SIZE_16(temp)->num_entries;
+		break;
+	case U32_APER_SIZE:
+		num_entries = A_SIZE_32(temp)->num_entries;
+		break;
+	case LVL2_APER_SIZE:
+		num_entries = A_SIZE_LVL2(temp)->num_entries;
+		break;
+	case FIXED_APER_SIZE:
+		num_entries = A_SIZE_FIX(temp)->num_entries;
+		break;
+	default:
+		num_entries = 0;
+		break;
+	}
+
+	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
+	if (num_entries < 0)
+		num_entries = 0;
+	return num_entries;
+}
+EXPORT_SYMBOL_GPL(agp_num_entries);
+
+
+/**
+ *	agp_copy_info  -  copy bridge state information
+ *
+ *	@info:		agp_kern_info pointer.  The caller should ensure that this pointer is valid.
+ *
+ *	This function copies information about the agp bridge device and the state of
+ *	the agp backend into an agp_kern_info pointer.
+ */
+int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
+{
+	memset(info, 0, sizeof(struct agp_kern_info));
+	if (!bridge) {
+		info->chipset = NOT_SUPPORTED;
+		return -EIO;
+	}
+
+	info->version.major = bridge->version->major;
+	info->version.minor = bridge->version->minor;
+	info->chipset = SUPPORTED;
+	info->device = bridge->dev;
+	if (bridge->mode & AGPSTAT_MODE_3_0)
+		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
+	else
+		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
+	info->aper_base = bridge->gart_bus_addr;
+	info->aper_size = agp_return_size();
+	info->max_memory = bridge->max_memory_agp;
+	info->current_memory = atomic_read(&bridge->current_memory_agp);
+	info->cant_use_aperture = bridge->driver->cant_use_aperture;
+	info->vm_ops = bridge->vm_ops;
+	info->page_mask = ~0UL;
+	return 0;
+}
+EXPORT_SYMBOL(agp_copy_info);
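+
+/*
+ * Illustrative use (an assumption, not taken from this file): a caller can
+ * snapshot the backend state, e.g. to report the aperture:
+ *
+ *	struct agp_kern_info info;
+ *
+ *	if (agp_copy_info(bridge, &info) == 0)
+ *		pr_info("aperture at 0x%lx, %zd MB\n",
+ *			info.aper_base, info.aper_size);
+ */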
+
+/* End - Routine to copy over information structure */
+
+/*
+ * Routines for handling swapping of agp_memory into the GATT -
+ * These routines take agp_memory and insert them into the GATT.
+ * They call device specific routines to actually write to the GATT.
+ */
+
+/**
+ *	agp_bind_memory  -  Bind an agp_memory structure into the GATT.
+ *
+ *	@curr:		agp_memory pointer
+ *	@pg_start:	an offset into the graphics aperture translation table
+ *
+ *	It returns -EINVAL if the pointer == NULL.
+ *	It returns -EBUSY if the area of the table requested is already in use.
+ */
+int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
+{
+	int ret_val;
+
+	if (curr == NULL)
+		return -EINVAL;
+
+	if (curr->is_bound) {
+		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
+		return -EINVAL;
+	}
+	if (!curr->is_flushed) {
+		curr->bridge->driver->cache_flush();
+		curr->is_flushed = true;
+	}
+
+	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
+
+	if (ret_val != 0)
+		return ret_val;
+
+	curr->is_bound = true;
+	curr->pg_start = pg_start;
+	spin_lock(&agp_bridge->mapped_lock);
+	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
+	spin_unlock(&agp_bridge->mapped_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(agp_bind_memory);
+
+
+/**
+ *	agp_unbind_memory  -  Removes an agp_memory structure from the GATT
+ *
+ * @curr:	agp_memory pointer to be removed from the GATT.
+ *
+ * It returns -EINVAL if this piece of agp_memory is not currently bound to
+ * the graphics aperture translation table or if the agp_memory pointer == NULL.
+ */
+int agp_unbind_memory(struct agp_memory *curr)
+{
+	int ret_val;
+
+	if (curr == NULL)
+		return -EINVAL;
+
+	if (!curr->is_bound) {
+		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
+		return -EINVAL;
+	}
+
+	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
+
+	if (ret_val != 0)
+		return ret_val;
+
+	curr->is_bound = false;
+	curr->pg_start = 0;
+	spin_lock(&curr->bridge->mapped_lock);
+	list_del(&curr->mapped_list);
+	spin_unlock(&curr->bridge->mapped_lock);
+	return 0;
+}
+EXPORT_SYMBOL(agp_unbind_memory);
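+
+/*
+ * Illustrative lifecycle sketch (an assumption, not taken from this file):
+ * bind at aperture page offset 0, use the mapping, then unbind and free:
+ *
+ *	if (agp_bind_memory(mem, 0) == 0) {
+ *		... use the aperture ...
+ *		agp_unbind_memory(mem);
+ *	}
+ *	agp_free_memory(mem);
+ */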
+
+
+/* End - Routines for handling swapping of agp_memory into the GATT */
+
+
+/* Generic Agp routines - Start */
+static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
+{
+	u32 tmp;
+
+	if (*requested_mode & AGP2_RESERVED_MASK) {
+		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
+			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
+		*requested_mode &= ~AGP2_RESERVED_MASK;
+	}
+
+	/*
+	 * Some dumb bridges are programmed to disobey the AGP2 spec.
+	 * This is likely BIOS misprogramming rather than a power-on default, or
+	 * it would be a lot more common.
+	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
+	 * AGPv2 spec 6.1.9 states:
+	 *   The RATE field indicates the data transfer rates supported by this
+	 *   device. A.G.P. devices must report all that apply.
+	 * Fix them up as best we can.
+	 */
+	switch (*bridge_agpstat & 7) {
+	case 4:
+		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
+		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
+			"Fixing up support for x2 & x1\n");
+		break;
+	case 2:
+		*bridge_agpstat |= AGPSTAT2_1X;
+		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
+			"Fixing up support for x1\n");
+		break;
+	default:
+		break;
+	}
+
+	/* Check that the speed bits make sense. Only one should be set. */
+	tmp = *requested_mode & 7;
+	switch (tmp) {
+		case 0:
+			printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
+			*requested_mode |= AGPSTAT2_1X;
+			break;
+		case 1:
+		case 2:
+			break;
+		case 3:
+			*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
+			break;
+		case 4:
+			break;
+		case 5:
+		case 6:
+		case 7:
+			*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/
+			break;
+	}
+
+	/* disable SBA if it's not supported */
+	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
+		*bridge_agpstat &= ~AGPSTAT_SBA;
+
+	/* Set rate */
+	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
+		*bridge_agpstat &= ~AGPSTAT2_4X;
+
+	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
+		*bridge_agpstat &= ~AGPSTAT2_2X;
+
+	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
+		*bridge_agpstat &= ~AGPSTAT2_1X;
+
+	/* Now we know what mode it should be, clear out the unwanted bits. */
+	if (*bridge_agpstat & AGPSTAT2_4X)
+		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */
+
+	if (*bridge_agpstat & AGPSTAT2_2X)
+		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */
+
+	if (*bridge_agpstat & AGPSTAT2_1X)
+		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */
+
+	/* Apply any errata. */
+	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
+		*bridge_agpstat &= ~AGPSTAT_FW;
+
+	if (agp_bridge->flags & AGP_ERRATA_SBA)
+		*bridge_agpstat &= ~AGPSTAT_SBA;
+
+	if (agp_bridge->flags & AGP_ERRATA_1X) {
+		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
+		*bridge_agpstat |= AGPSTAT2_1X;
+	}
+
+	/* If we've dropped down to 1X, disable fast writes. */
+	if (*bridge_agpstat & AGPSTAT2_1X)
+		*bridge_agpstat &= ~AGPSTAT_FW;
+}
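+
+/*
+ * Worked example (illustrative): if X requests mode 3 (1X|2X set), the
+ * rate-sanity switch above clears the 1X bit, and the masking that follows
+ * leaves only the fastest rate (2X) that the bridge, the card and the
+ * request all support.
+ */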
+
+/*
+ * requested_mode = Mode requested by (typically) X.
+ * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
+ * vga_agpstat = PCI_AGP_STATUS from graphic card.
+ */
+static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
+{
+	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
+	u32 tmp;
+
+	if (*requested_mode & AGP3_RESERVED_MASK) {
+		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
+			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
+		*requested_mode &= ~AGP3_RESERVED_MASK;
+	}
+
+	/* Check that the speed bits make sense. */
+	tmp = *requested_mode & 7;
+	if (tmp == 0) {
+		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
+		*requested_mode |= AGPSTAT3_4X;
+	}
+	if (tmp >= 3) {
+		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
+		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
+	}
+
+	/* ARQSZ - Set the value to the maximum one.
+	 * Don't allow the mode register to override values. */
+	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
+		max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));
+
+	/* Calibration cycle.
+	 * Don't allow the mode register to override values. */
+	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
+		min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));
+
+	/* SBA *must* be supported for AGP v3 */
+	*bridge_agpstat |= AGPSTAT_SBA;
+
+	/*
+	 * Set speed.
+	 * Check for invalid speeds. This can happen when applications
+	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
+	 */
+	if (*requested_mode & AGPSTAT_MODE_3_0) {
+		/*
+		 * The caller doesn't know what it is doing.  The bridge is in
+		 * 3.0 mode and we were passed a 3.0 mode, but with 2.x speed
+		 * bits set.  Convert AGP2.x 4x -> AGP3.0 4x.
+		 */
+		if (*requested_mode & AGPSTAT2_4X) {
+			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
+						current->comm, *requested_mode);
+			*requested_mode &= ~AGPSTAT2_4X;
+			*requested_mode |= AGPSTAT3_4X;
+		}
+	} else {
+		/*
+		 * The caller doesn't know what they are doing. We are in 3.0 mode,
+		 * but have been passed an AGP 2.x mode.
+		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
+		 */
+		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
+					current->comm, *requested_mode);
+		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
+		*requested_mode |= AGPSTAT3_4X;
+	}
+
+	if (*requested_mode & AGPSTAT3_8X) {
+		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
+			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+			*bridge_agpstat |= AGPSTAT3_4X;
+			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
+			return;
+		}
+		if (!(*vga_agpstat & AGPSTAT3_8X)) {
+			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+			*bridge_agpstat |= AGPSTAT3_4X;
+			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
+			return;
+		}
+		/* All set, bridge & device can do AGP x8 */
+		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+		goto done;
+
+	} else if (*requested_mode & AGPSTAT3_4X) {
+		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+		*bridge_agpstat |= AGPSTAT3_4X;
+		goto done;
+
+	} else {
+
+		/*
+		 * If no AGP mode was specified, see if both the graphics
+		 * card and the bridge can do x8, and use it if so.
+		 * If not, fall back to x4 mode.
+		 */
+		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
+			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
+				"supported by bridge & card (x8).\n");
+			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+		} else {
+			printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
+			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
+				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
+					*bridge_agpstat, origbridge);
+				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+				*bridge_agpstat |= AGPSTAT3_4X;
+			}
+			if (!(*vga_agpstat & AGPSTAT3_8X)) {
+				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
+					*vga_agpstat, origvga);
+				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+				*vga_agpstat |= AGPSTAT3_4X;
+			}
+		}
+	}
+
+done:
+	/* Apply any errata. */
+	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
+		*bridge_agpstat &= ~AGPSTAT_FW;
+
+	if (agp_bridge->flags & AGP_ERRATA_SBA)
+		*bridge_agpstat &= ~AGPSTAT_SBA;
+
+	if (agp_bridge->flags & AGP_ERRATA_1X) {
+		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
+		*bridge_agpstat |= AGPSTAT2_1X;
+	}
+}
+
+
+/**
+ * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
+ * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
+ * @requested_mode: requested agp_stat from userspace (Typically from X)
+ * @bridge_agpstat: current agp_stat from AGP bridge.
+ *
+ * This function will hunt for an AGP graphics card, and try to match
+ * the requested mode to the capabilities of both the bridge and the card.
+ */
+u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
+{
+	struct pci_dev *device = NULL;
+	u32 vga_agpstat;
+	u8 cap_ptr;
+
+	for (;;) {
+		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
+		if (!device) {
+			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
+			return 0;
+		}
+		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
+		if (cap_ptr)
+			break;
+	}
+
+	/*
+	 * OK, here we have an AGP device. Disable impossible
+	 * settings, and adjust the read queue to the minimum.
+	 */
+	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);
+
+	/* adjust RQ depth */
+	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
+	     min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
+		 min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));
+
+	/* disable FW if it's not supported */
+	if (!((bridge_agpstat & AGPSTAT_FW) &&
+		 (vga_agpstat & AGPSTAT_FW) &&
+		 (requested_mode & AGPSTAT_FW)))
+		bridge_agpstat &= ~AGPSTAT_FW;
+
+	/* Check to see if we are operating in 3.0 mode */
+	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
+		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
+	else
+		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
+
+	pci_dev_put(device);
+	return bridge_agpstat;
+}
+EXPORT_SYMBOL(agp_collect_device_status);
+
+
+void agp_device_command(u32 bridge_agpstat, bool agp_v3)
+{
+	struct pci_dev *device = NULL;
+	int mode;
+
+	mode = bridge_agpstat & 0x7;
+	if (agp_v3)
+		mode *= 4;
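+	/* e.g. an AGP3 rate field of 2 reports as 8x; AGP2 fields report 1x/2x/4x directly */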
+
+	for_each_pci_dev(device) {
+		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
+		if (!agp)
+			continue;
+
+		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
+			 agp_v3 ? 3 : 2, mode);
+		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
+	}
+}
+EXPORT_SYMBOL(agp_device_command);
+
+
+void get_agp_version(struct agp_bridge_data *bridge)
+{
+	u32 ncapid;
+
+	/* Exit early if already set by errata workarounds. */
+	if (bridge->major_version != 0)
+		return;
+
+	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
+	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
+	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
+}
+EXPORT_SYMBOL(get_agp_version);
+
+
+void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
+{
+	u32 bridge_agpstat, temp;
+
+	get_agp_version(agp_bridge);
+
+	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
+		 agp_bridge->major_version, agp_bridge->minor_version);
+
+	pci_read_config_dword(agp_bridge->dev,
+		      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
+
+	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
+	if (bridge_agpstat == 0)
+		/* Something bad happened. FIXME: Return error code? */
+		return;
+
+	bridge_agpstat |= AGPSTAT_AGP_ENABLE;
+
+	/* Do AGP version specific frobbing. */
+	if (bridge->major_version >= 3) {
+		if (bridge->mode & AGPSTAT_MODE_3_0) {
+			/* If we have 3.5, we can do the isoch stuff. */
+			if (bridge->minor_version >= 5)
+				agp_3_5_enable(bridge);
+			agp_device_command(bridge_agpstat, true);
+			return;
+		} else {
+			/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
+			bridge_agpstat &= ~(7<<10);
+			pci_read_config_dword(bridge->dev,
+					      bridge->capndx+AGPCTRL, &temp);
+			temp |= (1<<9);
+			pci_write_config_dword(bridge->dev,
+					       bridge->capndx+AGPCTRL, temp);
+
+			dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
+		}
+	}
+
+	/* AGP v<3 */
+	agp_device_command(bridge_agpstat, false);
+}
+EXPORT_SYMBOL(agp_generic_enable);
+
+
+int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	char *table;
+	char *table_end;
+	int size;
+	int page_order;
+	int num_entries;
+	int i;
+	void *temp;
+	struct page *page;
+
+	/* The generic routines can't handle 2-level GATTs */
+	if (bridge->driver->size_type == LVL2_APER_SIZE)
+		return -EINVAL;
+
+	table = NULL;
+	i = bridge->aperture_size_idx;
+	temp = bridge->current_size;
+	size = page_order = num_entries = 0;
+
+	if (bridge->driver->size_type != FIXED_APER_SIZE) {
+		do {
+			switch (bridge->driver->size_type) {
+			case U8_APER_SIZE:
+				size = A_SIZE_8(temp)->size;
+				page_order =
+				    A_SIZE_8(temp)->page_order;
+				num_entries =
+				    A_SIZE_8(temp)->num_entries;
+				break;
+			case U16_APER_SIZE:
+				size = A_SIZE_16(temp)->size;
+				page_order = A_SIZE_16(temp)->page_order;
+				num_entries = A_SIZE_16(temp)->num_entries;
+				break;
+			case U32_APER_SIZE:
+				size = A_SIZE_32(temp)->size;
+				page_order = A_SIZE_32(temp)->page_order;
+				num_entries = A_SIZE_32(temp)->num_entries;
+				break;
+				/* This case will never really happen. */
+			case FIXED_APER_SIZE:
+			case LVL2_APER_SIZE:
+			default:
+				size = page_order = num_entries = 0;
+				break;
+			}
+
+			table = alloc_gatt_pages(page_order);
+
+			if (table == NULL) {
+				i++;
+				switch (bridge->driver->size_type) {
+				case U8_APER_SIZE:
+					bridge->current_size = A_IDX8(bridge);
+					break;
+				case U16_APER_SIZE:
+					bridge->current_size = A_IDX16(bridge);
+					break;
+				case U32_APER_SIZE:
+					bridge->current_size = A_IDX32(bridge);
+					break;
+				/* These cases will never really happen. */
+				case FIXED_APER_SIZE:
+				case LVL2_APER_SIZE:
+				default:
+					break;
+				}
+				temp = bridge->current_size;
+			} else {
+				bridge->aperture_size_idx = i;
+			}
+		} while (!table && (i < bridge->driver->num_aperture_sizes));
+	} else {
+		size = ((struct aper_size_info_fixed *) temp)->size;
+		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
+		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
+		table = alloc_gatt_pages(page_order);
+	}
+
+	if (table == NULL)
+		return -ENOMEM;
+
+	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+		SetPageReserved(page);
+
+	bridge->gatt_table_real = (u32 *) table;
+	agp_gatt_table = (void *)table;
+
+	bridge->driver->cache_flush();
+#ifdef CONFIG_X86
+	if (set_memory_uc((unsigned long)table, 1 << page_order))
+		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
+
+	bridge->gatt_table = (u32 __iomem *)table;
+#else
+	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
+					(PAGE_SIZE * (1 << page_order)));
+	bridge->driver->cache_flush();
+#endif
+
+	if (bridge->gatt_table == NULL) {
+		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+			ClearPageReserved(page);
+
+		free_gatt_pages(table, page_order);
+
+		return -ENOMEM;
+	}
+	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
+
+	/* AK: bogus, should encode addresses > 4GB */
+	for (i = 0; i < num_entries; i++) {
+		writel(bridge->scratch_page, bridge->gatt_table+i);
+		readl(bridge->gatt_table+i);	/* PCI Posting. */
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(agp_generic_create_gatt_table);
+
+int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
+{
+	int page_order;
+	char *table, *table_end;
+	void *temp;
+	struct page *page;
+
+	temp = bridge->current_size;
+
+	switch (bridge->driver->size_type) {
+	case U8_APER_SIZE:
+		page_order = A_SIZE_8(temp)->page_order;
+		break;
+	case U16_APER_SIZE:
+		page_order = A_SIZE_16(temp)->page_order;
+		break;
+	case U32_APER_SIZE:
+		page_order = A_SIZE_32(temp)->page_order;
+		break;
+	case FIXED_APER_SIZE:
+		page_order = A_SIZE_FIX(temp)->page_order;
+		break;
+	case LVL2_APER_SIZE:
+		/* The generic routines can't deal with 2-level GATTs */
+		return -EINVAL;
+	default:
+		page_order = 0;
+		break;
+	}
+
+	/* Do not worry about freeing memory, because if this is
+	 * called, then all agp memory is deallocated and removed
+	 * from the table. */
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
+#else
+	iounmap(bridge->gatt_table);
+#endif
+	table = (char *) bridge->gatt_table_real;
+	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+		ClearPageReserved(page);
+
+	free_gatt_pages(bridge->gatt_table_real, page_order);
+
+	agp_gatt_table = NULL;
+	bridge->gatt_table = NULL;
+	bridge->gatt_table_real = NULL;
+	bridge->gatt_bus_addr = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL(agp_generic_free_gatt_table);
+
+
+int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
+{
+	int num_entries;
+	size_t i;
+	off_t j;
+	void *temp;
+	struct agp_bridge_data *bridge;
+	int mask_type;
+
+	bridge = mem->bridge;
+	if (!bridge)
+		return -EINVAL;
+
+	if (mem->page_count == 0)
+		return 0;
+
+	temp = bridge->current_size;
+
+	switch (bridge->driver->size_type) {
+	case U8_APER_SIZE:
+		num_entries = A_SIZE_8(temp)->num_entries;
+		break;
+	case U16_APER_SIZE:
+		num_entries = A_SIZE_16(temp)->num_entries;
+		break;
+	case U32_APER_SIZE:
+		num_entries = A_SIZE_32(temp)->num_entries;
+		break;
+	case FIXED_APER_SIZE:
+		num_entries = A_SIZE_FIX(temp)->num_entries;
+		break;
+	case LVL2_APER_SIZE:
+		/* The generic routines can't deal with 2-level GATTs */
+		return -EINVAL;
+	default:
+		num_entries = 0;
+		break;
+	}
+
+	num_entries -= agp_memory_reserved/PAGE_SIZE;
+	if (num_entries < 0)
+		num_entries = 0;
+
+	if (type != mem->type)
+		return -EINVAL;
+
+	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+	if (mask_type != 0) {
+		/* The generic routines know nothing of memory types */
+		return -EINVAL;
+	}
+
+	if (((pg_start + mem->page_count) > num_entries) ||
+	    ((pg_start + mem->page_count) < pg_start))
+		return -EINVAL;
+
+	j = pg_start;
+
+	while (j < (pg_start + mem->page_count)) {
+		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
+			return -EBUSY;
+		j++;
+	}
+
+	if (!mem->is_flushed) {
+		bridge->driver->cache_flush();
+		mem->is_flushed = true;
+	}
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		writel(bridge->driver->mask_memory(bridge,
+						   page_to_phys(mem->pages[i]),
+						   mask_type),
+		       bridge->gatt_table+j);
+	}
+	readl(bridge->gatt_table+j-1);	/* PCI Posting. */
+
+	bridge->driver->tlb_flush(mem);
+	return 0;
+}
+EXPORT_SYMBOL(agp_generic_insert_memory);
+
+
+int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+	size_t i;
+	struct agp_bridge_data *bridge;
+	int mask_type, num_entries;
+
+	bridge = mem->bridge;
+	if (!bridge)
+		return -EINVAL;
+
+	if (mem->page_count == 0)
+		return 0;
+
+	if (type != mem->type)
+		return -EINVAL;
+
+	num_entries = agp_num_entries();
+	if (((pg_start + mem->page_count) > num_entries) ||
+	    ((pg_start + mem->page_count) < pg_start))
+		return -EINVAL;
+
+	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+	if (mask_type != 0) {
+		/* The generic routines know nothing of memory types */
+		return -EINVAL;
+	}
+
+	/* AK: bogus, should encode addresses > 4GB */
+	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+		writel(bridge->scratch_page, bridge->gatt_table+i);
+	}
+	readl(bridge->gatt_table+i-1);	/* PCI Posting. */
+
+	bridge->driver->tlb_flush(mem);
+	return 0;
+}
+EXPORT_SYMBOL(agp_generic_remove_memory);
+
+struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
+{
+	return NULL;
+}
+EXPORT_SYMBOL(agp_generic_alloc_by_type);
+
+void agp_generic_free_by_type(struct agp_memory *curr)
+{
+	agp_free_page_array(curr);
+	agp_free_key(curr->key);
+	kfree(curr);
+}
+EXPORT_SYMBOL(agp_generic_free_by_type);
+
+struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
+{
+	struct agp_memory *new;
+	int i;
+	int pages;
+
+	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+	new = agp_create_user_memory(page_count);
+	if (new == NULL)
+		return NULL;
+
+	for (i = 0; i < page_count; i++)
+		new->pages[i] = NULL;
+	new->page_count = 0;
+	new->type = type;
+	new->num_scratch_pages = pages;
+
+	return new;
+}
+EXPORT_SYMBOL(agp_generic_alloc_user);
+
+/*
+ * Basic Page Allocation Routines -
+ * These routines handle page allocation and by default they reserve the
+ * allocated memory.  They also handle incrementing the current_memory_agp
+ * value, which is checked against a maximum value.
+ */
+
+int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
+{
+	struct page * page;
+	int i, ret = -ENOMEM;
+
+	for (i = 0; i < num_pages; i++) {
+		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+		/* agp_free_memory() needs gart address */
+		if (page == NULL)
+			goto out;
+
+#ifndef CONFIG_X86
+		map_page_into_agp(page);
+#endif
+		get_page(page);
+		atomic_inc(&agp_bridge->current_memory_agp);
+
+		mem->pages[i] = page;
+		mem->page_count++;
+	}
+
+#ifdef CONFIG_X86
+	set_pages_array_uc(mem->pages, num_pages);
+#endif
+	ret = 0;
+out:
+	return ret;
+}
+EXPORT_SYMBOL(agp_generic_alloc_pages);
+
+struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
+{
+	struct page * page;
+
+	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+	if (page == NULL)
+		return NULL;
+
+	map_page_into_agp(page);
+
+	get_page(page);
+	atomic_inc(&agp_bridge->current_memory_agp);
+	return page;
+}
+EXPORT_SYMBOL(agp_generic_alloc_page);
+
+void agp_generic_destroy_pages(struct agp_memory *mem)
+{
+	int i;
+	struct page *page;
+
+	if (!mem)
+		return;
+
+#ifdef CONFIG_X86
+	set_pages_array_wb(mem->pages, mem->page_count);
+#endif
+
+	for (i = 0; i < mem->page_count; i++) {
+		page = mem->pages[i];
+
+#ifndef CONFIG_X86
+		unmap_page_from_agp(page);
+#endif
+		put_page(page);
+		__free_page(page);
+		atomic_dec(&agp_bridge->current_memory_agp);
+		mem->pages[i] = NULL;
+	}
+}
+EXPORT_SYMBOL(agp_generic_destroy_pages);
+
+void agp_generic_destroy_page(struct page *page, int flags)
+{
+	if (page == NULL)
+		return;
+
+	if (flags & AGP_PAGE_DESTROY_UNMAP)
+		unmap_page_from_agp(page);
+
+	if (flags & AGP_PAGE_DESTROY_FREE) {
+		put_page(page);
+		__free_page(page);
+		atomic_dec(&agp_bridge->current_memory_agp);
+	}
+}
+EXPORT_SYMBOL(agp_generic_destroy_page);
+
+/* End Basic Page Allocation Routines */
+
+
+/**
+ * agp_enable  -  initialise the agp point-to-point connection.
+ *
+ * @mode:	agp mode register value to configure with.
+ */
+void agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+	if (!bridge)
+		return;
+	bridge->driver->agp_enable(bridge, mode);
+}
+EXPORT_SYMBOL(agp_enable);
+
+/* When we remove the global variable agp_bridge from all drivers,
+ * agp_alloc_bridge and agp_generic_find_bridge will need to be updated.
+ */
+
+struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
+{
+	if (list_empty(&agp_bridges))
+		return NULL;
+
+	return agp_bridge;
+}
+
+static void ipi_handler(void *null)
+{
+	flush_agp_cache();
+}
+
+void global_cache_flush(void)
+{
+	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
+		panic(PFX "timed out waiting for the other CPUs!\n");
+}
+EXPORT_SYMBOL(global_cache_flush);
+
+unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
+				      dma_addr_t addr, int type)
+{
+	/* memory type is ignored in the generic routine */
+	if (bridge->driver->masks)
+		return addr | bridge->driver->masks[0].mask;
+	else
+		return addr;
+}
+EXPORT_SYMBOL(agp_generic_mask_memory);
+
+int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
+				  int type)
+{
+	if (type >= AGP_USER_TYPES)
+		return 0;
+	return type;
+}
+EXPORT_SYMBOL(agp_generic_type_to_mask_type);
+
+/*
+ * These functions are implemented according to the AGPv3 spec,
+ * which covers implementation details that had previously been
+ * left open.
+ */
+
+int agp3_generic_fetch_size(void)
+{
+	u16 temp_size;
+	int i;
+	struct aper_size_info_16 *values;
+
+	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
+	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
+
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (temp_size == values[i].size_value) {
+			agp_bridge->previous_size =
+				agp_bridge->current_size = (void *) (values + i);
+
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(agp3_generic_fetch_size);
+
+void agp3_generic_tlbflush(struct agp_memory *mem)
+{
+	u32 ctrl;
+	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
+	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
+	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
+}
+EXPORT_SYMBOL(agp3_generic_tlbflush);
+
+int agp3_generic_configure(void)
+{
+	u32 temp;
+	struct aper_size_info_16 *current_size;
+
+	current_size = A_SIZE_16(agp_bridge->current_size);
+
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+	/* set aperture size */
+	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
+	/* set gart pointer */
+	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
+	/* enable aperture and GTLB */
+	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
+	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
+	return 0;
+}
+EXPORT_SYMBOL(agp3_generic_configure);
+
+void agp3_generic_cleanup(void)
+{
+	u32 ctrl;
+	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
+	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
+}
+EXPORT_SYMBOL(agp3_generic_cleanup);
+
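+/*
+ * Each entry below is {aperture size in MB, number of GATT entries,
+ * page order, AGPAPSIZE register value}, matching the aper_size_info_16
+ * fields used by agp3_generic_fetch_size() above.
+ */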
+const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
+{
+	{4096, 1048576, 10, 0x000},
+	{2048,  524288, 9, 0x800},
+	{1024,  262144, 8, 0xc00},
+	{ 512,  131072, 7, 0xe00},
+	{ 256,   65536, 6, 0xf00},
+	{ 128,   32768, 5, 0xf20},
+	{  64,   16384, 4, 0xf30},
+	{  32,    8192, 3, 0xf38},
+	{  16,    4096, 2, 0xf3c},
+	{   8,    2048, 1, 0xf3e},
+	{   4,    1024, 0, 0xf3f}
+};
+EXPORT_SYMBOL(agp3_generic_sizes);
+
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
new file mode 100644
index 0000000..3695773
--- /dev/null
+++ b/drivers/char/agp/hp-agp.c
@@ -0,0 +1,553 @@
+/*
+ * HP zx1 AGPGART routines.
+ *
+ * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
+ *	Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+
+#include <asm/acpi-ext.h>
+
+#include "agp.h"
+
+#define HP_ZX1_IOC_OFFSET	0x1000  /* ACPI reports SBA, we want IOC */
+
+/* HP ZX1 IOC registers */
+#define HP_ZX1_IBASE		0x300
+#define HP_ZX1_IMASK		0x308
+#define HP_ZX1_PCOM		0x310
+#define HP_ZX1_TCNFG		0x318
+#define HP_ZX1_PDIR_BASE	0x320
+
+#define HP_ZX1_IOVA_BASE	GB(1UL)
+#define HP_ZX1_IOVA_SIZE	GB(1UL)
+#define HP_ZX1_GART_SIZE	(HP_ZX1_IOVA_SIZE / 2)
+#define HP_ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL
+
+#define HP_ZX1_PDIR_VALID_BIT	0x8000000000000000UL
+#define HP_ZX1_IOVA_TO_PDIR(va)	((va - hp_private.iova_base) >> hp_private.io_tlb_shift)
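+/*
+ * e.g. (illustrative numbers): with iova_base at 1GB and 4KB IO pages
+ * (io_tlb_shift == 12), an IOVA of 1GB + 8KB maps to PDIR index 2.
+ */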
+
+#define AGP8X_MODE_BIT		3
+#define AGP8X_MODE		(1 << AGP8X_MODE_BIT)
+
+/* The AGP bridge need not be a PCI device, but DRM thinks it is. */
+static struct pci_dev fake_bridge_dev;
+
+static int hp_zx1_gart_found;
+
+static struct aper_size_info_fixed hp_zx1_sizes[] =
+{
+	{0, 0, 0},		/* filled in by hp_zx1_fetch_size() */
+};
+
+static struct gatt_mask hp_zx1_masks[] =
+{
+	{.mask = HP_ZX1_PDIR_VALID_BIT, .type = 0}
+};
+
+static struct _hp_private {
+	volatile u8 __iomem *ioc_regs;
+	volatile u8 __iomem *lba_regs;
+	int lba_cap_offset;
+	u64 *io_pdir;		// PDIR for entire IOVA
+	u64 *gatt;		// PDIR just for GART (subset of above)
+	u64 gatt_entries;
+	u64 iova_base;
+	u64 gart_base;
+	u64 gart_size;
+	u64 io_pdir_size;
+	int io_pdir_owner;	// do we own it, or share it with sba_iommu?
+	int io_page_size;
+	int io_tlb_shift;
+	int io_tlb_ps;		// IOC ps config
+	int io_pages_per_kpage;
+} hp_private;
+
+static int __init hp_zx1_ioc_shared(void)
+{
+	struct _hp_private *hp = &hp_private;
+
+	printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");
+
+	/*
+	 * IOC already configured by sba_iommu module; just use
+	 * its setup.  We assume:
+	 *	- IOVA space is 1GB in size
+	 *	- first 512MB is IOMMU, second 512MB is GART
+	 */
+	hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG);
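+	/* TCNFG page-size encoding: 0 -> 4KB, 1 -> 8KB, 2 -> 16KB, 3 -> 64KB */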
+	switch (hp->io_tlb_ps) {
+		case 0: hp->io_tlb_shift = 12; break;
+		case 1: hp->io_tlb_shift = 13; break;
+		case 2: hp->io_tlb_shift = 14; break;
+		case 3: hp->io_tlb_shift = 16; break;
+		default:
+			printk(KERN_ERR PFX "Invalid IOTLB page size "
+			       "configuration 0x%x\n", hp->io_tlb_ps);
+			hp->gatt = NULL;
+			hp->gatt_entries = 0;
+			return -ENODEV;
+	}
+	hp->io_page_size = 1 << hp->io_tlb_shift;
+	hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
+
+	hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1;
+	hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
+
+	hp->gart_size = HP_ZX1_GART_SIZE;
+	hp->gatt_entries = hp->gart_size / hp->io_page_size;
+
+	hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
+	hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
+
+	if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
+		/* Normal case when no AGP device in system */
+		hp->gatt = NULL;
+		hp->gatt_entries = 0;
+		printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
+		       "GART disabled\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int __init
+hp_zx1_ioc_owner (void)
+{
+	struct _hp_private *hp = &hp_private;
+
+	printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");
+
+	/*
+	 * Select an IOV page size no larger than system page size.
+	 */
+	if (PAGE_SIZE >= KB(64)) {
+		hp->io_tlb_shift = 16;
+		hp->io_tlb_ps = 3;
+	} else if (PAGE_SIZE >= KB(16)) {
+		hp->io_tlb_shift = 14;
+		hp->io_tlb_ps = 2;
+	} else if (PAGE_SIZE >= KB(8)) {
+		hp->io_tlb_shift = 13;
+		hp->io_tlb_ps = 1;
+	} else {
+		hp->io_tlb_shift = 12;
+		hp->io_tlb_ps = 0;
+	}
+	hp->io_page_size = 1 << hp->io_tlb_shift;
+	hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
+
+	hp->iova_base = HP_ZX1_IOVA_BASE;
+	hp->gart_size = HP_ZX1_GART_SIZE;
+	hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;
+
+	hp->gatt_entries = hp->gart_size / hp->io_page_size;
+	hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);
+
+	return 0;
+}
+
+static int __init
+hp_zx1_ioc_init (u64 hpa)
+{
+	struct _hp_private *hp = &hp_private;
+
+	hp->ioc_regs = ioremap(hpa, 1024);
+	if (!hp->ioc_regs)
+		return -ENOMEM;
+
+	/*
+	 * If the IOTLB is currently disabled, we can take it over.
+	 * Otherwise, we have to share with sba_iommu.
+	 */
+	hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0;
+
+	if (hp->io_pdir_owner)
+		return hp_zx1_ioc_owner();
+
+	return hp_zx1_ioc_shared();
+}
+
+static int
+hp_zx1_lba_find_capability (volatile u8 __iomem *hpa, int cap)
+{
+	u16 status;
+	u8 pos, id;
+	int ttl = 48;
+
+	status = readw(hpa+PCI_STATUS);
+	if (!(status & PCI_STATUS_CAP_LIST))
+		return 0;
+	pos = readb(hpa+PCI_CAPABILITY_LIST);
+	while (ttl-- && pos >= 0x40) {
+		pos &= ~3;
+		id = readb(hpa+pos+PCI_CAP_LIST_ID);
+		if (id == 0xff)
+			break;
+		if (id == cap)
+			return pos;
+		pos = readb(hpa+pos+PCI_CAP_LIST_NEXT);
+	}
+	return 0;
+}
+
+static int __init
+hp_zx1_lba_init (u64 hpa)
+{
+	struct _hp_private *hp = &hp_private;
+	int cap;
+
+	hp->lba_regs = ioremap(hpa, 256);
+	if (!hp->lba_regs)
+		return -ENOMEM;
+
+	hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs, PCI_CAP_ID_AGP);
+
+	cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff;
+	if (cap != PCI_CAP_ID_AGP) {
+		printk(KERN_ERR PFX "Invalid capability ID 0x%02x at 0x%x\n",
+		       cap, hp->lba_cap_offset);
+		iounmap(hp->lba_regs);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int
+hp_zx1_fetch_size(void)
+{
+	int size;
+
+	size = hp_private.gart_size / MB(1);
+	hp_zx1_sizes[0].size = size;
+	agp_bridge->current_size = (void *) &hp_zx1_sizes[0];
+	return size;
+}
+
+static int
+hp_zx1_configure (void)
+{
+	struct _hp_private *hp = &hp_private;
+
+	agp_bridge->gart_bus_addr = hp->gart_base;
+	agp_bridge->capndx = hp->lba_cap_offset;
+	agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
+
+	if (hp->io_pdir_owner) {
+		writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
+		readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
+		writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
+		readl(hp->ioc_regs+HP_ZX1_TCNFG);
+		writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK);
+		readl(hp->ioc_regs+HP_ZX1_IMASK);
+		writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
+		readl(hp->ioc_regs+HP_ZX1_IBASE);
+		writel(hp->iova_base|ilog2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM);
+		readl(hp->ioc_regs+HP_ZX1_PCOM);
+	}
+
+	return 0;
+}
+
+static void
+hp_zx1_cleanup (void)
+{
+	struct _hp_private *hp = &hp_private;
+
+	if (hp->ioc_regs) {
+		if (hp->io_pdir_owner) {
+			writeq(0, hp->ioc_regs+HP_ZX1_IBASE);
+			readq(hp->ioc_regs+HP_ZX1_IBASE);
+		}
+		iounmap(hp->ioc_regs);
+	}
+	if (hp->lba_regs)
+		iounmap(hp->lba_regs);
+}
+
+static void
+hp_zx1_tlbflush (struct agp_memory *mem)
+{
+	struct _hp_private *hp = &hp_private;
+
+	writeq(hp->gart_base | ilog2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM);
+	readq(hp->ioc_regs+HP_ZX1_PCOM);
+}
+
+static int
+hp_zx1_create_gatt_table (struct agp_bridge_data *bridge)
+{
+	struct _hp_private *hp = &hp_private;
+	int i;
+
+	if (hp->io_pdir_owner) {
+		hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
+						get_order(hp->io_pdir_size));
+		if (!hp->io_pdir) {
+			printk(KERN_ERR PFX "Couldn't allocate contiguous "
+				"memory for I/O PDIR\n");
+			hp->gatt = NULL;
+			hp->gatt_entries = 0;
+			return -ENOMEM;
+		}
+		memset(hp->io_pdir, 0, hp->io_pdir_size);
+
+		hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
+	}
+
+	for (i = 0; i < hp->gatt_entries; i++) {
+		hp->gatt[i] = (unsigned long) agp_bridge->scratch_page;
+	}
+
+	return 0;
+}
+
+static int
+hp_zx1_free_gatt_table (struct agp_bridge_data *bridge)
+{
+	struct _hp_private *hp = &hp_private;
+
+	if (hp->io_pdir_owner)
+		free_pages((unsigned long) hp->io_pdir,
+			    get_order(hp->io_pdir_size));
+	else
+		hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
+	return 0;
+}
+
+static int
+hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
+{
+	struct _hp_private *hp = &hp_private;
+	int i, k;
+	off_t j, io_pg_start;
+	int io_pg_count;
+
+	if (type != mem->type ||
+		agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
+		return -EINVAL;
+	}
+
+	io_pg_start = hp->io_pages_per_kpage * pg_start;
+	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
+	if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
+		return -EINVAL;
+	}
+
+	j = io_pg_start;
+	while (j < (io_pg_start + io_pg_count)) {
+		if (hp->gatt[j]) {
+			return -EBUSY;
+		}
+		j++;
+	}
+
+	if (!mem->is_flushed) {
+		global_cache_flush();
+		mem->is_flushed = true;
+	}
+
+	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
+		unsigned long paddr;
+
+		paddr = page_to_phys(mem->pages[i]);
+		for (k = 0;
+		     k < hp->io_pages_per_kpage;
+		     k++, j++, paddr += hp->io_page_size) {
+			hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr;
+		}
+	}
+
+	agp_bridge->driver->tlb_flush(mem);
+	return 0;
+}
+
+static int
+hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
+{
+	struct _hp_private *hp = &hp_private;
+	int i, io_pg_start, io_pg_count;
+
+	if (type != mem->type ||
+		agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
+		return -EINVAL;
+	}
+
+	io_pg_start = hp->io_pages_per_kpage * pg_start;
+	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
+	for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
+		hp->gatt[i] = agp_bridge->scratch_page;
+	}
+
+	agp_bridge->driver->tlb_flush(mem);
+	return 0;
+}
+
+static unsigned long
+hp_zx1_mask_memory (struct agp_bridge_data *bridge, dma_addr_t addr, int type)
+{
+	return HP_ZX1_PDIR_VALID_BIT | addr;
+}
+
+static void
+hp_zx1_enable (struct agp_bridge_data *bridge, u32 mode)
+{
+	struct _hp_private *hp = &hp_private;
+	u32 command;
+
+	command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
+	command = agp_collect_device_status(bridge, mode, command);
+	command |= 0x00000100;
+
+	writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND);
+
+	agp_device_command(command, (mode & AGP8X_MODE) != 0);
+}
+
+const struct agp_bridge_driver hp_zx1_driver = {
+	.owner			= THIS_MODULE,
+	.size_type		= FIXED_APER_SIZE,
+	.configure		= hp_zx1_configure,
+	.fetch_size		= hp_zx1_fetch_size,
+	.cleanup		= hp_zx1_cleanup,
+	.tlb_flush		= hp_zx1_tlbflush,
+	.mask_memory		= hp_zx1_mask_memory,
+	.masks			= hp_zx1_masks,
+	.agp_enable		= hp_zx1_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= hp_zx1_create_gatt_table,
+	.free_gatt_table	= hp_zx1_free_gatt_table,
+	.insert_memory		= hp_zx1_insert_memory,
+	.remove_memory		= hp_zx1_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+	.cant_use_aperture	= true,
+};
+
+static int __init
+hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa)
+{
+	struct agp_bridge_data *bridge;
+	int error = 0;
+
+	error = hp_zx1_ioc_init(ioc_hpa);
+	if (error)
+		goto fail;
+
+	error = hp_zx1_lba_init(lba_hpa);
+	if (error)
+		goto fail;
+
+	bridge = agp_alloc_bridge();
+	if (!bridge) {
+		error = -ENOMEM;
+		goto fail;
+	}
+	bridge->driver = &hp_zx1_driver;
+
+	fake_bridge_dev.vendor = PCI_VENDOR_ID_HP;
+	fake_bridge_dev.device = PCI_DEVICE_ID_HP_PCIX_LBA;
+	bridge->dev = &fake_bridge_dev;
+
+	error = agp_add_bridge(bridge);
+  fail:
+	if (error)
+		hp_zx1_cleanup();
+	return error;
+}
+
+static acpi_status __init
+zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
+{
+	acpi_handle handle, parent;
+	acpi_status status;
+	struct acpi_device_info *info;
+	u64 lba_hpa, sba_hpa, length;
+	int match;
+
+	status = hp_acpi_csr_space(obj, &lba_hpa, &length);
+	if (ACPI_FAILURE(status))
+		return AE_OK; /* keep looking for another bridge */
+
+	/* Look for an enclosing IOC scope and find its CSR space */
+	handle = obj;
+	do {
+		status = acpi_get_object_info(handle, &info);
+		if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) {
+			/* TBD check _CID also */
+			match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
+			kfree(info);
+			if (match) {
+				status = hp_acpi_csr_space(handle, &sba_hpa, &length);
+				if (ACPI_SUCCESS(status))
+					break;
+				else {
+					printk(KERN_ERR PFX "Detected HP ZX1 "
+					       "AGP LBA but no IOC.\n");
+					return AE_OK;
+				}
+			}
+		}
+
+		status = acpi_get_parent(handle, &parent);
+		handle = parent;
+	} while (ACPI_SUCCESS(status));
+
+	if (ACPI_FAILURE(status))
+		return AE_OK;	/* found no enclosing IOC */
+
+	if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
+		return AE_OK;
+
+	printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset "
+		"(ioc=%llx, lba=%llx)\n", (char *)context,
+		sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);
+
+	hp_zx1_gart_found = 1;
+	return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */
+}
+
+static int __init
+agp_hp_init (void)
+{
+	if (agp_off)
+		return -EINVAL;
+
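+	/* the AGP bridge can show up under either of two ACPI HIDs */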
+	acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003", NULL);
+	if (hp_zx1_gart_found)
+		return 0;
+
+	acpi_get_devices("HWP0007", zx1_gart_probe, "HWP0007", NULL);
+	if (hp_zx1_gart_found)
+		return 0;
+
+	return -ENODEV;
+}
+
+static void __exit
+agp_hp_cleanup (void)
+{
+}
+
+module_init(agp_hp_init);
+module_exit(agp_hp_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
new file mode 100644
index 0000000..15b240e
--- /dev/null
+++ b/drivers/char/agp/i460-agp.c
@@ -0,0 +1,659 @@
+/*
+ * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of
+ * the "Intel 460GTX Chipset Software Developer's Manual":
+ * http://www.intel.com/design/archives/itanium/downloads/248704.htm
+ */
+/*
+ * 460GX support by Chris Ahna <christopher.j.ahna@intel.com>
+ * Clean up & simplification by David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/agp_backend.h>
+#include <linux/log2.h>
+
+#include "agp.h"
+
+#define INTEL_I460_BAPBASE		0x98
+#define INTEL_I460_GXBCTL		0xa0
+#define INTEL_I460_AGPSIZ		0xa2
+#define INTEL_I460_ATTBASE		0xfe200000
+#define INTEL_I460_GATT_VALID		(1UL << 24)
+#define INTEL_I460_GATT_COHERENT	(1UL << 25)
+
+/*
+ * The i460 can operate with large (4MB) pages, but there is no sane way to support this
+ * within the current kernel/DRM environment, so we disable the relevant code for now.
+ * See also comments in ia64_alloc_page()...
+ */
+#define I460_LARGE_IO_PAGES		0
+
+#if I460_LARGE_IO_PAGES
+# define I460_IO_PAGE_SHIFT		i460.io_page_shift
+#else
+# define I460_IO_PAGE_SHIFT		12
+#endif
+
+#define I460_IOPAGES_PER_KPAGE		(PAGE_SIZE >> I460_IO_PAGE_SHIFT)
+#define I460_KPAGES_PER_IOPAGE		(1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT))
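+/* Only one of the two ratios above is meaningful, depending on whether a
+ * GART page is smaller or larger than a kernel page. */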
+#define I460_SRAM_IO_DISABLE		(1 << 4)
+#define I460_BAPBASE_ENABLE		(1 << 3)
+#define I460_AGPSIZ_MASK		0x7
+#define I460_4M_PS			(1 << 1)
+
+/* Control bits for Out-Of-GART coherency and Burst Write Combining */
+#define I460_GXBCTL_OOG		(1UL << 0)
+#define I460_GXBCTL_BWC		(1UL << 2)
+
+/*
+ * gatt_table entries are 32-bits wide on the i460; the generic code ought to declare the
+ * gatt_table and gatt_table_real pointers as "void *"...
+ */
+#define RD_GATT(index)		readl((u32 *) i460.gatt + (index))
+#define WR_GATT(index, val)	writel((val), (u32 *) i460.gatt + (index))
+/*
+ * The 460 spec says we have to read the last location written to make sure that all
+ * writes have taken effect
+ */
+#define WR_FLUSH_GATT(index)	RD_GATT(index)
+
+static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
+				       dma_addr_t addr, int type);
+
+static struct {
+	void *gatt;				/* ioremap'd GATT area */
+
+	/* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */
+	u8 io_page_shift;
+
+	/* BIOS configures chipset to one of 2 possible apbase values: */
+	u8 dynamic_apbase;
+
+	/* structure for tracking partial use of 4MB GART pages: */
+	struct lp_desc {
+		unsigned long *alloced_map;	/* bitmap of kernel-pages in use */
+		int refcount;			/* number of kernel pages using the large page */
+		u64 paddr;			/* physical address of large page */
+		struct page *page; 		/* page pointer */
+	} *lp_desc;
+} i460;
+
+static const struct aper_size_info_8 i460_sizes[3] =
+{
+	/*
+	 * The 32GB aperture is only available with a 4M GART page size.  Due to the
+	 * dynamic GART page size, we can't figure out page_order or num_entries until
+	 * runtime.
+	 */
+	{32768, 0, 0, 4},
+	{1024, 0, 0, 2},
+	{256, 0, 0, 1}
+};
+
+static struct gatt_mask i460_masks[] =
+{
+	{
+	  .mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT,
+	  .type = 0
+	}
+};
+
+static int i460_fetch_size (void)
+{
+	int i;
+	u8 temp;
+	struct aper_size_info_8 *values;
+
+	/* Determine the GART page size */
+	pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp);
+	i460.io_page_shift = (temp & I460_4M_PS) ? 22 : 12;
+	pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift);
+
+	if (i460.io_page_shift != I460_IO_PAGE_SHIFT) {
+		printk(KERN_ERR PFX
+			"I/O (GART) page-size %luKB doesn't match expected "
+				"size %luKB\n",
+			1UL << (i460.io_page_shift - 10),
+			1UL << (I460_IO_PAGE_SHIFT - 10));
+		return 0;
+	}
+
+	values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+
+	pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
+
+	/* Exit now if the IO drivers for the GART SRAMS are turned off */
+	if (temp & I460_SRAM_IO_DISABLE) {
+		printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n");
+		printk(KERN_ERR PFX "AGPGART operation not possible\n");
+		return 0;
+	}
+
+	/* Make sure we don't try to create a 2^23-entry GATT */
+	if ((i460.io_page_shift == 12) && ((temp & I460_AGPSIZ_MASK) == 4)) {
+		printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n");
+		return 0;
+	}
+
+	/* Determine the proper APBASE register */
+	if (temp & I460_BAPBASE_ENABLE)
+		i460.dynamic_apbase = INTEL_I460_BAPBASE;
+	else
+		i460.dynamic_apbase = AGP_APBASE;
+
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		/*
+		 * Dynamically calculate the proper num_entries and page_order values for
+		 * the defined aperture sizes. Take care not to shift off the end of
+		 * values[i].size.
+		 */
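+		/* e.g. a 256MB aperture with 4KB GART pages yields
+		   (256 << 8) = 65536 GATT entries */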
+		values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12);
+		values[i].page_order = ilog2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
+	}
+
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		/* Neglect control bits when matching up size_value */
+		if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) {
+			agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+/* There isn't anything to do here since 460 has no GART TLB. */
+static void i460_tlb_flush (struct agp_memory *mem)
+{
+	return;
+}
+
+/*
+ * This utility function is needed to prevent corruption of the control bits
+ * which are stored along with the aperture size in 460's AGPSIZ register
+ */
+static void i460_write_agpsiz (u8 size_value)
+{
+	u8 temp;
+
+	pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
+	pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ,
+			      ((temp & ~I460_AGPSIZ_MASK) | size_value));
+}
+
+static void i460_cleanup (void)
+{
+	struct aper_size_info_8 *previous_size;
+
+	previous_size = A_SIZE_8(agp_bridge->previous_size);
+	i460_write_agpsiz(previous_size->size_value);
+
+	if (I460_IO_PAGE_SHIFT > PAGE_SHIFT)
+		kfree(i460.lp_desc);
+}
+
+static int i460_configure (void)
+{
+	union {
+		u32 small[2];
+		u64 large;
+	} temp;
+	size_t size;
+	u8 scratch;
+	struct aper_size_info_8 *current_size;
+
+	temp.large = 0;
+
+	current_size = A_SIZE_8(agp_bridge->current_size);
+	i460_write_agpsiz(current_size->size_value);
+
+	/*
+	 * Do the necessary rigmarole to read all eight bytes of APBASE.
+	 * This has to be done since the AGP aperture can be above 4GB on
+	 * 460 based systems.
+	 */
+	pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0]));
+	pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1]));
+
+	/* Clear BAR control bits */
+	agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1);
+
+	pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch);
+	pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL,
+			      (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC);
+
+	/*
+	 * Initialize partial allocation trackers if a GART page is bigger than a kernel
+	 * page.
+	 */
+	if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) {
+		size = current_size->num_entries * sizeof(i460.lp_desc[0]);
+		i460.lp_desc = kzalloc(size, GFP_KERNEL);
+		if (!i460.lp_desc)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static int i460_create_gatt_table (struct agp_bridge_data *bridge)
+{
+	int page_order, num_entries, i;
+	void *temp;
+
+	/*
+	 * Load up the fixed address of the GART SRAMS which hold our GATT table.
+	 */
+	temp = agp_bridge->current_size;
+	page_order = A_SIZE_8(temp)->page_order;
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order);
+	if (!i460.gatt) {
+		printk(KERN_ERR PFX "ioremap failed\n");
+		return -ENOMEM;
+	}
+
+	/* These are no good; they should be removed from the agp_bridge structure... */
+	agp_bridge->gatt_table_real = NULL;
+	agp_bridge->gatt_table = NULL;
+	agp_bridge->gatt_bus_addr = 0;
+
+	for (i = 0; i < num_entries; ++i)
+		WR_GATT(i, 0);
+	WR_FLUSH_GATT(i - 1);
+	return 0;
+}
+
+static int i460_free_gatt_table (struct agp_bridge_data *bridge)
+{
+	int num_entries, i;
+	void *temp;
+
+	temp = agp_bridge->current_size;
+
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	for (i = 0; i < num_entries; ++i)
+		WR_GATT(i, 0);
+	WR_FLUSH_GATT(num_entries - 1);
+
+	iounmap(i460.gatt);
+	return 0;
+}
+
+/*
+ * The following functions are called when the I/O (GART) page size is smaller than
+ * PAGE_SIZE.
+ */
+
+static int i460_insert_memory_small_io_page (struct agp_memory *mem,
+				off_t pg_start, int type)
+{
+	unsigned long paddr, io_pg_start, io_page_size;
+	int i, j, k, num_entries;
+	void *temp;
+
+	pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
+		 mem, pg_start, type, page_to_phys(mem->pages[0]));
+
+	if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+		return -EINVAL;
+
+	io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
+
+	temp = agp_bridge->current_size;
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {
+		printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
+		return -EINVAL;
+	}
+
+	j = io_pg_start;
+	while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {
+		if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) {
+			pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
+				 j, RD_GATT(j));
+			return -EBUSY;
+		}
+		j++;
+	}
+
+	io_page_size = 1UL << I460_IO_PAGE_SHIFT;
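+	/* each kernel page spans I460_IOPAGES_PER_KPAGE consecutive GATT entries */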
+	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
+		paddr = page_to_phys(mem->pages[i]);
+		for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
+			WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
+	}
+	WR_FLUSH_GATT(j - 1);
+	return 0;
+}
+
+static int i460_remove_memory_small_io_page(struct agp_memory *mem,
+				off_t pg_start, int type)
+{
+	int i;
+
+	pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n",
+		 mem, pg_start, type);
+
+	pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
+
+	for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++)
+		WR_GATT(i, 0);
+	WR_FLUSH_GATT(i - 1);
+	return 0;
+}
+
+#if I460_LARGE_IO_PAGES
+
+/*
+ * These functions are called when the I/O (GART) page size exceeds PAGE_SIZE.
+ *
+ * This situation is interesting since AGP memory allocations that are smaller than a
+ * single GART page are possible.  The i460.lp_desc array tracks partial allocation of the
+ * large GART pages to work around this issue.
+ *
+ * i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page
+ * pg_num.  i460.lp_desc[pg_num].paddr is the physical address of the large page and
+ * i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated).
+ */
+
+static int i460_alloc_large_page (struct lp_desc *lp)
+{
+	unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT;
+	size_t map_size;
+
+	lp->page = alloc_pages(GFP_KERNEL, order);
+	if (!lp->page) {
+		printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
+		return -ENOMEM;
+	}
+
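+	/* round the bitmap up to a whole number of longs, in bytes */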
+	map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8;
+	lp->alloced_map = kzalloc(map_size, GFP_KERNEL);
+	if (!lp->alloced_map) {
+		__free_pages(lp->page, order);
+		printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
+		return -ENOMEM;
+	}
+
+	lp->paddr = page_to_phys(lp->page);
+	lp->refcount = 0;
+	atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
+	return 0;
+}
+
+static void i460_free_large_page (struct lp_desc *lp)
+{
+	kfree(lp->alloced_map);
+	lp->alloced_map = NULL;
+
+	__free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT);
+	atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
+}
+
+static int i460_insert_memory_large_io_page (struct agp_memory *mem,
+				off_t pg_start, int type)
+{
+	int i, start_offset, end_offset, idx, pg, num_entries;
+	struct lp_desc *start, *end, *lp;
+	void *temp;
+
+	if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+		return -EINVAL;
+
+	temp = agp_bridge->current_size;
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	/* Figure out what pg_start means in terms of our large GART pages */
+	start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
+	end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
+	start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
+	end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
+
+	if (end > i460.lp_desc + num_entries) {
+		printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
+		return -EINVAL;
+	}
+
+	/* Check if the requested region of the aperture is free */
+	for (lp = start; lp <= end; ++lp) {
+		if (!lp->alloced_map)
+			continue;	/* OK, the entire large page is available... */
+
+		for (idx = ((lp == start) ? start_offset : 0);
+		     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
+		     idx++)
+		{
+			if (test_bit(idx, lp->alloced_map))
+				return -EBUSY;
+		}
+	}
+
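+	/* Allocate backing GART pages where needed and mark the kernel
+	   pages in use */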
+	for (lp = start, i = 0; lp <= end; ++lp) {
+		if (!lp->alloced_map) {
+			/* Allocate new GART pages... */
+			if (i460_alloc_large_page(lp) < 0)
+				return -ENOMEM;
+			pg = lp - i460.lp_desc;
+			WR_GATT(pg, i460_mask_memory(agp_bridge,
+						     lp->paddr, 0));
+			WR_FLUSH_GATT(pg);
+		}
+
+		for (idx = ((lp == start) ? start_offset : 0);
+		     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
+		     idx++, i++)
+		{
+			mem->pages[i] = lp->page;
+			__set_bit(idx, lp->alloced_map);
+			++lp->refcount;
+		}
+	}
+	return 0;
+}
+
+static int i460_remove_memory_large_io_page (struct agp_memory *mem,
+				off_t pg_start, int type)
+{
+	int i, pg, start_offset, end_offset, idx, num_entries;
+	struct lp_desc *start, *end, *lp;
+	void *temp;
+
+	temp = agp_bridge->current_size;
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	/* Figure out what pg_start means in terms of our large GART pages */
+	start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
+	end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
+	start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
+	end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
+
+	for (i = 0, lp = start; lp <= end; ++lp) {
+		for (idx = ((lp == start) ? start_offset : 0);
+		     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
+		     idx++, i++)
+		{
+			mem->pages[i] = NULL;
+			__clear_bit(idx, lp->alloced_map);
+			--lp->refcount;
+		}
+
+		/* Free GART pages if they are unused */
+		if (lp->refcount == 0) {
+			pg = lp - i460.lp_desc;
+			WR_GATT(pg, 0);
+			WR_FLUSH_GATT(pg);
+			i460_free_large_page(lp);
+		}
+	}
+	return 0;
+}
+
+/* Wrapper routines to call the appropriate {small_io_page,large_io_page} function */
+
+static int i460_insert_memory (struct agp_memory *mem,
+				off_t pg_start, int type)
+{
+	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
+		return i460_insert_memory_small_io_page(mem, pg_start, type);
+	else
+		return i460_insert_memory_large_io_page(mem, pg_start, type);
+}
+
+static int i460_remove_memory (struct agp_memory *mem,
+				off_t pg_start, int type)
+{
+	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
+		return i460_remove_memory_small_io_page(mem, pg_start, type);
+	else
+		return i460_remove_memory_large_io_page(mem, pg_start, type);
+}
+
+/*
+ * If the I/O (GART) page size is bigger than the kernel page size, we don't want to
+ * allocate memory until we know where it is to be bound in the aperture (a
+ * multi-kernel-page alloc might fit inside of an already allocated GART page).
+ *
+ * Let's just hope nobody counts on the allocated AGP memory being there before bind time
+ * (I don't think current drivers do)...
+ */
+static struct page *i460_alloc_page (struct agp_bridge_data *bridge)
+{
+	void *page;
+
+	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
+		page = agp_generic_alloc_page(agp_bridge);
+	} else
+		/* Returning NULL would cause problems */
+		/* AK: really dubious code. */
+		page = (void *)~0UL;
+	return page;
+}
+
+static void i460_destroy_page (struct page *page, int flags)
+{
+	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
+		agp_generic_destroy_page(page, flags);
+	}
+}
+
+#endif /* I460_LARGE_IO_PAGES */
+
+static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
+				       dma_addr_t addr, int type)
+{
+	/*
+	 * Build a valid GATT entry: bits 31:12 of the GART-page-aligned
+	 * physical address end up in the low 20 bits of the entry, plus
+	 * the valid/coherent control bits from the mask.
+	 */
+	return bridge->driver->masks[0].mask
+		| (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
+}
+
+const struct agp_bridge_driver intel_i460_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= i460_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 3,
+	.configure		= i460_configure,
+	.fetch_size		= i460_fetch_size,
+	.cleanup		= i460_cleanup,
+	.tlb_flush		= i460_tlb_flush,
+	.mask_memory		= i460_mask_memory,
+	.masks			= i460_masks,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= i460_create_gatt_table,
+	.free_gatt_table	= i460_free_gatt_table,
+#if I460_LARGE_IO_PAGES
+	.insert_memory		= i460_insert_memory,
+	.remove_memory		= i460_remove_memory,
+	.agp_alloc_page		= i460_alloc_page,
+	.agp_destroy_page	= i460_destroy_page,
+#else
+	.insert_memory		= i460_insert_memory_small_io_page,
+	.remove_memory		= i460_remove_memory_small_io_page,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+#endif
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+	.cant_use_aperture	= true,
+};
+
+static int agp_intel_i460_probe(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	struct agp_bridge_data *bridge;
+	u8 cap_ptr;
+
+	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+	if (!cap_ptr)
+		return -ENODEV;
+
+	bridge = agp_alloc_bridge();
+	if (!bridge)
+		return -ENOMEM;
+
+	bridge->driver = &intel_i460_driver;
+	bridge->dev = pdev;
+	bridge->capndx = cap_ptr;
+
+	printk(KERN_INFO PFX "Detected Intel 460GX chipset\n");
+
+	pci_set_drvdata(pdev, bridge);
+	return agp_add_bridge(bridge);
+}
+
+static void agp_intel_i460_remove(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+	agp_remove_bridge(bridge);
+	agp_put_bridge(bridge);
+}
+
+static struct pci_device_id agp_intel_i460_pci_table[] = {
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_INTEL,
+	.device		= PCI_DEVICE_ID_INTEL_84460GX,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table);
+
+static struct pci_driver agp_intel_i460_pci_driver = {
+	.name		= "agpgart-intel-i460",
+	.id_table	= agp_intel_i460_pci_table,
+	.probe		= agp_intel_i460_probe,
+	.remove		= agp_intel_i460_remove,
+};
+
+static int __init agp_intel_i460_init(void)
+{
+	if (agp_off)
+		return -EINVAL;
+	return pci_register_driver(&agp_intel_i460_pci_driver);
+}
+
+static void __exit agp_intel_i460_cleanup(void)
+{
+	pci_unregister_driver(&agp_intel_i460_pci_driver);
+}
+
+module_init(agp_intel_i460_init);
+module_exit(agp_intel_i460_cleanup);
+
+MODULE_AUTHOR("Chris Ahna <Christopher.J.Ahna@intel.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
new file mode 100644
index 0000000..9e4f27a
--- /dev/null
+++ b/drivers/char/agp/intel-agp.c
@@ -0,0 +1,924 @@
+/*
+ * Intel AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <asm/smp.h>
+#include "agp.h"
+#include "intel-agp.h"
+#include <drm/intel-gtt.h>
+
+static int intel_fetch_size(void)
+{
+	int i;
+	u16 temp;
+	struct aper_size_info_16 *values;
+
+	pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
+	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
+
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+static int __intel_8xx_fetch_size(u8 temp)
+{
+	int i;
+	struct aper_size_info_8 *values;
+
+	values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge->previous_size =
+				agp_bridge->current_size = (void *) (values + i);
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+	return 0;
+}
+
+static int intel_8xx_fetch_size(void)
+{
+	u8 temp;
+
+	pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
+	return __intel_8xx_fetch_size(temp);
+}
+
+static int intel_815_fetch_size(void)
+{
+	u8 temp;
+
+	/* Intel 815 chipsets have a _weird_ APSIZE register with only
+	 * one non-reserved bit, so mask the others out ... */
+	pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
+	temp &= (1 << 3);
+
+	return __intel_8xx_fetch_size(temp);
+}
+
+static void intel_tlbflush(struct agp_memory *mem)
+{
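+	/* 0x2200 and 0x2280 differ only in bit 7 (GTLB enable): clearing
+	   and re-setting it flushes the GTLB, as intel_8xx_tlbflush()
+	   below does explicitly */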
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+}
+
+
+static void intel_8xx_tlbflush(struct agp_memory *mem)
+{
+	u32 temp;
+	pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp & ~(1 << 7));
+	pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp | (1 << 7));
+}
+
+
+static void intel_cleanup(void)
+{
+	u16 temp;
+	struct aper_size_info_16 *previous_size;
+
+	previous_size = A_SIZE_16(agp_bridge->previous_size);
+	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
+	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
+	pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
+}
+
+
+static void intel_8xx_cleanup(void)
+{
+	u16 temp;
+	struct aper_size_info_8 *previous_size;
+
+	previous_size = A_SIZE_8(agp_bridge->previous_size);
+	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
+	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
+	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
+}
+
+
+static int intel_configure(void)
+{
+	u16 temp2;
+	struct aper_size_info_16 *current_size;
+
+	current_size = A_SIZE_16(agp_bridge->current_size);
+
+	/* aperture size */
+	pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+	/* address to map to */
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+
+	/* paccfg/nbxcfg */
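+	/* bit 9 enables global aperture access; intel_cleanup() clears it */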
+	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
+	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
+			(temp2 & ~(1 << 10)) | (1 << 9));
+	/* clear any possible error conditions */
+	pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
+	return 0;
+}
+
+static int intel_815_configure(void)
+{
+	u32 addr;
+	u8 temp2;
+	struct aper_size_info_8 *current_size;
+
+	/* attbase - aperture base */
+	/* the Intel 815 chipset spec says that bits 29-31 in the
+	* ATTBASE register are reserved, so try not to write them */
+	if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) {
+		dev_emerg(&agp_bridge->dev->dev, "gatt bus addr too high");
+		return -EINVAL;
+	}
+
+	current_size = A_SIZE_8(agp_bridge->current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
+			current_size->size_value);
+
+	/* address to map to */
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+	pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr);
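+	/* preserve the reserved bits 29-31 and merge in the new GATT base */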
+	addr &= INTEL_815_ATTBASE_MASK;
+	addr |= agp_bridge->gatt_bus_addr;
+	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, addr);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+	/* apcont */
+	pci_read_config_byte(agp_bridge->dev, INTEL_815_APCONT, &temp2);
+	pci_write_config_byte(agp_bridge->dev, INTEL_815_APCONT, temp2 | (1 << 1));
+
+	/* clear any possible error conditions */
+	/* Oddness: this chipset seems to have no ERRSTS register! */
+	return 0;
+}
+
+static void intel_820_tlbflush(struct agp_memory *mem)
+{
+	return;
+}
+
+static void intel_820_cleanup(void)
+{
+	u8 temp;
+	struct aper_size_info_8 *previous_size;
+
+	previous_size = A_SIZE_8(agp_bridge->previous_size);
+	pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp);
+	pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR,
+			temp & ~(1 << 1));
+	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
+			previous_size->size_value);
+}
+
+
+static int intel_820_configure(void)
+{
+	u8 temp2;
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge->current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+	/* address to map to */
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+	/* global enable aperture access */
+	/* Unlike on the i850, this flag is not accessed through the */
+	/* MCHCFG register but through RDCR. */
+	pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp2);
+	pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR, temp2 | (1 << 1));
+	/* clear any possible AGP-related error conditions */
+	pci_write_config_word(agp_bridge->dev, INTEL_I820_ERRSTS, 0x001c);
+	return 0;
+}
+
+static int intel_840_configure(void)
+{
+	u16 temp2;
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge->current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+	/* address to map to */
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+	/* mcgcfg */
+	pci_read_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, &temp2);
+	pci_write_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, temp2 | (1 << 9));
+	/* clear any possible error conditions */
+	pci_write_config_word(agp_bridge->dev, INTEL_I840_ERRSTS, 0xc000);
+	return 0;
+}
+
+static int intel_845_configure(void)
+{
+	u8 temp2;
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge->current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+	if (agp_bridge->apbase_config != 0) {
+		pci_write_config_dword(agp_bridge->dev, AGP_APBASE,
+				       agp_bridge->apbase_config);
+	} else {
+		/* address to map to */
+		agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+							    AGP_APERTURE_BAR);
+		agp_bridge->apbase_config = agp_bridge->gart_bus_addr;
+	}
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+	/* agpm */
+	pci_read_config_byte(agp_bridge->dev, INTEL_I845_AGPM, &temp2);
+	pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1));
+	/* clear any possible error conditions */
+	pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c);
+	return 0;
+}
+
+static int intel_850_configure(void)
+{
+	u16 temp2;
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge->current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+	/* address to map to */
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+	/* mcgcfg */
+	pci_read_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, &temp2);
+	pci_write_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, temp2 | (1 << 9));
+	/* clear any possible AGP-related error conditions */
+	pci_write_config_word(agp_bridge->dev, INTEL_I850_ERRSTS, 0x001c);
+	return 0;
+}
+
+static int intel_860_configure(void)
+{
+	u16 temp2;
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge->current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+	/* address to map to */
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+	/* mcgcfg */
+	pci_read_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, &temp2);
+	pci_write_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, temp2 | (1 << 9));
+	/* clear any possible AGP-related error conditions */
+	pci_write_config_word(agp_bridge->dev, INTEL_I860_ERRSTS, 0xf700);
+	return 0;
+}
+
+static int intel_830mp_configure(void)
+{
+	u16 temp2;
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge->current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+	/* address to map to */
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+	/* gmch */
+	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
+	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp2 | (1 << 9));
+	/* clear any possible AGP-related error conditions */
+	pci_write_config_word(agp_bridge->dev, INTEL_I830_ERRSTS, 0x1c);
+	return 0;
+}
+
+static int intel_7505_configure(void)
+{
+	u16 temp2;
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge->current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+	/* address to map to */
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+	/* mchcfg */
+	pci_read_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, &temp2);
+	pci_write_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, temp2 | (1 << 9));
+
+	return 0;
+}
+
+/* Setup data: GATT masks and aperture size tables */
+static const struct gatt_mask intel_generic_masks[] =
+{
+	{.mask = 0x00000017, .type = 0}
+};
+
+static const struct aper_size_info_8 intel_815_sizes[2] =
+{
+	{64, 16384, 4, 0},
+	{32, 8192, 3, 8},
+};
+
+static const struct aper_size_info_8 intel_8xx_sizes[7] =
+{
+	{256, 65536, 6, 0},
+	{128, 32768, 5, 32},
+	{64, 16384, 4, 48},
+	{32, 8192, 3, 56},
+	{16, 4096, 2, 60},
+	{8, 2048, 1, 62},
+	{4, 1024, 0, 63}
+};
+
+static const struct aper_size_info_16 intel_generic_sizes[7] =
+{
+	{256, 65536, 6, 0},
+	{128, 32768, 5, 32},
+	{64, 16384, 4, 48},
+	{32, 8192, 3, 56},
+	{16, 4096, 2, 60},
+	{8, 2048, 1, 62},
+	{4, 1024, 0, 63}
+};
+
+static const struct aper_size_info_8 intel_830mp_sizes[4] =
+{
+	{256, 65536, 6, 0},
+	{128, 32768, 5, 32},
+	{64, 16384, 4, 48},
+	{32, 8192, 3, 56}
+};
+
+static const struct agp_bridge_driver intel_generic_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_generic_sizes,
+	.size_type		= U16_APER_SIZE,
+	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
+	.configure		= intel_configure,
+	.fetch_size		= intel_fetch_size,
+	.cleanup		= intel_cleanup,
+	.tlb_flush		= intel_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= intel_generic_masks,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages        = agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages      = agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_815_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_815_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 2,
+	.needs_scratch_page	= true,
+	.configure		= intel_815_configure,
+	.fetch_size		= intel_815_fetch_size,
+	.cleanup		= intel_8xx_cleanup,
+	.tlb_flush		= intel_8xx_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= intel_generic_masks,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages        = agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages      = agp_generic_destroy_pages,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_820_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_8xx_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
+	.configure		= intel_820_configure,
+	.fetch_size		= intel_8xx_fetch_size,
+	.cleanup		= intel_820_cleanup,
+	.tlb_flush		= intel_820_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= intel_generic_masks,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages        = agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages      = agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_830mp_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_830mp_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 4,
+	.needs_scratch_page	= true,
+	.configure		= intel_830mp_configure,
+	.fetch_size		= intel_8xx_fetch_size,
+	.cleanup		= intel_8xx_cleanup,
+	.tlb_flush		= intel_8xx_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= intel_generic_masks,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages        = agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages      = agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_840_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_8xx_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
+	.configure		= intel_840_configure,
+	.fetch_size		= intel_8xx_fetch_size,
+	.cleanup		= intel_8xx_cleanup,
+	.tlb_flush		= intel_8xx_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= intel_generic_masks,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages        = agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages      = agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_845_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_8xx_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
+	.configure		= intel_845_configure,
+	.fetch_size		= intel_8xx_fetch_size,
+	.cleanup		= intel_8xx_cleanup,
+	.tlb_flush		= intel_8xx_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= intel_generic_masks,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages        = agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages      = agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_850_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_8xx_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
+	.configure		= intel_850_configure,
+	.fetch_size		= intel_8xx_fetch_size,
+	.cleanup		= intel_8xx_cleanup,
+	.tlb_flush		= intel_8xx_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= intel_generic_masks,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages        = agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages      = agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_860_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_8xx_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
+	.configure		= intel_860_configure,
+	.fetch_size		= intel_8xx_fetch_size,
+	.cleanup		= intel_8xx_cleanup,
+	.tlb_flush		= intel_8xx_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= intel_generic_masks,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages        = agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages      = agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_7505_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_8xx_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
+	.configure		= intel_7505_configure,
+	.fetch_size		= intel_8xx_fetch_size,
+	.cleanup		= intel_8xx_cleanup,
+	.tlb_flush		= intel_8xx_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= intel_generic_masks,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages        = agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages      = agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+/* Table to describe the Intel AGP chipsets handled by this driver.  The
+ * probe routine matches pdev->device against chip_id to select the right
+ * agp_bridge_driver; integrated graphics (GMCH) parts are instead claimed
+ * by intel_gmch_probe() in intel-gtt.c.
+ */
+static const struct intel_agp_driver_description {
+	unsigned int chip_id;
+	char *name;
+	const struct agp_bridge_driver *driver;
+} intel_agp_chipsets[] = {
+	{ PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver },
+	{ PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver },
+	{ PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver },
+	{ PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver },
+	{ PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver },
+	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
+	{ PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
+	{ PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
+	{ PCI_DEVICE_ID_INTEL_82845_HB, "i845", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82845G_HB, "845G", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
+	{ PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver },
+	{ PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver },
+	{ PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver },
+	{ 0, NULL, NULL }
+};
+
+static int agp_intel_probe(struct pci_dev *pdev,
+			   const struct pci_device_id *ent)
+{
+	struct agp_bridge_data *bridge;
+	u8 cap_ptr = 0;
+	struct resource *r;
+	int i, err;
+
+	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+
+	bridge = agp_alloc_bridge();
+	if (!bridge)
+		return -ENOMEM;
+
+	bridge->capndx = cap_ptr;
+
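+	/* integrated graphics (GMCH) chipsets are claimed by intel-gtt.c */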
+	if (intel_gmch_probe(pdev, NULL, bridge))
+		goto found_gmch;
+
+	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
+		/* Several models of graphics chip can sit behind the
+		   same host bridge type; matching on the exact device
+		   ID makes sure we bind the right driver. */
+		if (pdev->device == intel_agp_chipsets[i].chip_id) {
+			bridge->driver = intel_agp_chipsets[i].driver;
+			break;
+		}
+	}
+
+	if (!bridge->driver) {
+		if (cap_ptr)
+			dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
+				 pdev->vendor, pdev->device);
+		agp_put_bridge(bridge);
+		return -ENODEV;
+	}
+
+	bridge->dev = pdev;
+	bridge->dev_private_data = NULL;
+
+	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
+
+	/*
+	* The following fixes the case where the BIOS has "forgotten" to
+	* provide an address range for the GART.
+	* 20030610 - hamish@zot.org
+	* This happens before pci_enable_device() intentionally;
+	* calling pci_enable_device() before assigning the resource
+	* will result in the GART being disabled on machines with such
+	* BIOSs (the GART ends up with a BAR starting at 0, which
+	* conflicts with a lot of other devices).
+	*/
+	r = &pdev->resource[0];
+	if (!r->start && r->end) {
+		if (pci_assign_resource(pdev, 0)) {
+			dev_err(&pdev->dev, "can't assign resource 0\n");
+			agp_put_bridge(bridge);
+			return -ENODEV;
+		}
+	}
+
+	/*
+	* If the device has not been properly setup, the following will catch
+	* the problem and should stop the system from crashing.
+	* 20030610 - hamish@zot.org
+	*/
+	if (pci_enable_device(pdev)) {
+		dev_err(&pdev->dev, "can't enable PCI device\n");
+		agp_put_bridge(bridge);
+		return -ENODEV;
+	}
+
+	/* Fill in the mode register */
+	if (cap_ptr) {
+		pci_read_config_dword(pdev,
+				bridge->capndx+PCI_AGP_STATUS,
+				&bridge->mode);
+	}
+
+found_gmch:
+	pci_set_drvdata(pdev, bridge);
+	err = agp_add_bridge(bridge);
+	return err;
+}
+
+static void agp_intel_remove(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+	agp_remove_bridge(bridge);
+
+	intel_gmch_remove();
+
+	agp_put_bridge(bridge);
+}
+
+#ifdef CONFIG_PM
+static int agp_intel_resume(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+	bridge->driver->configure();
+
+	return 0;
+}
+#endif
+
+static const struct pci_device_id agp_intel_pci_table[] = {
+#define ID(x)						\
+	{						\
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),	\
+	.class_mask	= ~0,				\
+	.vendor		= PCI_VENDOR_ID_INTEL,		\
+	.device		= x,				\
+	.subvendor	= PCI_ANY_ID,			\
+	.subdevice	= PCI_ANY_ID,			\
+	}
+	ID(PCI_DEVICE_ID_INTEL_82441), /* for HAS2 support */
+	ID(PCI_DEVICE_ID_INTEL_82443LX_0),
+	ID(PCI_DEVICE_ID_INTEL_82443BX_0),
+	ID(PCI_DEVICE_ID_INTEL_82443GX_0),
+	ID(PCI_DEVICE_ID_INTEL_82810_MC1),
+	ID(PCI_DEVICE_ID_INTEL_82810_MC3),
+	ID(PCI_DEVICE_ID_INTEL_82810E_MC),
+	ID(PCI_DEVICE_ID_INTEL_82815_MC),
+	ID(PCI_DEVICE_ID_INTEL_82820_HB),
+	ID(PCI_DEVICE_ID_INTEL_82820_UP_HB),
+	ID(PCI_DEVICE_ID_INTEL_82830_HB),
+	ID(PCI_DEVICE_ID_INTEL_82840_HB),
+	ID(PCI_DEVICE_ID_INTEL_82845_HB),
+	ID(PCI_DEVICE_ID_INTEL_82845G_HB),
+	ID(PCI_DEVICE_ID_INTEL_82850_HB),
+	ID(PCI_DEVICE_ID_INTEL_82854_HB),
+	ID(PCI_DEVICE_ID_INTEL_82855PM_HB),
+	ID(PCI_DEVICE_ID_INTEL_82855GM_HB),
+	ID(PCI_DEVICE_ID_INTEL_82860_HB),
+	ID(PCI_DEVICE_ID_INTEL_82865_HB),
+	ID(PCI_DEVICE_ID_INTEL_82875_HB),
+	ID(PCI_DEVICE_ID_INTEL_7505_0),
+	ID(PCI_DEVICE_ID_INTEL_7205_0),
+	ID(PCI_DEVICE_ID_INTEL_E7221_HB),
+	ID(PCI_DEVICE_ID_INTEL_82915G_HB),
+	ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
+	ID(PCI_DEVICE_ID_INTEL_82945G_HB),
+	ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
+	ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
+	ID(PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB),
+	ID(PCI_DEVICE_ID_INTEL_PINEVIEW_HB),
+	ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
+	ID(PCI_DEVICE_ID_INTEL_82G35_HB),
+	ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
+	ID(PCI_DEVICE_ID_INTEL_82965G_HB),
+	ID(PCI_DEVICE_ID_INTEL_82965GM_HB),
+	ID(PCI_DEVICE_ID_INTEL_82965GME_HB),
+	ID(PCI_DEVICE_ID_INTEL_G33_HB),
+	ID(PCI_DEVICE_ID_INTEL_Q35_HB),
+	ID(PCI_DEVICE_ID_INTEL_Q33_HB),
+	ID(PCI_DEVICE_ID_INTEL_GM45_HB),
+	ID(PCI_DEVICE_ID_INTEL_EAGLELAKE_HB),
+	ID(PCI_DEVICE_ID_INTEL_Q45_HB),
+	ID(PCI_DEVICE_ID_INTEL_G45_HB),
+	ID(PCI_DEVICE_ID_INTEL_G41_HB),
+	ID(PCI_DEVICE_ID_INTEL_B43_HB),
+	ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
+
+static struct pci_driver agp_intel_pci_driver = {
+	.name		= "agpgart-intel",
+	.id_table	= agp_intel_pci_table,
+	.probe		= agp_intel_probe,
+	.remove		= agp_intel_remove,
+#ifdef CONFIG_PM
+	.resume		= agp_intel_resume,
+#endif
+};
+
+static int __init agp_intel_init(void)
+{
+	if (agp_off)
+		return -EINVAL;
+	return pci_register_driver(&agp_intel_pci_driver);
+}
+
+static void __exit agp_intel_cleanup(void)
+{
+	pci_unregister_driver(&agp_intel_pci_driver);
+}
+
+module_init(agp_intel_init);
+module_exit(agp_intel_cleanup);
+
+MODULE_AUTHOR("Dave Jones, Various @Intel");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
new file mode 100644
index 0000000..164bf65
--- /dev/null
+++ b/drivers/char/agp/intel-agp.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common Intel AGPGART and GTT definitions.
+ */
+#ifndef _INTEL_AGP_H
+#define _INTEL_AGP_H
+
+/* Intel registers */
+#define INTEL_APSIZE	0xb4
+#define INTEL_ATTBASE	0xb8
+#define INTEL_AGPCTRL	0xb0
+#define INTEL_NBXCFG	0x50
+#define INTEL_ERRSTS	0x91
+
+/* Intel i830 registers */
+#define I830_GMCH_CTRL			0x52
+#define I830_GMCH_ENABLED		0x4
+#define I830_GMCH_MEM_MASK		0x1
+#define I830_GMCH_MEM_64M		0x1
+#define I830_GMCH_MEM_128M		0
+#define I830_GMCH_GMS_MASK		0x70
+#define I830_GMCH_GMS_DISABLED		0x00
+#define I830_GMCH_GMS_LOCAL		0x10
+#define I830_GMCH_GMS_STOLEN_512	0x20
+#define I830_GMCH_GMS_STOLEN_1024	0x30
+#define I830_GMCH_GMS_STOLEN_8192	0x40
+#define I830_RDRAM_CHANNEL_TYPE		0x03010
+#define I830_RDRAM_ND(x)		(((x) & 0x20) >> 5)
+#define I830_RDRAM_DDT(x)		(((x) & 0x18) >> 3)
+
+/* This one is for I830MP w. an external graphic card */
+#define INTEL_I830_ERRSTS	0x92
+
+/* Intel 855GM/852GM registers */
+#define I855_GMCH_GMS_MASK		0xF0
+#define I855_GMCH_GMS_STOLEN_0M		0x0
+#define I855_GMCH_GMS_STOLEN_1M		(0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M		(0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M		(0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M	(0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M	(0x5 << 4)
+#define I85X_CAPID			0x44
+#define I85X_VARIANT_MASK		0x7
+#define I85X_VARIANT_SHIFT		5
+#define I855_GME			0x0
+#define I855_GM				0x4
+#define I852_GME			0x2
+#define I852_GM				0x5
+
+/* Intel i845 registers */
+#define INTEL_I845_AGPM		0x51
+#define INTEL_I845_ERRSTS	0xc8
+
+/* Intel i860 registers */
+#define INTEL_I860_MCHCFG	0x50
+#define INTEL_I860_ERRSTS	0xc8
+
+/* Intel i810 registers */
+#define I810_GMADR_BAR		0
+#define I810_MMADR_BAR		1
+#define I810_PTE_BASE		0x10000
+#define I810_PTE_MAIN_UNCACHED	0x00000000
+#define I810_PTE_LOCAL		0x00000002
+#define I810_PTE_VALID		0x00000001
+#define I830_PTE_SYSTEM_CACHED  0x00000006
+
+#define I810_SMRAM_MISCC	0x70
+#define I810_GFX_MEM_WIN_SIZE	0x00010000
+#define I810_GFX_MEM_WIN_32M	0x00010000
+#define I810_GMS		0x000000c0
+#define I810_GMS_DISABLE	0x00000000
+#define I810_PGETBL_CTL		0x2020
+#define I810_PGETBL_ENABLED	0x00000001
+/* Note: PGETBL_CTL2 has a different offset on G33. */
+#define I965_PGETBL_CTL2	0x20c4
+#define I965_PGETBL_SIZE_MASK	0x0000000e
+#define I965_PGETBL_SIZE_512KB	(0 << 1)
+#define I965_PGETBL_SIZE_256KB	(1 << 1)
+#define I965_PGETBL_SIZE_128KB	(2 << 1)
+#define I965_PGETBL_SIZE_1MB	(3 << 1)
+#define I965_PGETBL_SIZE_2MB	(4 << 1)
+#define I965_PGETBL_SIZE_1_5MB	(5 << 1)
+#define G33_GMCH_SIZE_MASK	(3 << 8)
+#define G33_GMCH_SIZE_1M	(1 << 8)
+#define G33_GMCH_SIZE_2M	(2 << 8)
+#define G4x_GMCH_SIZE_MASK	(0xf << 8)
+#define G4x_GMCH_SIZE_1M	(0x1 << 8)
+#define G4x_GMCH_SIZE_2M	(0x3 << 8)
+#define G4x_GMCH_SIZE_VT_EN	(0x8 << 8)
+#define G4x_GMCH_SIZE_VT_1M	(G4x_GMCH_SIZE_1M | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_1_5M	((0x2 << 8) | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_2M	(G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
+
+#define GFX_FLSH_CNTL		0x2170 /* 915+ */
+
+#define I810_DRAM_CTL		0x3000
+#define I810_DRAM_ROW_0		0x00000001
+#define I810_DRAM_ROW_0_SDRAM	0x00000001
+
+/* Intel 815 register */
+#define INTEL_815_APCONT	0x51
+#define INTEL_815_ATTBASE_MASK	~0x1FFFFFFF
+
+/* Intel i820 registers */
+#define INTEL_I820_RDCR		0x51
+#define INTEL_I820_ERRSTS	0xc8
+
+/* Intel i840 registers */
+#define INTEL_I840_MCHCFG	0x50
+#define INTEL_I840_ERRSTS	0xc8
+
+/* Intel i850 registers */
+#define INTEL_I850_MCHCFG	0x50
+#define INTEL_I850_ERRSTS	0xc8
+
+/* intel 915G registers */
+#define I915_GMADR_BAR	2
+#define I915_MMADR_BAR	0
+#define I915_PTE_BAR	3
+#define I915_GMCH_GMS_STOLEN_48M	(0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M	(0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M	(0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M	(0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M	(0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M	(0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M	(0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
+
+#define I915_IFPADDR    0x60
+#define I830_HIC        0x70
+
+/* Intel 965G registers */
+#define I965_MSAC 0x62
+#define I965_IFPADDR    0x70
+
+/* Intel 7505 registers */
+#define INTEL_I7505_APSIZE	0x74
+#define INTEL_I7505_NCAPID	0x60
+#define INTEL_I7505_NISTAT	0x6c
+#define INTEL_I7505_ATTBASE	0x78
+#define INTEL_I7505_ERRSTS	0x42
+#define INTEL_I7505_AGPCTRL	0x70
+#define INTEL_I7505_MCHCFG	0x50
+
+/* pci devices ids */
+#define PCI_DEVICE_ID_INTEL_E7221_HB	0x2588
+#define PCI_DEVICE_ID_INTEL_E7221_IG	0x258a
+#define PCI_DEVICE_ID_INTEL_82946GZ_HB      0x2970
+#define PCI_DEVICE_ID_INTEL_82946GZ_IG      0x2972
+#define PCI_DEVICE_ID_INTEL_82G35_HB     0x2980
+#define PCI_DEVICE_ID_INTEL_82G35_IG     0x2982
+#define PCI_DEVICE_ID_INTEL_82965Q_HB       0x2990
+#define PCI_DEVICE_ID_INTEL_82965Q_IG       0x2992
+#define PCI_DEVICE_ID_INTEL_82965G_HB       0x29A0
+#define PCI_DEVICE_ID_INTEL_82965G_IG       0x29A2
+#define PCI_DEVICE_ID_INTEL_82965GM_HB      0x2A00
+#define PCI_DEVICE_ID_INTEL_82965GM_IG      0x2A02
+#define PCI_DEVICE_ID_INTEL_82965GME_HB     0x2A10
+#define PCI_DEVICE_ID_INTEL_82965GME_IG     0x2A12
+#define PCI_DEVICE_ID_INTEL_82945GME_HB     0x27AC
+#define PCI_DEVICE_ID_INTEL_82945GME_IG     0x27AE
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB        0xA010
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG        0xA011
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB         0xA000
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG         0xA001
+#define PCI_DEVICE_ID_INTEL_G33_HB          0x29C0
+#define PCI_DEVICE_ID_INTEL_G33_IG          0x29C2
+#define PCI_DEVICE_ID_INTEL_Q35_HB          0x29B0
+#define PCI_DEVICE_ID_INTEL_Q35_IG          0x29B2
+#define PCI_DEVICE_ID_INTEL_Q33_HB          0x29D0
+#define PCI_DEVICE_ID_INTEL_Q33_IG          0x29D2
+#define PCI_DEVICE_ID_INTEL_B43_HB          0x2E40
+#define PCI_DEVICE_ID_INTEL_B43_IG          0x2E42
+#define PCI_DEVICE_ID_INTEL_B43_1_HB        0x2E90
+#define PCI_DEVICE_ID_INTEL_B43_1_IG        0x2E92
+#define PCI_DEVICE_ID_INTEL_GM45_HB         0x2A40
+#define PCI_DEVICE_ID_INTEL_GM45_IG         0x2A42
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB        0x2E00
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG        0x2E02
+#define PCI_DEVICE_ID_INTEL_Q45_HB          0x2E10
+#define PCI_DEVICE_ID_INTEL_Q45_IG          0x2E12
+#define PCI_DEVICE_ID_INTEL_G45_HB          0x2E20
+#define PCI_DEVICE_ID_INTEL_G45_IG          0x2E22
+#define PCI_DEVICE_ID_INTEL_G41_HB          0x2E30
+#define PCI_DEVICE_ID_INTEL_G41_IG          0x2E32
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB	    0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB	    0x0069
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG	    0x0042
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB	    0x0044
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB	    0x0062
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB    0x006a
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG	    0x0046
+
+#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
new file mode 100644
index 0000000..c6271ce
--- /dev/null
+++ b/drivers/char/agp/intel-gtt.c
@@ -0,0 +1,1459 @@
+/*
+ * Intel GTT (Graphics Translation Table) routines
+ *
+ * Caveat: This driver implements the linux agp interface, but this is far from
+ * an agp driver! GTT support ended up here for purely historical reasons: The
+ * old userspace intel graphics drivers needed an interface to map memory into
+ * the GTT. And the drm provides a default interface for graphic devices sitting
+ * on an agp port. So it made sense to fake the GTT support as an agp port to
+ * avoid having to create a new api.
+ *
+ * With gem this does not make much sense anymore; it just needlessly
+ * complicates the code. But as long as the old graphics stack is still
+ * supported, it's stuck here.
+ *
+ * /fairy-tale-mode off
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <linux/delay.h>
+#include <asm/smp.h>
+#include "agp.h"
+#include "intel-agp.h"
+#include <drm/intel-gtt.h>
+#include <asm/set_memory.h>
+
+/*
+ * If we have Intel graphics, we're not going to have anything other than
+ * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
+ * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
+ * Only newer chipsets need to bother with this, of course.
+ */
+#ifdef CONFIG_INTEL_IOMMU
+#define USE_PCI_DMA_API 1
+#else
+#define USE_PCI_DMA_API 0
+#endif
+
+struct intel_gtt_driver {
+	unsigned int gen : 8;
+	unsigned int is_g33 : 1;
+	unsigned int is_pineview : 1;
+	unsigned int is_ironlake : 1;
+	unsigned int has_pgtbl_enable : 1;
+	unsigned int dma_mask_size : 8;
+	/* Chipset specific GTT setup */
+	int (*setup)(void);
+	/* This should undo anything done in ->setup() save the unmapping
+	 * of the mmio register file, that's done in the generic code. */
+	void (*cleanup)(void);
+	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
+	/* Flags is a more or less chipset specific opaque value.
+	 * For chipsets that need to support old ums (non-gem) code, this
+	 * needs to be identical to the various supported agp memory types! */
+	bool (*check_flags)(unsigned int flags);
+	void (*chipset_flush)(void);
+};
+
+static struct _intel_private {
+	const struct intel_gtt_driver *driver;
+	struct pci_dev *pcidev;	/* device one */
+	struct pci_dev *bridge_dev;
+	u8 __iomem *registers;
+	phys_addr_t gtt_phys_addr;
+	u32 PGETBL_save;
+	u32 __iomem *gtt;		/* I915G */
+	bool clear_fake_agp; /* on first access via agp, fill with scratch */
+	int num_dcache_entries;
+	void __iomem *i9xx_flush_page;
+	char *i81x_gtt_table;
+	struct resource ifp_resource;
+	int resource_valid;
+	struct page *scratch_page;
+	phys_addr_t scratch_page_dma;
+	int refcount;
+	/* Whether i915 needs to use the dmar apis or not. */
+	unsigned int needs_dmar : 1;
+	phys_addr_t gma_bus_addr;
+	/*  Size of memory reserved for graphics by the BIOS */
+	resource_size_t stolen_size;
+	/* Total number of gtt entries. */
+	unsigned int gtt_total_entries;
+	/* Part of the gtt that is mappable by the cpu, for those chips where
+	 * this is not the full gtt. */
+	unsigned int gtt_mappable_entries;
+} intel_private;
+
+#define INTEL_GTT_GEN	intel_private.driver->gen
+#define IS_G33		intel_private.driver->is_g33
+#define IS_PINEVIEW	intel_private.driver->is_pineview
+#define IS_IRONLAKE	intel_private.driver->is_ironlake
+#define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+static int intel_gtt_map_memory(struct page **pages,
+				unsigned int num_entries,
+				struct sg_table *st)
+{
+	struct scatterlist *sg;
+	int i;
+
+	DBG("try mapping %lu pages\n", (unsigned long)num_entries);
+
+	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
+		goto err;
+
+	for_each_sg(st->sgl, sg, num_entries, i)
+		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
+
+	if (!pci_map_sg(intel_private.pcidev,
+			st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
+		goto err;
+
+	return 0;
+
+err:
+	sg_free_table(st);
+	return -ENOMEM;
+}
+
+static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
+{
+	struct sg_table st;
+	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
+
+	pci_unmap_sg(intel_private.pcidev, sg_list,
+		     num_sg, PCI_DMA_BIDIRECTIONAL);
+
+	st.sgl = sg_list;
+	st.orig_nents = st.nents = num_sg;
+
+	sg_free_table(&st);
+}
+
+static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+	return;
+}
+
+/* Exists to support ARGB cursors */
+static struct page *i8xx_alloc_pages(void)
+{
+	struct page *page;
+
+	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
+	if (page == NULL)
+		return NULL;
+
+	if (set_pages_uc(page, 4) < 0) {
+		set_pages_wb(page, 4);
+		__free_pages(page, 2);
+		return NULL;
+	}
+	atomic_inc(&agp_bridge->current_memory_agp);
+	return page;
+}
+
+static void i8xx_destroy_pages(struct page *page)
+{
+	if (page == NULL)
+		return;
+
+	set_pages_wb(page, 4);
+	__free_pages(page, 2);
+	atomic_dec(&agp_bridge->current_memory_agp);
+}
+#endif
+
+#define I810_GTT_ORDER 4
+static int i810_setup(void)
+{
+	phys_addr_t reg_addr;
+	char *gtt_table;
+
+	/* i81x does not preallocate the gtt. It's always 64KB in size. */
+	gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
+	if (gtt_table == NULL)
+		return -ENOMEM;
+	intel_private.i81x_gtt_table = gtt_table;
+
+	reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);
+
+	intel_private.registers = ioremap(reg_addr, KB(64));
+	if (!intel_private.registers)
+		return -ENOMEM;
+
+	writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
+	       intel_private.registers+I810_PGETBL_CTL);
+
+	intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;
+
+	if ((readl(intel_private.registers+I810_DRAM_CTL)
+		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+		dev_info(&intel_private.pcidev->dev,
+			 "detected 4MB dedicated video ram\n");
+		intel_private.num_dcache_entries = 1024;
+	}
+
+	return 0;
+}
+
+static void i810_cleanup(void)
+{
+	writel(0, intel_private.registers+I810_PGETBL_CTL);
+	free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
+}
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
+				      int type)
+{
+	int i;
+
+	if ((pg_start + mem->page_count)
+			> intel_private.num_dcache_entries)
+		return -EINVAL;
+
+	if (!mem->is_flushed)
+		global_cache_flush();
+
+	for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+		dma_addr_t addr = i << PAGE_SHIFT;
+		intel_private.driver->write_entry(addr,
+						  i, type);
+	}
+	wmb();
+
+	return 0;
+}
+
+/*
+ * The i810/i830 requires a physical address to program its mouse
+ * pointer into hardware.
+ * However the X server still writes to it through the agp aperture.
+ */
+static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
+{
+	struct agp_memory *new;
+	struct page *page;
+
+	switch (pg_count) {
+	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
+		break;
+	case 4:
+		/* kludge to get 4 physical pages for ARGB cursor */
+		page = i8xx_alloc_pages();
+		break;
+	default:
+		return NULL;
+	}
+
+	if (page == NULL)
+		return NULL;
+
+	new = agp_create_memory(pg_count);
+	if (new == NULL)
+		return NULL;
+
+	new->pages[0] = page;
+	if (pg_count == 4) {
+		/* kludge to get 4 physical pages for ARGB cursor */
+		new->pages[1] = new->pages[0] + 1;
+		new->pages[2] = new->pages[1] + 1;
+		new->pages[3] = new->pages[2] + 1;
+	}
+	new->page_count = pg_count;
+	new->num_scratch_pages = pg_count;
+	new->type = AGP_PHYS_MEMORY;
+	new->physical = page_to_phys(new->pages[0]);
+	return new;
+}
+
+static void intel_i810_free_by_type(struct agp_memory *curr)
+{
+	agp_free_key(curr->key);
+	if (curr->type == AGP_PHYS_MEMORY) {
+		if (curr->page_count == 4)
+			i8xx_destroy_pages(curr->pages[0]);
+		else {
+			agp_bridge->driver->agp_destroy_page(curr->pages[0],
+							     AGP_PAGE_DESTROY_UNMAP);
+			agp_bridge->driver->agp_destroy_page(curr->pages[0],
+							     AGP_PAGE_DESTROY_FREE);
+		}
+		agp_free_page_array(curr);
+	}
+	kfree(curr);
+}
+#endif
+
+static int intel_gtt_setup_scratch_page(void)
+{
+	struct page *page;
+	dma_addr_t dma_addr;
+
+	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+	if (page == NULL)
+		return -ENOMEM;
+	set_pages_uc(page, 1);
+
+	if (intel_private.needs_dmar) {
+		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
+				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
+			return -EINVAL;
+
+		intel_private.scratch_page_dma = dma_addr;
+	} else
+		intel_private.scratch_page_dma = page_to_phys(page);
+
+	intel_private.scratch_page = page;
+
+	return 0;
+}
+
+static void i810_write_entry(dma_addr_t addr, unsigned int entry,
+			     unsigned int flags)
+{
+	u32 pte_flags = I810_PTE_VALID;
+
+	switch (flags) {
+	case AGP_DCACHE_MEMORY:
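+		/* dcache entries map the i810's dedicated video ram. */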
+		pte_flags |= I810_PTE_LOCAL;
+		break;
+	case AGP_USER_CACHED_MEMORY:
+		pte_flags |= I830_PTE_SYSTEM_CACHED;
+		break;
+	}
+
+	writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
+}
+
+static resource_size_t intel_gtt_stolen_size(void)
+{
+	u16 gmch_ctrl;
+	u8 rdct;
+	int local = 0;
+	static const int ddt[4] = { 0, 16, 32, 64 };
+	resource_size_t stolen_size = 0;
+
+	if (INTEL_GTT_GEN == 1)
+		return 0; /* no stolen mem on i81x */
+
+	pci_read_config_word(intel_private.bridge_dev,
+			     I830_GMCH_CTRL, &gmch_ctrl);
+
+	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+		case I830_GMCH_GMS_STOLEN_512:
+			stolen_size = KB(512);
+			break;
+		case I830_GMCH_GMS_STOLEN_1024:
+			stolen_size = MB(1);
+			break;
+		case I830_GMCH_GMS_STOLEN_8192:
+			stolen_size = MB(8);
+			break;
+		case I830_GMCH_GMS_LOCAL:
+			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
+			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
+					MB(ddt[I830_RDRAM_DDT(rdct)]);
+			local = 1;
+			break;
+		default:
+			stolen_size = 0;
+			break;
+		}
+	} else {
+		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
+		case I855_GMCH_GMS_STOLEN_1M:
+			stolen_size = MB(1);
+			break;
+		case I855_GMCH_GMS_STOLEN_4M:
+			stolen_size = MB(4);
+			break;
+		case I855_GMCH_GMS_STOLEN_8M:
+			stolen_size = MB(8);
+			break;
+		case I855_GMCH_GMS_STOLEN_16M:
+			stolen_size = MB(16);
+			break;
+		case I855_GMCH_GMS_STOLEN_32M:
+			stolen_size = MB(32);
+			break;
+		case I915_GMCH_GMS_STOLEN_48M:
+			stolen_size = MB(48);
+			break;
+		case I915_GMCH_GMS_STOLEN_64M:
+			stolen_size = MB(64);
+			break;
+		case G33_GMCH_GMS_STOLEN_128M:
+			stolen_size = MB(128);
+			break;
+		case G33_GMCH_GMS_STOLEN_256M:
+			stolen_size = MB(256);
+			break;
+		case INTEL_GMCH_GMS_STOLEN_96M:
+			stolen_size = MB(96);
+			break;
+		case INTEL_GMCH_GMS_STOLEN_160M:
+			stolen_size = MB(160);
+			break;
+		case INTEL_GMCH_GMS_STOLEN_224M:
+			stolen_size = MB(224);
+			break;
+		case INTEL_GMCH_GMS_STOLEN_352M:
+			stolen_size = MB(352);
+			break;
+		default:
+			stolen_size = 0;
+			break;
+		}
+	}
+
+	if (stolen_size > 0) {
+		dev_info(&intel_private.bridge_dev->dev, "detected %lluK %s memory\n",
+		       (u64)stolen_size / KB(1), local ? "local" : "stolen");
+	} else {
+		dev_info(&intel_private.bridge_dev->dev,
+		       "no pre-allocated video memory detected\n");
+		stolen_size = 0;
+	}
+
+	return stolen_size;
+}
+
+static void i965_adjust_pgetbl_size(unsigned int size_flag)
+{
+	u32 pgetbl_ctl, pgetbl_ctl2;
+
+	/* ensure that ppgtt is disabled */
+	pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
+	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
+	writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
+
+	/* write the new ggtt size */
+	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
+	pgetbl_ctl |= size_flag;
+	writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
+}
+
+static unsigned int i965_gtt_total_entries(void)
+{
+	int size;
+	u32 pgetbl_ctl;
+	u16 gmch_ctl;
+
+	pci_read_config_word(intel_private.bridge_dev,
+			     I830_GMCH_CTRL, &gmch_ctl);
+
+	if (INTEL_GTT_GEN == 5) {
+		switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
+		case G4x_GMCH_SIZE_1M:
+		case G4x_GMCH_SIZE_VT_1M:
+			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
+			break;
+		case G4x_GMCH_SIZE_VT_1_5M:
+			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
+			break;
+		case G4x_GMCH_SIZE_2M:
+		case G4x_GMCH_SIZE_VT_2M:
+			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
+			break;
+		}
+	}
+
+	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+	switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+	case I965_PGETBL_SIZE_128KB:
+		size = KB(128);
+		break;
+	case I965_PGETBL_SIZE_256KB:
+		size = KB(256);
+		break;
+	case I965_PGETBL_SIZE_512KB:
+		size = KB(512);
+		break;
+	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
+	case I965_PGETBL_SIZE_1MB:
+		size = KB(1024);
+		break;
+	case I965_PGETBL_SIZE_2MB:
+		size = KB(2048);
+		break;
+	case I965_PGETBL_SIZE_1_5MB:
+		size = KB(1024 + 512);
+		break;
+	default:
+		dev_info(&intel_private.pcidev->dev,
+			 "unknown page table size, assuming 512KB\n");
+		size = KB(512);
+	}
+
+	return size/4;
+}
+
+static unsigned int intel_gtt_total_entries(void)
+{
+	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
+		return i965_gtt_total_entries();
+	else {
+		/* On previous hardware, the GTT size was just what was
+		 * required to map the aperture.
+		 */
+		return intel_private.gtt_mappable_entries;
+	}
+}
+
+static unsigned int intel_gtt_mappable_entries(void)
+{
+	unsigned int aperture_size;
+
+	if (INTEL_GTT_GEN == 1) {
+		u32 smram_miscc;
+
+		pci_read_config_dword(intel_private.bridge_dev,
+				      I810_SMRAM_MISCC, &smram_miscc);
+
+		if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
+				== I810_GFX_MEM_WIN_32M)
+			aperture_size = MB(32);
+		else
+			aperture_size = MB(64);
+	} else if (INTEL_GTT_GEN == 2) {
+		u16 gmch_ctrl;
+
+		pci_read_config_word(intel_private.bridge_dev,
+				     I830_GMCH_CTRL, &gmch_ctrl);
+
+		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
+			aperture_size = MB(64);
+		else
+			aperture_size = MB(128);
+	} else {
+		/* 9xx supports large sizes, just look at the length */
+		aperture_size = pci_resource_len(intel_private.pcidev, 2);
+	}
+
+	return aperture_size >> PAGE_SHIFT;
+}
+
+static void intel_gtt_teardown_scratch_page(void)
+{
+	set_pages_wb(intel_private.scratch_page, 1);
+	if (intel_private.needs_dmar)
+		pci_unmap_page(intel_private.pcidev,
+			       intel_private.scratch_page_dma,
+			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	__free_page(intel_private.scratch_page);
+}
+
+static void intel_gtt_cleanup(void)
+{
+	intel_private.driver->cleanup();
+
+	iounmap(intel_private.gtt);
+	iounmap(intel_private.registers);
+
+	intel_gtt_teardown_scratch_page();
+}
+
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static inline int needs_ilk_vtd_wa(void)
+{
+#ifdef CONFIG_INTEL_IOMMU
+	const unsigned short gpu_devid = intel_private.pcidev->device;
+
+	/* Query intel_iommu to see if we need the workaround. Presumably that
+	 * was loaded first.
+	 */
+	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
+	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+	     intel_iommu_gfx_mapped)
+		return 1;
+#endif
+	return 0;
+}
+
+static bool intel_gtt_can_wc(void)
+{
+	if (INTEL_GTT_GEN <= 2)
+		return false;
+
+	if (INTEL_GTT_GEN >= 6)
+		return false;
+
+	/* Reports of major corruption with ILK vt'd enabled */
+	if (needs_ilk_vtd_wa())
+		return false;
+
+	return true;
+}
+
+static int intel_gtt_init(void)
+{
+	u32 gtt_map_size;
+	int ret, bar;
+
+	ret = intel_private.driver->setup();
+	if (ret != 0)
+		return ret;
+
+	intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
+	intel_private.gtt_total_entries = intel_gtt_total_entries();
+
+	/* save the PGETBL reg for resume */
+	intel_private.PGETBL_save =
+		readl(intel_private.registers+I810_PGETBL_CTL)
+			& ~I810_PGETBL_ENABLED;
+	/* we only ever restore the register when enabling the PGTBL... */
+	if (HAS_PGTBL_EN)
+		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
+
+	dev_info(&intel_private.bridge_dev->dev,
+			"detected gtt size: %dK total, %dK mappable\n",
+			intel_private.gtt_total_entries * 4,
+			intel_private.gtt_mappable_entries * 4);
+
+	gtt_map_size = intel_private.gtt_total_entries * 4;
+
+	intel_private.gtt = NULL;
+	if (intel_gtt_can_wc())
+		intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
+					       gtt_map_size);
+	if (intel_private.gtt == NULL)
+		intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
+					    gtt_map_size);
+	if (intel_private.gtt == NULL) {
+		intel_private.driver->cleanup();
+		iounmap(intel_private.registers);
+		return -ENOMEM;
+	}
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+	global_cache_flush();   /* FIXME: ? */
+#endif
+
+	intel_private.stolen_size = intel_gtt_stolen_size();
+
+	intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+
+	ret = intel_gtt_setup_scratch_page();
+	if (ret != 0) {
+		intel_gtt_cleanup();
+		return ret;
+	}
+
+	if (INTEL_GTT_GEN <= 2)
+		bar = I810_GMADR_BAR;
+	else
+		bar = I915_GMADR_BAR;
+
+	intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
+	{32, 8192, 3},
+	{64, 16384, 4},
+	{128, 32768, 5},
+	{256, 65536, 6},
+	{512, 131072, 7},
+};
+
+static int intel_fake_agp_fetch_size(void)
+{
+	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
+	unsigned int aper_size;
+	int i;
+
+	aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
+
+	for (i = 0; i < num_sizes; i++) {
+		if (aper_size == intel_fake_agp_sizes[i].size) {
+			agp_bridge->current_size =
+				(void *) (intel_fake_agp_sizes + i);
+			return aper_size;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+static void i830_cleanup(void)
+{
+}
+
+/* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU all the way out to main memory, because the GPU
+ * doesn't snoop those buffers.
+ *
+ * The 8xx series doesn't have the same lovely interface for flushing the
+ * chipset write buffers that the later chips do. According to the 865
+ * specs, it's 64 octwords, or 1KB.  So, to get those previous things in
+ * that buffer out, we just fill 1KB and clflush it out, on the assumption
+ * that it'll push whatever was in there out.  It appears to work.
+ */
+static void i830_chipset_flush(void)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+	/* Forcibly evict everything from the CPU write buffers.
+	 * clflush appears to be insufficient.
+	 */
+	wbinvd_on_all_cpus();
+
+	/* So far we've only seen documentation for this magic bit on the
+	 * 855GM; we hope it exists for the other gen2 chipsets...
+	 *
+	 * Also works as advertised on my 845G.
+	 */
+	writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+	       intel_private.registers+I830_HIC);
+
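+	/* Poll for the flush bit to self-clear; give up after a second. */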
+	while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+		if (time_after(jiffies, timeout))
+			break;
+
+		udelay(50);
+	}
+}
+
+static void i830_write_entry(dma_addr_t addr, unsigned int entry,
+			     unsigned int flags)
+{
+	u32 pte_flags = I810_PTE_VALID;
+
+	if (flags ==  AGP_USER_CACHED_MEMORY)
+		pte_flags |= I830_PTE_SYSTEM_CACHED;
+
+	writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
+}
+
+bool intel_enable_gtt(void)
+{
+	u8 __iomem *reg;
+
+	if (INTEL_GTT_GEN == 2) {
+		u16 gmch_ctrl;
+
+		pci_read_config_word(intel_private.bridge_dev,
+				     I830_GMCH_CTRL, &gmch_ctrl);
+		gmch_ctrl |= I830_GMCH_ENABLED;
+		pci_write_config_word(intel_private.bridge_dev,
+				      I830_GMCH_CTRL, gmch_ctrl);
+
+		pci_read_config_word(intel_private.bridge_dev,
+				     I830_GMCH_CTRL, &gmch_ctrl);
+		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
+			dev_err(&intel_private.pcidev->dev,
+				"failed to enable the GTT: GMCH_CTRL=%x\n",
+				gmch_ctrl);
+			return false;
+		}
+	}
+
+	/* On the resume path we may be adjusting the PGTBL value, so
+	 * be paranoid and flush all chipset write buffers...
+	 */
+	if (INTEL_GTT_GEN >= 3)
+		writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
+	reg = intel_private.registers+I810_PGETBL_CTL;
+	writel(intel_private.PGETBL_save, reg);
+	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
+		dev_err(&intel_private.pcidev->dev,
+			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
+			readl(reg), intel_private.PGETBL_save);
+		return false;
+	}
+
+	if (INTEL_GTT_GEN >= 3)
+		writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
+	return true;
+}
+EXPORT_SYMBOL(intel_enable_gtt);
+
+static int i830_setup(void)
+{
+	phys_addr_t reg_addr;
+
+	reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);
+
+	intel_private.registers = ioremap(reg_addr, KB(64));
+	if (!intel_private.registers)
+		return -ENOMEM;
+
+	intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;
+
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	agp_bridge->gatt_table_real = NULL;
+	agp_bridge->gatt_table = NULL;
+	agp_bridge->gatt_bus_addr = 0;
+
+	return 0;
+}
+
+static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
+{
+	return 0;
+}
+
+static int intel_fake_agp_configure(void)
+{
+	if (!intel_enable_gtt())
+	    return -EIO;
+
+	intel_private.clear_fake_agp = true;
+	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
+
+	return 0;
+}
+#endif
+
+static bool i830_check_flags(unsigned int flags)
+{
+	switch (flags) {
+	case 0:
+	case AGP_PHYS_MEMORY:
+	case AGP_USER_CACHED_MEMORY:
+	case AGP_USER_MEMORY:
+		return true;
+	}
+
+	return false;
+}
+
+void intel_gtt_insert_page(dma_addr_t addr,
+			   unsigned int pg,
+			   unsigned int flags)
+{
+	intel_private.driver->write_entry(addr, pg, flags);
+	if (intel_private.driver->chipset_flush)
+		intel_private.driver->chipset_flush();
+}
+EXPORT_SYMBOL(intel_gtt_insert_page);
+
+void intel_gtt_insert_sg_entries(struct sg_table *st,
+				 unsigned int pg_start,
+				 unsigned int flags)
+{
+	struct scatterlist *sg;
+	unsigned int len, m;
+	int i, j;
+
+	j = pg_start;
+
+	/* sg may merge pages, but we have to separate
+	 * per-page addr for GTT */
+	for_each_sg(st->sgl, sg, st->nents, i) {
+		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		for (m = 0; m < len; m++) {
+			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+			intel_private.driver->write_entry(addr, j, flags);
+			j++;
+		}
+	}
+	wmb();
+	if (intel_private.driver->chipset_flush)
+		intel_private.driver->chipset_flush();
+}
+EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+static void intel_gtt_insert_pages(unsigned int first_entry,
+				   unsigned int num_entries,
+				   struct page **pages,
+				   unsigned int flags)
+{
+	int i, j;
+
+	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
+		dma_addr_t addr = page_to_phys(pages[i]);
+		intel_private.driver->write_entry(addr,
+						  j, flags);
+	}
+	wmb();
+}
+
+static int intel_fake_agp_insert_entries(struct agp_memory *mem,
+					 off_t pg_start, int type)
+{
+	int ret = -EINVAL;
+
+	if (intel_private.clear_fake_agp) {
+		int start = intel_private.stolen_size / PAGE_SIZE;
+		int end = intel_private.gtt_mappable_entries;
+		intel_gtt_clear_range(start, end - start);
+		intel_private.clear_fake_agp = false;
+	}
+
+	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
+		return i810_insert_dcache_entries(mem, pg_start, type);
+
+	if (mem->page_count == 0)
+		goto out;
+
+	if (pg_start + mem->page_count > intel_private.gtt_total_entries)
+		goto out_err;
+
+	if (type != mem->type)
+		goto out_err;
+
+	if (!intel_private.driver->check_flags(type))
+		goto out_err;
+
+	if (!mem->is_flushed)
+		global_cache_flush();
+
+	if (intel_private.needs_dmar) {
+		struct sg_table st;
+
+		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
+		if (ret != 0)
+			return ret;
+
+		intel_gtt_insert_sg_entries(&st, pg_start, type);
+		mem->sg_list = st.sgl;
+		mem->num_sg = st.nents;
+	} else
+		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
+				       type);
+
+out:
+	ret = 0;
+out_err:
+	mem->is_flushed = true;
+	return ret;
+}
+#endif
+
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
+{
+	unsigned int i;
+
+	for (i = first_entry; i < (first_entry + num_entries); i++) {
+		intel_private.driver->write_entry(intel_private.scratch_page_dma,
+						  i, 0);
+	}
+	wmb();
+}
+EXPORT_SYMBOL(intel_gtt_clear_range);
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+static int intel_fake_agp_remove_entries(struct agp_memory *mem,
+					 off_t pg_start, int type)
+{
+	if (mem->page_count == 0)
+		return 0;
+
+	intel_gtt_clear_range(pg_start, mem->page_count);
+
+	if (intel_private.needs_dmar) {
+		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
+		mem->sg_list = NULL;
+		mem->num_sg = 0;
+	}
+
+	return 0;
+}
+
+static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
+						       int type)
+{
+	struct agp_memory *new;
+
+	if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
+		if (pg_count != intel_private.num_dcache_entries)
+			return NULL;
+
+		new = agp_create_memory(1);
+		if (new == NULL)
+			return NULL;
+
+		new->type = AGP_DCACHE_MEMORY;
+		new->page_count = pg_count;
+		new->num_scratch_pages = 0;
+		agp_free_page_array(new);
+		return new;
+	}
+	if (type == AGP_PHYS_MEMORY)
+		return alloc_agpphysmem_i8xx(pg_count, type);
+	/* always return NULL for other allocation types for now */
+	return NULL;
+}
+#endif
+
+static int intel_alloc_chipset_flush_resource(void)
+{
+	int ret;
+	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
+				     pcibios_align_resource, intel_private.bridge_dev);
+
+	return ret;
+}
+
+static void intel_i915_setup_chipset_flush(void)
+{
+	int ret;
+	u32 temp;
+
+	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
+	if (!(temp & 0x1)) {
+		intel_alloc_chipset_flush_resource();
+		intel_private.resource_valid = 1;
+		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+	} else {
+		temp &= ~1;
+
+		intel_private.resource_valid = 1;
+		intel_private.ifp_resource.start = temp;
+		intel_private.ifp_resource.end = temp + PAGE_SIZE;
+		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+		/* some BIOSes reserve this area in a PnP resource, some don't */
+		if (ret)
+			intel_private.resource_valid = 0;
+	}
+}
+
+static void intel_i965_g33_setup_chipset_flush(void)
+{
+	u32 temp_hi, temp_lo;
+	int ret;
+
+	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
+	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
+
+	if (!(temp_lo & 0x1)) {
+
+		intel_alloc_chipset_flush_resource();
+
+		intel_private.resource_valid = 1;
+		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
+			upper_32_bits(intel_private.ifp_resource.start));
+		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+	} else {
+		u64 l64;
+
+		temp_lo &= ~0x1;
+		l64 = ((u64)temp_hi << 32) | temp_lo;
+
+		intel_private.resource_valid = 1;
+		intel_private.ifp_resource.start = l64;
+		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
+		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+		/* some BIOSes reserve this area in a PnP resource, some don't */
+		if (ret)
+			intel_private.resource_valid = 0;
+	}
+}
+
+static void intel_i9xx_setup_flush(void)
+{
+	/* return if already configured */
+	if (intel_private.ifp_resource.start)
+		return;
+
+	if (INTEL_GTT_GEN == 6)
+		return;
+
+	/* setup a resource for this object */
+	intel_private.ifp_resource.name = "Intel Flush Page";
+	intel_private.ifp_resource.flags = IORESOURCE_MEM;
+
+	/* Setup chipset flush for 915 */
+	if (IS_G33 || INTEL_GTT_GEN >= 4) {
+		intel_i965_g33_setup_chipset_flush();
+	} else {
+		intel_i915_setup_chipset_flush();
+	}
+
+	if (intel_private.ifp_resource.start)
+		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
+	if (!intel_private.i9xx_flush_page)
+		dev_err(&intel_private.pcidev->dev,
+			"can't ioremap flush page - no chipset flushing\n");
+}
+
+static void i9xx_cleanup(void)
+{
+	if (intel_private.i9xx_flush_page)
+		iounmap(intel_private.i9xx_flush_page);
+	if (intel_private.resource_valid)
+		release_resource(&intel_private.ifp_resource);
+	intel_private.ifp_resource.start = 0;
+	intel_private.resource_valid = 0;
+}
+
+static void i9xx_chipset_flush(void)
+{
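+	/* Any write to the flush page makes the chipset flush its write
+	 * buffers out to memory. */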
+	if (intel_private.i9xx_flush_page)
+		writel(1, intel_private.i9xx_flush_page);
+}
+
+static void i965_write_entry(dma_addr_t addr,
+			     unsigned int entry,
+			     unsigned int flags)
+{
+	u32 pte_flags;
+
+	pte_flags = I810_PTE_VALID;
+	if (flags == AGP_USER_CACHED_MEMORY)
+		pte_flags |= I830_PTE_SYSTEM_CACHED;
+
+	/* Shift high bits down */
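+	/* Gen4+ PTEs only hold a 32-bit address: bits 35:32 of a 36-bit
+	 * dma address are packed into PTE bits 7:4, which is what the
+	 * OR below does. */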
+	addr |= (addr >> 28) & 0xf0;
+	writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
+}
+
+static int i9xx_setup(void)
+{
+	phys_addr_t reg_addr;
+	int size = KB(512);
+
+	reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);
+
+	intel_private.registers = ioremap(reg_addr, size);
+	if (!intel_private.registers)
+		return -ENOMEM;
+
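+	/* The GTT's location varies by generation: gen3 exposes it via a
+	 * dedicated PTE BAR, gen4 places it 512K into the MMIO BAR and
+	 * gen5 places it 2M in. */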
+	switch (INTEL_GTT_GEN) {
+	case 3:
+		intel_private.gtt_phys_addr =
+			pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
+		break;
+	case 5:
+		intel_private.gtt_phys_addr = reg_addr + MB(2);
+		break;
+	default:
+		intel_private.gtt_phys_addr = reg_addr + KB(512);
+		break;
+	}
+
+	intel_i9xx_setup_flush();
+
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+static const struct agp_bridge_driver intel_fake_agp_driver = {
+	.owner			= THIS_MODULE,
+	.size_type		= FIXED_APER_SIZE,
+	.aperture_sizes		= intel_fake_agp_sizes,
+	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
+	.configure		= intel_fake_agp_configure,
+	.fetch_size		= intel_fake_agp_fetch_size,
+	.cleanup		= intel_gtt_cleanup,
+	.agp_enable		= intel_fake_agp_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= intel_fake_agp_create_gatt_table,
+	.free_gatt_table	= intel_fake_agp_free_gatt_table,
+	.insert_memory		= intel_fake_agp_insert_entries,
+	.remove_memory		= intel_fake_agp_remove_entries,
+	.alloc_by_type		= intel_fake_agp_alloc_by_type,
+	.free_by_type		= intel_i810_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages        = agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages      = agp_generic_destroy_pages,
+};
+#endif
+
+static const struct intel_gtt_driver i81x_gtt_driver = {
+	.gen = 1,
+	.has_pgtbl_enable = 1,
+	.dma_mask_size = 32,
+	.setup = i810_setup,
+	.cleanup = i810_cleanup,
+	.check_flags = i830_check_flags,
+	.write_entry = i810_write_entry,
+};
+static const struct intel_gtt_driver i8xx_gtt_driver = {
+	.gen = 2,
+	.has_pgtbl_enable = 1,
+	.setup = i830_setup,
+	.cleanup = i830_cleanup,
+	.write_entry = i830_write_entry,
+	.dma_mask_size = 32,
+	.check_flags = i830_check_flags,
+	.chipset_flush = i830_chipset_flush,
+};
+static const struct intel_gtt_driver i915_gtt_driver = {
+	.gen = 3,
+	.has_pgtbl_enable = 1,
+	.setup = i9xx_setup,
+	.cleanup = i9xx_cleanup,
+	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
+	.write_entry = i830_write_entry,
+	.dma_mask_size = 32,
+	.check_flags = i830_check_flags,
+	.chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver g33_gtt_driver = {
+	.gen = 3,
+	.is_g33 = 1,
+	.setup = i9xx_setup,
+	.cleanup = i9xx_cleanup,
+	.write_entry = i965_write_entry,
+	.dma_mask_size = 36,
+	.check_flags = i830_check_flags,
+	.chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver pineview_gtt_driver = {
+	.gen = 3,
+	.is_pineview = 1, .is_g33 = 1,
+	.setup = i9xx_setup,
+	.cleanup = i9xx_cleanup,
+	.write_entry = i965_write_entry,
+	.dma_mask_size = 36,
+	.check_flags = i830_check_flags,
+	.chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver i965_gtt_driver = {
+	.gen = 4,
+	.has_pgtbl_enable = 1,
+	.setup = i9xx_setup,
+	.cleanup = i9xx_cleanup,
+	.write_entry = i965_write_entry,
+	.dma_mask_size = 36,
+	.check_flags = i830_check_flags,
+	.chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver g4x_gtt_driver = {
+	.gen = 5,
+	.setup = i9xx_setup,
+	.cleanup = i9xx_cleanup,
+	.write_entry = i965_write_entry,
+	.dma_mask_size = 36,
+	.check_flags = i830_check_flags,
+	.chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver ironlake_gtt_driver = {
+	.gen = 5,
+	.is_ironlake = 1,
+	.setup = i9xx_setup,
+	.cleanup = i9xx_cleanup,
+	.write_entry = i965_write_entry,
+	.dma_mask_size = 36,
+	.check_flags = i830_check_flags,
+	.chipset_flush = i9xx_chipset_flush,
+};
+
+/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  Each entry
+ * pairs a GMCH chip id with the gtt_driver that handles it; find_gmch()
+ * determines which entry matches the hardware that is actually present.
+ */
+static const struct intel_gtt_driver_description {
+	unsigned int gmch_chip_id;
+	char *name;
+	const struct intel_gtt_driver *gtt_driver;
+} intel_gtt_chipsets[] = {
+	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
+		&i81x_gtt_driver},
+	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
+		&i81x_gtt_driver},
+	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
+		&i81x_gtt_driver},
+	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
+		&i81x_gtt_driver},
+	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
+		&i8xx_gtt_driver},
+	{ PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
+		&i8xx_gtt_driver},
+	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
+		&i8xx_gtt_driver},
+	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
+		&i8xx_gtt_driver},
+	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
+		&i8xx_gtt_driver},
+	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
+		&i915_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
+		&i915_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
+		&i915_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
+		&i915_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
+		&i915_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
+		&i915_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
+		&i965_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
+		&i965_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
+		&i965_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
+		&i965_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
+		&i965_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
+		&i965_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
+		&g33_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
+		&g33_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
+		&g33_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
+		&pineview_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
+		&pineview_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
+		&g4x_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
+		&g4x_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
+		&g4x_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
+		&g4x_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
+		&g4x_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
+		&g4x_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
+		&g4x_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
+	    "HD Graphics", &ironlake_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
+	    "HD Graphics", &ironlake_gtt_driver },
+	{ 0, NULL, NULL }
+};
+
+static int find_gmch(u16 device)
+{
+	struct pci_dev *gmch_device;
+
+	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
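+	/* We only want the GMCH at function 0; if the first match is some
+	 * other function, look for the next instance. */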
+	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
+		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
+					     device, gmch_device);
+	}
+
+	if (!gmch_device)
+		return 0;
+
+	intel_private.pcidev = gmch_device;
+	return 1;
+}
+
+int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
+		     struct agp_bridge_data *bridge)
+{
+	int i, mask;
+
+	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
+		if (gpu_pdev) {
+			if (gpu_pdev->device ==
+			    intel_gtt_chipsets[i].gmch_chip_id) {
+				intel_private.pcidev = pci_dev_get(gpu_pdev);
+				intel_private.driver =
+					intel_gtt_chipsets[i].gtt_driver;
+
+				break;
+			}
+		} else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
+			intel_private.driver =
+				intel_gtt_chipsets[i].gtt_driver;
+			break;
+		}
+	}
+
+	if (!intel_private.driver)
+		return 0;
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+	if (bridge) {
+		if (INTEL_GTT_GEN > 1)
+			return 0;
+
+		bridge->driver = &intel_fake_agp_driver;
+		bridge->dev_private_data = &intel_private;
+		bridge->dev = bridge_pdev;
+	}
+#endif
+
+
+	/*
+	 * Can be called from the fake agp driver but also directly from
+	 * drm/i915.ko. Hence we need to check whether everything is set up
+	 * already.
+	 */
+	if (intel_private.refcount++)
+		return 1;
+
+	intel_private.bridge_dev = pci_dev_get(bridge_pdev);
+
+	dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
+
+	mask = intel_private.driver->dma_mask_size;
+	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+		dev_err(&intel_private.pcidev->dev,
+			"set gfx device dma mask %d-bit failed!\n", mask);
+	else
+		pci_set_consistent_dma_mask(intel_private.pcidev,
+					    DMA_BIT_MASK(mask));
+
+	if (intel_gtt_init() != 0) {
+		intel_gmch_remove();
+
+		return 0;
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(intel_gmch_probe);
+
+void intel_gtt_get(u64 *gtt_total,
+		   phys_addr_t *mappable_base,
+		   resource_size_t *mappable_end)
+{
+	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
+	*mappable_base = intel_private.gma_bus_addr;
+	*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
+}
+EXPORT_SYMBOL(intel_gtt_get);
+
+void intel_gtt_chipset_flush(void)
+{
+	if (intel_private.driver->chipset_flush)
+		intel_private.driver->chipset_flush();
+}
+EXPORT_SYMBOL(intel_gtt_chipset_flush);
+
+void intel_gmch_remove(void)
+{
+	if (--intel_private.refcount)
+		return;
+
+	if (intel_private.scratch_page)
+		intel_gtt_teardown_scratch_page();
+	if (intel_private.pcidev)
+		pci_dev_put(intel_private.pcidev);
+	if (intel_private.bridge_dev)
+		pci_dev_put(intel_private.bridge_dev);
+	intel_private.driver = NULL;
+}
+EXPORT_SYMBOL(intel_gmch_remove);
+
+MODULE_AUTHOR("Dave Jones, Various @Intel");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c
new file mode 100644
index 0000000..31c374b
--- /dev/null
+++ b/drivers/char/agp/isoch.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Setup routines for AGP 3.5 compliant bridges.
+ */
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/agp_backend.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "agp.h"
+
+/* Generic AGP 3.5 enabling routines */
+
+struct agp_3_5_dev {
+	struct list_head list;
+	u8 capndx;
+	u32 maxbw;
+	struct pci_dev *dev;
+};
+
+static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new)
+{
+	struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list);
+	struct list_head *pos;
+
+	list_for_each(pos, head) {
+		cur = list_entry(pos, struct agp_3_5_dev, list);
+		if (cur->maxbw > n->maxbw)
+			break;
+	}
+	list_add_tail(new, pos);
+}
+
+static void agp_3_5_dev_list_sort(struct agp_3_5_dev *list, unsigned int ndevs)
+{
+	struct agp_3_5_dev *cur;
+	struct pci_dev *dev;
+	struct list_head *pos, *tmp, *head = &list->list, *start = head->next;
+	u32 nistat;
+
+	INIT_LIST_HEAD(head);
+
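+	/* Re-insert every device in ascending maxbw order; with the
+	 * handful of devices behind a bridge this simple insertion sort
+	 * is plenty. */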
+	for (pos=start; pos!=head; ) {
+		cur = list_entry(pos, struct agp_3_5_dev, list);
+		dev = cur->dev;
+
+		pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat);
+		cur->maxbw = (nistat >> 16) & 0xff;
+
+		tmp = pos;
+		pos = pos->next;
+		agp_3_5_dev_list_insert(head, tmp);
+	}
+}
+
+/*
+ * Initialize all isochronous transfer parameters for an AGP 3.0
+ * node (i.e. a host bridge in combination with the adapters
+ * lying behind it...)
+ */
+
+static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
+		struct agp_3_5_dev *dev_list, unsigned int ndevs)
+{
+	/*
+	 * Convenience structure to make the calculations clearer
+	 * here.  The field names come straight from the AGP 3.0 spec.
+	 */
+	struct isoch_data {
+		u32 maxbw;
+		u32 n;
+		u32 y;
+		u32 l;
+		u32 rq;
+		struct agp_3_5_dev *dev;
+	};
+
+	struct pci_dev *td = bridge->dev, *dev;
+	struct list_head *head = &dev_list->list, *pos;
+	struct agp_3_5_dev *cur;
+	struct isoch_data *master, target;
+	unsigned int cdev = 0;
+	u32 mnistat, tnistat, tstatus, mcmd;
+	u16 tnicmd, mnicmd;
+	u8 mcapndx;
+	u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
+	u32 step, rem, rem_isoch, rem_async;
+	int ret = 0;
+
+	/*
+	 * We'll work with an array of isoch_data's (one for each
+	 * device in dev_list) throughout this function.
+	 */
+	master = kmalloc_array(ndevs, sizeof(*master), GFP_KERNEL);
+	if (master == NULL) {
+		ret = -ENOMEM;
+		goto get_out;
+	}
+
+	/*
+	 * Sort the device list by maxbw.  We need to do this because the
+	 * spec suggests that the devices with the smallest requirements
+	 * have their resources allocated first, with all remaining resources
+	 * falling to the device with the largest requirement.
+	 *
+	 * We don't exactly do this; we divide target resources by ndevs
+	 * and split them amongst the AGP 3.0 devices.  The remainder of such
+	 * division operations is dropped on the last device, sort of like
+	 * the spec mentions it should be done.
+	 *
+	 * We can't do this sort when we initially construct the dev_list
+	 * because we don't know until this function whether isochronous
+	 * transfers are enabled and consequently whether maxbw will mean
+	 * anything.
+	 */
+	agp_3_5_dev_list_sort(dev_list, ndevs);
+
+	pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
+	pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
+
+	/* Extract power-on defaults from the target */
+	target.maxbw = (tnistat >> 16) & 0xff;
+	target.n     = (tnistat >> 8)  & 0xff;
+	target.y     = (tnistat >> 6)  & 0x3;
+	target.l     = (tnistat >> 3)  & 0x7;
+	target.rq    = (tstatus >> 24) & 0xff;
+
+	y_max = target.y;
+
+	/*
+	 * Extract power-on defaults for each device in dev_list.  Along
+	 * the way, calculate the total isochronous bandwidth required
+	 * by these devices and the largest requested payload size.
+	 */
+	list_for_each(pos, head) {
+		cur = list_entry(pos, struct agp_3_5_dev, list);
+		dev = cur->dev;
+
+		mcapndx = cur->capndx;
+
+		pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);
+
+		master[cdev].maxbw = (mnistat >> 16) & 0xff;
+		master[cdev].n     = (mnistat >> 8)  & 0xff;
+		master[cdev].y     = (mnistat >> 6)  & 0x3;
+		master[cdev].dev   = cur;
+
+		tot_bw += master[cdev].maxbw;
+		y_max = max(y_max, master[cdev].y);
+
+		cdev++;
+	}
+
+	/* Check if this configuration has any chance of working */
+	if (tot_bw > target.maxbw) {
+		dev_err(&td->dev, "isochronous bandwidth required "
+			"by AGP 3.0 devices exceeds that which is supported by "
+			"the AGP 3.0 bridge!\n");
+		ret = -ENODEV;
+		goto free_and_exit;
+	}
+
+	target.y = y_max;
+
+	/*
+	 * Write the calculated payload size into the target's NICMD
+	 * register.  Doing this directly affects the ISOCH_N value
+	 * in the target's NISTAT register, so we need to do this now
+	 * to get an accurate value for ISOCH_N later.
+	 */
+	pci_read_config_word(td, bridge->capndx+AGPNICMD, &tnicmd);
+	tnicmd &= ~(0x3 << 6);
+	tnicmd |= target.y << 6;
+	pci_write_config_word(td, bridge->capndx+AGPNICMD, tnicmd);
+
+	/* Reread the target's ISOCH_N */
+	pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
+	target.n = (tnistat >> 8) & 0xff;
+
+	/* Calculate the minimum ISOCH_N needed by each master */
+	for (cdev=0; cdev<ndevs; cdev++) {
+		master[cdev].y = target.y;
+		master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1);
+
+		tot_n += master[cdev].n;
+	}
+
+	/* Exit if the minimal ISOCH_N allocation among the masters is more
+	 * than the target can handle. */
+	if (tot_n > target.n) {
+		dev_err(&td->dev, "number of isochronous "
+			"transactions per period required by AGP 3.0 devices "
+			"exceeds that which is supported by the AGP 3.0 "
+			"bridge!\n");
+		ret = -ENODEV;
+		goto free_and_exit;
+	}
+
+	/* Calculate left over ISOCH_N capability in the target.  We'll give
+	 * this to the hungriest device (as per the spec) */
+	rem  = target.n - tot_n;
+
+	/*
+	 * Calculate the minimum isochronous RQ depth needed by each master.
+	 * Along the way, distribute the extra ISOCH_N capability calculated
+	 * above.
+	 */
+	for (cdev=0; cdev<ndevs; cdev++) {
+		/*
+		 * This is a little subtle.  If ISOCH_Y > 64B, then ISOCH_Y
+		 * byte isochronous writes will be broken into 64B pieces.
+		 * This means we need to budget more RQ depth to account for
+		 * these kinds of writes (each isochronous write is actually
+		 * many writes on the AGP bus).
+		 */
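+		/* e.g. y == 2 (128B payload) doubles the RQ budget and
+		 * y == 3 (256B) quadruples it. */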
+		master[cdev].rq = master[cdev].n;
+		if (master[cdev].y > 0x1)
+			master[cdev].rq *= (1 << (master[cdev].y - 1));
+
+		tot_rq += master[cdev].rq;
+	}
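+	/* The hungriest device (last after the maxbw sort) gets the
+	 * leftover ISOCH_N. */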
+	master[ndevs-1].n += rem;
+
+	/* Figure the number of isochronous and asynchronous RQ slots the
+	 * target is providing. */
+	rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n;
+	rq_async = target.rq - rq_isoch;
+
+	/* Exit if the minimal RQ needs of the masters exceeds what the target
+	 * can provide. */
+	if (tot_rq > rq_isoch) {
+		dev_err(&td->dev, "number of request queue slots "
+			"required by the isochronous bandwidth requested by "
+			"AGP 3.0 devices exceeds the number provided by the "
+			"AGP 3.0 bridge!\n");
+		ret = -ENODEV;
+		goto free_and_exit;
+	}
+
+	/* Calculate asynchronous RQ capability in the target (per master) as
+	 * well as the total number of leftover isochronous RQ slots. */
+	step      = rq_async / ndevs;
+	rem_async = step + (rq_async % ndevs);
+	rem_isoch = rq_isoch - tot_rq;
+
+	/* Distribute the extra RQ slots calculated above and write our
+	 * isochronous settings out to the actual devices. */
+	for (cdev=0; cdev<ndevs; cdev++) {
+		cur = master[cdev].dev;
+		dev = cur->dev;
+
+		mcapndx = cur->capndx;
+
+		master[cdev].rq += (cdev == ndevs - 1)
+		              ? (rem_async + rem_isoch) : step;
+
+		pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd);
+		pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd);
+
+		mnicmd &= ~(0xff << 8);
+		mnicmd &= ~(0x3  << 6);
+		mcmd   &= ~(0xff << 24);
+
+		mnicmd |= master[cdev].n  << 8;
+		mnicmd |= master[cdev].y  << 6;
+		mcmd   |= master[cdev].rq << 24;
+
+		pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd);
+		pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd);
+	}
+
+free_and_exit:
+	kfree(master);
+
+get_out:
+	return ret;
+}
+
+/*
+ * This function basically allocates request queue slots among the
+ * AGP 3.0 masters in nonisochronous nodes.  The algorithm is
+ * pretty stupid: divide the total number of RQ slots provided by the
+ * target by ndevs.  Distribute this many slots to each AGP 3.0 device,
+ * giving any left over slots to the last device in dev_list.
+ */
+static void agp_3_5_nonisochronous_node_enable(struct agp_bridge_data *bridge,
+		struct agp_3_5_dev *dev_list, unsigned int ndevs)
+{
+	struct agp_3_5_dev *cur;
+	struct list_head *head = &dev_list->list, *pos;
+	u32 tstatus, mcmd;
+	u32 trq, mrq, rem;
+	unsigned int cdev = 0;
+
+	pci_read_config_dword(bridge->dev, bridge->capndx+AGPSTAT, &tstatus);
+
+	trq = (tstatus >> 24) & 0xff;
+	mrq = trq / ndevs;
+
+	rem = mrq + (trq % ndevs);
+
+	for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) {
+		cur = list_entry(pos, struct agp_3_5_dev, list);
+
+		pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd);
+		mcmd &= ~(0xff << 24);
+		mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24;
+		pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd);
+	}
+}
+
+/*
+ * Fully configure and enable an AGP 3.0 host bridge and all the devices
+ * lying behind it.
+ */
+int agp_3_5_enable(struct agp_bridge_data *bridge)
+{
+	struct pci_dev *td = bridge->dev, *dev = NULL;
+	u8 mcapndx;
+	u32 isoch, arqsz;
+	u32 tstatus, mstatus, ncapid;
+	u32 mmajor;
+	u16 mpstat;
+	struct agp_3_5_dev *dev_list, *cur;
+	struct list_head *head, *pos;
+	unsigned int ndevs = 0;
+	int ret = 0;
+
+	/* Extract some power-on defaults from the target */
+	pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
+	isoch     = (tstatus >> 17) & 0x1;
+	if (isoch == 0)	/* isoch xfers not available, bail out. */
+		return -ENODEV;
+
+	arqsz     = (tstatus >> 13) & 0x7;
+
+	/*
+	 * Allocate a head for our AGP 3.5 device list
+	 * (multiple AGP v3 devices are allowed behind a single bridge).
+	 */
+	if ((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) {
+		ret = -ENOMEM;
+		goto get_out;
+	}
+	head = &dev_list->list;
+	INIT_LIST_HEAD(head);
+
+	/* Find all AGP devices, and add them to dev_list. */
+	for_each_pci_dev(dev) {
+		mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP);
+		if (mcapndx == 0)
+			continue;
+
+		switch ((dev->class >> 8) & 0xff00) {
+			case 0x0600:    /* Bridge */
+				/* Skip bridges. We should call this function for each one. */
+				continue;
+
+			case 0x0001:    /* Unclassified device */
+				/* Don't know what this is, but log it for investigation. */
+				if (mcapndx != 0) {
+					dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n",
+						 pci_name(dev),
+						 dev->vendor, dev->device);
+				}
+				continue;
+
+			case 0x0300:    /* Display controller */
+			case 0x0400:    /* Multimedia controller */
+				if ((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
+					ret = -ENOMEM;
+					goto free_and_exit;
+				}
+				cur->dev = dev;
+
+				pos = &cur->list;
+				list_add(pos, head);
+				ndevs++;
+				continue;
+
+			default:
+				continue;
+		}
+	}
+
+	/*
+	 * Take an initial pass through the devices lying behind our host
+	 * bridge.  Make sure each one is actually an AGP 3.0 device, otherwise
+	 * exit with an error message.  Along the way store the AGP 3.0
+	 * cap_ptr for each device
+	 */
+	list_for_each(pos, head) {
+		cur = list_entry(pos, struct agp_3_5_dev, list);
+		dev = cur->dev;
+
+		pci_read_config_word(dev, PCI_STATUS, &mpstat);
+		if ((mpstat & PCI_STATUS_CAP_LIST) == 0)
+			continue;
+
+		pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx);
+		if (mcapndx != 0) {
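+			/* Walk the capability list until the AGP
+			 * capability (ID 2) is found or the list ends. */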
+			do {
+				pci_read_config_dword(dev, mcapndx, &ncapid);
+				if ((ncapid & 0xff) != 2)
+					mcapndx = (ncapid >> 8) & 0xff;
+			}
+			while (((ncapid & 0xff) != 2) && (mcapndx != 0));
+		}
+
+		if (mcapndx == 0) {
+			dev_err(&td->dev, "woah!  Non-AGP device %s on "
+				"secondary bus of AGP 3.5 bridge!\n",
+				pci_name(dev));
+			ret = -ENODEV;
+			goto free_and_exit;
+		}
+
+		mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
+		if (mmajor < 3) {
+			dev_err(&td->dev, "woah!  AGP 2.0 device %s on "
+				"secondary bus of AGP 3.5 bridge operating "
+				"with AGP 3.0 electricals!\n", pci_name(dev));
+			ret = -ENODEV;
+			goto free_and_exit;
+		}
+
+		cur->capndx = mcapndx;
+
+		pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);
+
+		if (((mstatus >> 3) & 0x1) == 0) {
+			dev_err(&td->dev, "woah!  AGP 3.x device %s not "
+				"operating in AGP 3.x mode on secondary bus "
+				"of AGP 3.5 bridge operating with AGP 3.0 "
+				"electricals!\n", pci_name(dev));
+			ret = -ENODEV;
+			goto free_and_exit;
+		}
+	}
+
+	/*
+	 * Call functions to divide target resources amongst the AGP 3.0
+	 * masters.  This process is dramatically different depending on
+	 * whether isochronous transfers are supported.
+	 */
+	if (isoch) {
+		ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs);
+		if (ret) {
+			dev_info(&td->dev, "something bad happened setting "
+				 "up isochronous xfers; falling back to "
+				 "non-isochronous xfer mode\n");
+		} else {
+			goto free_and_exit;
+		}
+	}
+	agp_3_5_nonisochronous_node_enable(bridge, dev_list, ndevs);
+
+free_and_exit:
+	/* Be sure to free the dev_list */
+	for (pos=head->next; pos!=head; ) {
+		cur = list_entry(pos, struct agp_3_5_dev, list);
+
+		pos = pos->next;
+		kfree(cur);
+	}
+	kfree(dev_list);
+
+get_out:
+	return ret;
+}
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
new file mode 100644
index 0000000..623205b
--- /dev/null
+++ b/drivers/char/agp/nvidia-agp.c
@@ -0,0 +1,482 @@
+/*
+ * Nvidia AGPGART routines.
+ * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
+ * to work in 2.5 by Dave Jones.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+#include <linux/jiffies.h>
+#include "agp.h"
+
+/* NVIDIA registers */
+#define NVIDIA_0_APSIZE		0x80
+#define NVIDIA_1_WBC		0xf0
+#define NVIDIA_2_GARTCTRL	0xd0
+#define NVIDIA_2_APBASE		0xd8
+#define NVIDIA_2_APLIMIT	0xdc
+#define NVIDIA_2_ATTBASE(i)	(0xe0 + (i) * 4)
+#define NVIDIA_3_APBASE		0x50
+#define NVIDIA_3_APLIMIT	0x54
+
+
+static struct _nvidia_private {
+	struct pci_dev *dev_1;
+	struct pci_dev *dev_2;
+	struct pci_dev *dev_3;
+	volatile u32 __iomem *aperture;
+	int num_active_entries;
+	off_t pg_offset;
+	u32 wbc_mask;
+} nvidia_private;
+
+
+static int nvidia_fetch_size(void)
+{
+	int i;
+	u8 size_value;
+	struct aper_size_info_8 *values;
+
+	pci_read_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, &size_value);
+	size_value &= 0x0f;
+	values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (size_value == values[i].size_value) {
+			agp_bridge->previous_size =
+				agp_bridge->current_size = (void *) (values + i);
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+#define SYSCFG          0xC0010010
+#define IORR_BASE0      0xC0010016
+#define IORR_MASK0      0xC0010017
+#define AMD_K7_NUM_IORR 2
+
+static int nvidia_init_iorr(u32 base, u32 size)
+{
+	u32 base_hi, base_lo;
+	u32 mask_hi, mask_lo;
+	u32 sys_hi, sys_lo;
+	u32 iorr_addr, free_iorr_addr;
+
+	/* Find the iorr that is already used for the base */
+	/* If not found, determine the uppermost available iorr */
+	free_iorr_addr = AMD_K7_NUM_IORR;
+	for (iorr_addr = 0; iorr_addr < AMD_K7_NUM_IORR; iorr_addr++) {
+		rdmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
+		rdmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);
+
+		if ((base_lo & 0xfffff000) == (base & 0xfffff000))
+			break;
+
+		if ((mask_lo & 0x00000800) == 0)
+			free_iorr_addr = iorr_addr;
+	}
+
+	if (iorr_addr >= AMD_K7_NUM_IORR) {
+		iorr_addr = free_iorr_addr;
+		if (iorr_addr >= AMD_K7_NUM_IORR)
+			return -EINVAL;
+	}
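+	/* Program the chosen IORR: enable DRAM reads and writes in the
+	 * base register, set the valid bit in the mask register, then
+	 * flip the SYSCFG bit that (presumably) enables the variable
+	 * IORRs globally. */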
+	base_hi = 0x0;
+	base_lo = (base & ~0xfff) | 0x18;
+	mask_hi = 0xf;
+	mask_lo = ((~(size - 1)) & 0xfffff000) | 0x800;
+	wrmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
+	wrmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);
+
+	rdmsr(SYSCFG, sys_lo, sys_hi);
+	sys_lo |= 0x00100000;
+	wrmsr(SYSCFG, sys_lo, sys_hi);
+
+	return 0;
+}
+
+static int nvidia_configure(void)
+{
+	int i, rc, num_dirs;
+	u32 apbase, aplimit;
+	phys_addr_t apbase_phys;
+	struct aper_size_info_8 *current_size;
+	u32 temp;
+
+	current_size = A_SIZE_8(agp_bridge->current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
+		current_size->size_value);
+
+	/* address to map to */
+	apbase = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
+	agp_bridge->gart_bus_addr = apbase;
+	aplimit = apbase + (current_size->size * 1024 * 1024) - 1;
+	pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APBASE, apbase);
+	pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APLIMIT, aplimit);
+	pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APBASE, apbase);
+	pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APLIMIT, aplimit);
+	if (0 != (rc = nvidia_init_iorr(apbase, current_size->size * 1024 * 1024)))
+		return rc;
+
+	/*
+	 * Each 64K GART directory covers 64M of aperture.  Smaller
+	 * apertures still use one full directory, but only the slice that
+	 * matches the aperture's alignment within its 64M block;
+	 * pg_offset records where that slice starts.
+	 */
+	num_dirs = current_size->size / 64;
+	nvidia_private.num_active_entries = current_size->num_entries;
+	nvidia_private.pg_offset = 0;
+	if (num_dirs == 0) {
+		num_dirs = 1;
+		nvidia_private.num_active_entries /= (64 / current_size->size);
+		nvidia_private.pg_offset = (apbase & (64 * 1024 * 1024 - 1) &
+			~(current_size->size * 1024 * 1024 - 1)) / PAGE_SIZE;
+	}
+
+	/* attbase */
+	for (i = 0; i < 8; i++) {
+		pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_ATTBASE(i),
+			(agp_bridge->gatt_bus_addr + (i % num_dirs) * 64 * 1024) | 1);
+	}
+
+	/* gtlb control */
+	pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp);
+	pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp | 0x11);
+
+	/* gart control */
+	pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp);
+	pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp | 0x100);
+
+	/* map aperture */
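+	/* 33 pages = the 32-entry GTLB plus one; nvidia_tlbflush() reads
+	 * back each of these pages to invalidate the GTLB. */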
+	apbase_phys = pci_resource_start(agp_bridge->dev, AGP_APERTURE_BAR);
+	nvidia_private.aperture =
+		(volatile u32 __iomem *) ioremap(apbase_phys, 33 * PAGE_SIZE);
+
+	if (!nvidia_private.aperture)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void nvidia_cleanup(void)
+{
+	struct aper_size_info_8 *previous_size;
+	u32 temp;
+
+	/* gart control */
+	pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp);
+	pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp & ~(0x100));
+
+	/* gtlb control */
+	pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp);
+	pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp & ~(0x11));
+
+	/* unmap aperture */
+	iounmap((void __iomem *) nvidia_private.aperture);
+
+	/* restore previous aperture size */
+	previous_size = A_SIZE_8(agp_bridge->previous_size);
+	pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
+		previous_size->size_value);
+
+	/* restore iorr for previous aperture size */
+	nvidia_init_iorr(agp_bridge->gart_bus_addr,
+		previous_size->size * 1024 * 1024);
+}
+
+
+/*
+ * Note that we can't use the generic routines, even though they are 99%
+ * the same.  Aperture sizes under 64M still require a full 64K GART
+ * directory, but only the portion of the TLB entries corresponding to
+ * the aperture's alignment inside the surrounding 64M block is used.
+ */
+extern int agp_memory_reserved;
+
+static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+	int i, j;
+	int mask_type;
+
+	mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
+	if (mask_type != 0 || type != mem->type)
+		return -EINVAL;
+
+	if (mem->page_count == 0)
+		return 0;
+
+	if ((pg_start + mem->page_count) >
+		(nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE))
+		return -EINVAL;
+
+	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
+		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j)))
+			return -EBUSY;
+	}
+
+	if (!mem->is_flushed) {
+		global_cache_flush();
+		mem->is_flushed = true;
+	}
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		writel(agp_bridge->driver->mask_memory(agp_bridge,
+			       page_to_phys(mem->pages[i]), mask_type),
+			agp_bridge->gatt_table+nvidia_private.pg_offset+j);
+	}
+
+	/* PCI Posting. */
+	readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j - 1);
+
+	agp_bridge->driver->tlb_flush(mem);
+	return 0;
+}
+
+
+static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+	int i;
+
+	int mask_type;
+
+	mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
+	if (mask_type != 0 || type != mem->type)
+		return -EINVAL;
+
+	if (mem->page_count == 0)
+		return 0;
+
+	for (i = pg_start; i < (mem->page_count + pg_start); i++)
+		writel(agp_bridge->scratch_page, agp_bridge->gatt_table+nvidia_private.pg_offset+i);
+
+	agp_bridge->driver->tlb_flush(mem);
+	return 0;
+}
+
+
+static void nvidia_tlbflush(struct agp_memory *mem)
+{
+	unsigned long end;
+	u32 wbc_reg, temp;
+	int i;
+
+	/* flush chipset */
+	if (nvidia_private.wbc_mask) {
+		pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg);
+		wbc_reg |= nvidia_private.wbc_mask;
+		pci_write_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, wbc_reg);
+
+		end = jiffies + 3*HZ;
+		do {
+			pci_read_config_dword(nvidia_private.dev_1,
+					NVIDIA_1_WBC, &wbc_reg);
+			if (time_before_eq(end, jiffies)) {
+				printk(KERN_ERR PFX
+				    "TLB flush took more than 3 seconds.\n");
+			}
+		} while (wbc_reg & nvidia_private.wbc_mask);
+	}
+
+	/*
+	 * Flush the TLB entries: dummy reads through the aperture evict
+	 * the GTLB entries.  The 33-page pass is done twice.
+	 */
+	for (i = 0; i < 32 + 1; i++)
+		temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
+	for (i = 0; i < 32 + 1; i++)
+		temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
+}
+
+
+static const struct aper_size_info_8 nvidia_generic_sizes[5] =
+{
+	{512, 131072, 7, 0},
+	{256, 65536, 6, 8},
+	{128, 32768, 5, 12},
+	{64, 16384, 4, 14},
+	/* The 32M mode still requires a 64k gatt */
+	{32, 16384, 4, 15}
+};
+
+
+static const struct gatt_mask nvidia_generic_masks[] =
+{
+	{ .mask = 1, .type = 0}
+};
+
+
+static const struct agp_bridge_driver nvidia_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= nvidia_generic_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 5,
+	.needs_scratch_page	= true,
+	.configure		= nvidia_configure,
+	.fetch_size		= nvidia_fetch_size,
+	.cleanup		= nvidia_cleanup,
+	.tlb_flush		= nvidia_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= nvidia_generic_masks,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= nvidia_insert_memory,
+	.remove_memory		= nvidia_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static int agp_nvidia_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	struct agp_bridge_data *bridge;
+	u8 cap_ptr;
+
+	nvidia_private.dev_1 =
+		pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+					    (unsigned int)pdev->bus->number,
+					    PCI_DEVFN(0, 1));
+	nvidia_private.dev_2 =
+		pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+					    (unsigned int)pdev->bus->number,
+					    PCI_DEVFN(0, 2));
+	nvidia_private.dev_3 =
+		pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+					    (unsigned int)pdev->bus->number,
+					    PCI_DEVFN(30, 0));
+
+	if (!nvidia_private.dev_1 || !nvidia_private.dev_2 || !nvidia_private.dev_3) {
+		printk(KERN_INFO PFX "Detected an NVIDIA nForce/nForce2 "
+			"chipset, but could not find the secondary devices.\n");
+		return -ENODEV;
+	}
+
+	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+	if (!cap_ptr)
+		return -ENODEV;
+
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_NVIDIA_NFORCE:
+		printk(KERN_INFO PFX "Detected NVIDIA nForce chipset\n");
+		nvidia_private.wbc_mask = 0x00010000;
+		break;
+	case PCI_DEVICE_ID_NVIDIA_NFORCE2:
+		printk(KERN_INFO PFX "Detected NVIDIA nForce2 chipset\n");
+		nvidia_private.wbc_mask = 0x80000000;
+		break;
+	default:
+		printk(KERN_ERR PFX "Unsupported NVIDIA chipset (device id: %04x)\n",
+			    pdev->device);
+		return -ENODEV;
+	}
+
+	bridge = agp_alloc_bridge();
+	if (!bridge)
+		return -ENOMEM;
+
+	bridge->driver = &nvidia_driver;
+	bridge->dev_private_data = &nvidia_private;
+	bridge->dev = pdev;
+	bridge->capndx = cap_ptr;
+
+	/* Fill in the mode register */
+	pci_read_config_dword(pdev,
+			bridge->capndx+PCI_AGP_STATUS,
+			&bridge->mode);
+
+	pci_set_drvdata(pdev, bridge);
+	return agp_add_bridge(bridge);
+}
+
+static void agp_nvidia_remove(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+	agp_remove_bridge(bridge);
+	agp_put_bridge(bridge);
+}
+
+#ifdef CONFIG_PM
+static int agp_nvidia_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+static int agp_nvidia_resume(struct pci_dev *pdev)
+{
+	/* set power state 0 and restore PCI space */
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	/* reconfigure AGP hardware again */
+	nvidia_configure();
+
+	return 0;
+}
+#endif
+
+
+static const struct pci_device_id agp_nvidia_pci_table[] = {
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_NVIDIA,
+	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_NVIDIA,
+	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE2,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table);
+
+static struct pci_driver agp_nvidia_pci_driver = {
+	.name		= "agpgart-nvidia",
+	.id_table	= agp_nvidia_pci_table,
+	.probe		= agp_nvidia_probe,
+	.remove		= agp_nvidia_remove,
+#ifdef CONFIG_PM
+	.suspend	= agp_nvidia_suspend,
+	.resume		= agp_nvidia_resume,
+#endif
+};
+
+static int __init agp_nvidia_init(void)
+{
+	if (agp_off)
+		return -EINVAL;
+	return pci_register_driver(&agp_nvidia_pci_driver);
+}
+
+static void __exit agp_nvidia_cleanup(void)
+{
+	pci_unregister_driver(&agp_nvidia_pci_driver);
+	pci_dev_put(nvidia_private.dev_1);
+	pci_dev_put(nvidia_private.dev_2);
+	pci_dev_put(nvidia_private.dev_3);
+}
+
+module_init(agp_nvidia_init);
+module_exit(agp_nvidia_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("NVIDIA Corporation");
+
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
new file mode 100644
index 0000000..15f2e70
--- /dev/null
+++ b/drivers/char/agp/parisc-agp.c
@@ -0,0 +1,426 @@
+/*
+ * HP Quicksilver AGP GART routines
+ *
+ * Copyright (c) 2006, Kyle McMartin <kyle@parisc-linux.org>
+ *
+ * Based on drivers/char/agpgart/hp-agp.c which is
+ * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
+ *	Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/klist.h>
+#include <linux/agp_backend.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+
+#include <asm/parisc-device.h>
+#include <asm/ropes.h>
+
+#include "agp.h"
+
+#define DRVNAME	"quicksilver"
+#define DRVPFX	DRVNAME ": "
+
+#define AGP8X_MODE_BIT		3
+#define AGP8X_MODE		(1 << AGP8X_MODE_BIT)
+
+static unsigned long
+parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+		       int type);
+
+static struct _parisc_agp_info {
+	void __iomem *ioc_regs;
+	void __iomem *lba_regs;
+
+	int lba_cap_offset;
+
+	u64 *gatt;
+	u64 gatt_entries;
+
+	u64 gart_base;
+	u64 gart_size;
+
+	int io_page_size;
+	int io_pages_per_kpage;
+} parisc_agp_info;
+
+static struct gatt_mask parisc_agp_masks[] =
+{
+	{
+		.mask = SBA_PDIR_VALID_BIT,
+		.type = 0
+	}
+};
+
+static struct aper_size_info_fixed parisc_agp_sizes[] =
+{
+	{0, 0, 0},		/* filled in by parisc_agp_fetch_size() */
+};
+
+static int
+parisc_agp_fetch_size(void)
+{
+	int size;
+
+	size = parisc_agp_info.gart_size / MB(1);
+	parisc_agp_sizes[0].size = size;
+	agp_bridge->current_size = (void *) &parisc_agp_sizes[0];
+
+	return size;
+}
+
+static int
+parisc_agp_configure(void)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+
+	agp_bridge->gart_bus_addr = info->gart_base;
+	agp_bridge->capndx = info->lba_cap_offset;
+	agp_bridge->mode = readl(info->lba_regs+info->lba_cap_offset+PCI_AGP_STATUS);
+
+	return 0;
+}
+
+static void
+parisc_agp_tlbflush(struct agp_memory *mem)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+
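+	/* Purge the IO TLB over the whole GART range; IOC_PCOM takes the
+	 * base address with log2(size) encoded in the low bits. */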
+	writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM);
+	readq(info->ioc_regs+IOC_PCOM);	/* flush */
+}
+
+static int
+parisc_agp_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+	int i;
+
+	for (i = 0; i < info->gatt_entries; i++) {
+		info->gatt[i] = (unsigned long)agp_bridge->scratch_page;
+	}
+
+	return 0;
+}
+
+static int
+parisc_agp_free_gatt_table(struct agp_bridge_data *bridge)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+
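+	/* Put back the cookie that agp_ioc_init() found, handing the
+	 * reserved IO PDIR slice back to sba_iommu. */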
+	info->gatt[0] = SBA_AGPGART_COOKIE;
+
+	return 0;
+}
+
+static int
+parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+	int i, k;
+	off_t j, io_pg_start;
+	int io_pg_count;
+
+	if (type != mem->type ||
+		agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
+		return -EINVAL;
+	}
+
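+	/* The IOC page size may be smaller than the kernel page size, so
+	 * each kernel page can occupy several GATT entries. */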
+	io_pg_start = info->io_pages_per_kpage * pg_start;
+	io_pg_count = info->io_pages_per_kpage * mem->page_count;
+	if ((io_pg_start + io_pg_count) > info->gatt_entries) {
+		return -EINVAL;
+	}
+
+	j = io_pg_start;
+	while (j < (io_pg_start + io_pg_count)) {
+		if (info->gatt[j])
+			return -EBUSY;
+		j++;
+	}
+
+	if (!mem->is_flushed) {
+		global_cache_flush();
+		mem->is_flushed = true;
+	}
+
+	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
+		unsigned long paddr;
+
+		paddr = page_to_phys(mem->pages[i]);
+		for (k = 0;
+		     k < info->io_pages_per_kpage;
+		     k++, j++, paddr += info->io_page_size) {
+			info->gatt[j] =
+				parisc_agp_mask_memory(agp_bridge,
+					paddr, type);
+		}
+	}
+
+	agp_bridge->driver->tlb_flush(mem);
+
+	return 0;
+}
+
+static int
+parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+	int i, io_pg_start, io_pg_count;
+
+	if (type != mem->type ||
+		agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
+		return -EINVAL;
+	}
+
+	io_pg_start = info->io_pages_per_kpage * pg_start;
+	io_pg_count = info->io_pages_per_kpage * mem->page_count;
+	for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
+		info->gatt[i] = agp_bridge->scratch_page;
+	}
+
+	agp_bridge->driver->tlb_flush(mem);
+	return 0;
+}
+
+static unsigned long
+parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+		       int type)
+{
+	return SBA_PDIR_VALID_BIT | addr;
+}
+
+static void
+parisc_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+	u32 command;
+
+	command = readl(info->lba_regs + info->lba_cap_offset + PCI_AGP_STATUS);
+
+	command = agp_collect_device_status(bridge, mode, command);
+	command |= 0x00000100;
+
+	writel(command, info->lba_regs + info->lba_cap_offset + PCI_AGP_COMMAND);
+
+	agp_device_command(command, (mode & AGP8X_MODE) != 0);
+}
+
+static const struct agp_bridge_driver parisc_agp_driver = {
+	.owner			= THIS_MODULE,
+	.size_type		= FIXED_APER_SIZE,
+	.configure		= parisc_agp_configure,
+	.fetch_size		= parisc_agp_fetch_size,
+	.tlb_flush		= parisc_agp_tlbflush,
+	.mask_memory		= parisc_agp_mask_memory,
+	.masks			= parisc_agp_masks,
+	.agp_enable		= parisc_agp_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= parisc_agp_create_gatt_table,
+	.free_gatt_table	= parisc_agp_free_gatt_table,
+	.insert_memory		= parisc_agp_insert_memory,
+	.remove_memory		= parisc_agp_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+	.cant_use_aperture	= true,
+};
+
+static int __init
+agp_ioc_init(void __iomem *ioc_regs)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+	u64 iova_base, *io_pdir, io_tlb_ps;
+	int io_tlb_shift;
+
+	printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n");
+
+	info->ioc_regs = ioc_regs;
+
+	io_tlb_ps = readq(info->ioc_regs+IOC_TCNFG);
+	switch (io_tlb_ps) {
+	case 0: io_tlb_shift = 12; break;
+	case 1: io_tlb_shift = 13; break;
+	case 2: io_tlb_shift = 14; break;
+	case 3: io_tlb_shift = 16; break;
+	default:
+		printk(KERN_ERR DRVPFX "Invalid IOTLB page size "
+		       "configuration 0x%llx\n", io_tlb_ps);
+		info->gatt = NULL;
+		info->gatt_entries = 0;
+		return -ENODEV;
+	}
+	info->io_page_size = 1 << io_tlb_shift;
+	info->io_pages_per_kpage = PAGE_SIZE / info->io_page_size;
+
+	iova_base = readq(info->ioc_regs+IOC_IBASE) & ~0x1;
+	info->gart_base = iova_base + PLUTO_IOVA_SIZE - PLUTO_GART_SIZE;
+
+	info->gart_size = PLUTO_GART_SIZE;
+	info->gatt_entries = info->gart_size / info->io_page_size;
+
+	io_pdir = phys_to_virt(readq(info->ioc_regs+IOC_PDIR_BASE));
+	info->gatt = &io_pdir[(PLUTO_IOVA_SIZE/2) >> PAGE_SHIFT];
+
+	if (info->gatt[0] != SBA_AGPGART_COOKIE) {
+		info->gatt = NULL;
+		info->gatt_entries = 0;
+		printk(KERN_ERR DRVPFX "No reserved IO PDIR entry found; "
+		       "GART disabled\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int
+lba_find_capability(int cap)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+	u16 status;
+	u8 pos, id;
+	int ttl = 48;
+
+	status = readw(info->lba_regs + PCI_STATUS);
+	if (!(status & PCI_STATUS_CAP_LIST))
+		return 0;
+	pos = readb(info->lba_regs + PCI_CAPABILITY_LIST);
+	while (ttl-- && pos >= 0x40) {
+		pos &= ~3;
+		id = readb(info->lba_regs + pos + PCI_CAP_LIST_ID);
+		if (id == 0xff)
+			break;
+		if (id == cap)
+			return pos;
+		pos = readb(info->lba_regs + pos + PCI_CAP_LIST_NEXT);
+	}
+	return 0;
+}
+
+static int __init
+agp_lba_init(void __iomem *lba_hpa)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+	int cap;
+
+	info->lba_regs = lba_hpa;
+	info->lba_cap_offset = lba_find_capability(PCI_CAP_ID_AGP);
+
+	cap = readl(lba_hpa + info->lba_cap_offset) & 0xff;
+	if (cap != PCI_CAP_ID_AGP) {
+		printk(KERN_ERR DRVPFX "Invalid capability ID 0x%02x at 0x%x\n",
+		       cap, info->lba_cap_offset);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int __init
+parisc_agp_setup(void __iomem *ioc_hpa, void __iomem *lba_hpa)
+{
+	struct pci_dev *fake_bridge_dev = NULL;
+	struct agp_bridge_data *bridge;
+	int error = 0;
+
+	fake_bridge_dev = pci_alloc_dev(NULL);
+	if (!fake_bridge_dev) {
+		error = -ENOMEM;
+		goto fail;
+	}
+
+	error = agp_ioc_init(ioc_hpa);
+	if (error)
+		goto fail;
+
+	error = agp_lba_init(lba_hpa);
+	if (error)
+		goto fail;
+
+	bridge = agp_alloc_bridge();
+	if (!bridge) {
+		error = -ENOMEM;
+		goto fail;
+	}
+	bridge->driver = &parisc_agp_driver;
+
+	fake_bridge_dev->vendor = PCI_VENDOR_ID_HP;
+	fake_bridge_dev->device = PCI_DEVICE_ID_HP_PCIX_LBA;
+	bridge->dev = fake_bridge_dev;
+
+	error = agp_add_bridge(bridge);
+	if (error)
+		goto fail;
+	return 0;
+
+fail:
+	kfree(fake_bridge_dev);
+	return error;
+}
+
+static int
+find_quicksilver(struct device *dev, void *data)
+{
+	struct parisc_device **lba = data;
+	struct parisc_device *padev = to_parisc_device(dev);
+
+	if (IS_QUICKSILVER(padev))
+		*lba = padev;
+
+	return 0;
+}
+
+static int
+parisc_agp_init(void)
+{
+	extern struct sba_device *sba_list;
+
+	int err = -ENODEV;
+	struct parisc_device *sba = NULL, *lba = NULL;
+	struct lba_device *lbadev = NULL;
+
+	if (!sba_list)
+		goto out;
+
+	/* Find our parent Pluto */
+	sba = sba_list->dev;
+	if (!IS_PLUTO(sba)) {
+		printk(KERN_INFO DRVPFX "No Pluto found, so no AGPGART for you.\n");
+		goto out;
+	}
+
+	/* Now search our Pluto for our precious AGP device... */
+	device_for_each_child(&sba->dev, &lba, find_quicksilver);
+
+	if (!lba) {
+		printk(KERN_INFO DRVPFX "No AGP devices found.\n");
+		goto out;
+	}
+
+	lbadev = parisc_get_drvdata(lba);
+
+	/* w00t, let's go find our cookies... */
+	return parisc_agp_setup(sba_list->ioc[0].ioc_hpa, lbadev->hba.base_addr);
+
+out:
+	return err;
+}
+
+module_init(parisc_agp_init);
+
+MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
new file mode 100644
index 0000000..e7d5bdc
--- /dev/null
+++ b/drivers/char/agp/sgi-agp.c
@@ -0,0 +1,338 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+/*
+ * SGI TIOCA AGPGART routines.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/agp_backend.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/io.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/tioca_provider.h>
+#include "agp.h"
+
+extern int agp_memory_reserved;
+extern uint32_t tioca_gart_found;
+extern struct list_head tioca_list;
+static struct agp_bridge_data **sgi_tioca_agp_bridges;
+
+/*
+ * The aperture size and related information is set up at TIOCA init time.
+ * Values for this table will be extracted and filled in at
+ * sgi_tioca_fetch_size() time.
+ */
+
+static struct aper_size_info_fixed sgi_tioca_sizes[] = {
+	{0, 0, 0},
+};
+
+static struct page *sgi_tioca_alloc_page(struct agp_bridge_data *bridge)
+{
+	struct page *page;
+	int nid;
+	struct tioca_kernel *info =
+	    (struct tioca_kernel *)bridge->dev_private_data;
+
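+	/* Allocate from the node closest to this CA for better locality. */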
+	nid = info->ca_closest_node;
+	page = alloc_pages_node(nid, GFP_KERNEL, 0);
+	if (!page)
+		return NULL;
+
+	get_page(page);
+	atomic_inc(&agp_bridge->current_memory_agp);
+	return page;
+}
+
+/*
+ * Flush GART TLBs.  We cannot selectively flush based on memory, so the
+ * mem arg is ignored.
+ */
+
+static void sgi_tioca_tlbflush(struct agp_memory *mem)
+{
+	tioca_tlbflush(mem->bridge->dev_private_data);
+}
+
+/*
+ * Given an address of a host physical page, turn it into a valid gart
+ * entry.
+ */
+static unsigned long
+sgi_tioca_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+		      int type)
+{
+	return tioca_physpage_to_gart(addr);
+}
+
+static void sgi_tioca_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+	tioca_fastwrite_enable(bridge->dev_private_data);
+}
+
+/*
+ * sgi_tioca_configure() doesn't have anything to do, since the base CA
+ * driver has already set up the GART.
+ */
+
+static int sgi_tioca_configure(void)
+{
+	return 0;
+}
+
+/*
+ * Determine the gfx aperture size.  This has already been determined by
+ * the CA driver init, so we just need to set agp_bridge values accordingly.
+ */
+
+static int sgi_tioca_fetch_size(void)
+{
+	struct tioca_kernel *info =
+	    (struct tioca_kernel *)agp_bridge->dev_private_data;
+
+	sgi_tioca_sizes[0].size = info->ca_gfxap_size / MB(1);
+	sgi_tioca_sizes[0].num_entries = info->ca_gfxgart_entries;
+
+	return sgi_tioca_sizes[0].size;
+}
+
+static int sgi_tioca_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	struct tioca_kernel *info =
+	    (struct tioca_kernel *)bridge->dev_private_data;
+
+	bridge->gatt_table_real = (u32 *) info->ca_gfxgart;
+	bridge->gatt_table = bridge->gatt_table_real;
+	bridge->gatt_bus_addr = info->ca_gfxgart_base;
+
+	return 0;
+}
+
+static int sgi_tioca_free_gatt_table(struct agp_bridge_data *bridge)
+{
+	return 0;
+}
+
+static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
+				   int type)
+{
+	int num_entries;
+	size_t i;
+	off_t j;
+	void *temp;
+	struct agp_bridge_data *bridge;
+	u64 *table;
+
+	bridge = mem->bridge;
+	if (!bridge)
+		return -EINVAL;
+
+	table = (u64 *)bridge->gatt_table;
+
+	temp = bridge->current_size;
+
+	switch (bridge->driver->size_type) {
+	case U8_APER_SIZE:
+		num_entries = A_SIZE_8(temp)->num_entries;
+		break;
+	case U16_APER_SIZE:
+		num_entries = A_SIZE_16(temp)->num_entries;
+		break;
+	case U32_APER_SIZE:
+		num_entries = A_SIZE_32(temp)->num_entries;
+		break;
+	case FIXED_APER_SIZE:
+		num_entries = A_SIZE_FIX(temp)->num_entries;
+		break;
+	case LVL2_APER_SIZE:
+		return -EINVAL;
+	default:
+		num_entries = 0;
+		break;
+	}
+
+	num_entries -= agp_memory_reserved / PAGE_SIZE;
+	if (num_entries < 0)
+		num_entries = 0;
+
+	if (type != 0 || mem->type != 0) {
+		return -EINVAL;
+	}
+
+	if ((pg_start + mem->page_count) > num_entries)
+		return -EINVAL;
+
+	j = pg_start;
+
+	while (j < (pg_start + mem->page_count)) {
+		if (table[j])
+			return -EBUSY;
+		j++;
+	}
+
+	if (!mem->is_flushed) {
+		bridge->driver->cache_flush();
+		mem->is_flushed = true;
+	}
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		table[j] =
+		    bridge->driver->mask_memory(bridge,
+						page_to_phys(mem->pages[i]),
+						mem->type);
+	}
+
+	bridge->driver->tlb_flush(mem);
+	return 0;
+}
+
+static int sgi_tioca_remove_memory(struct agp_memory *mem, off_t pg_start,
+				   int type)
+{
+	size_t i;
+	struct agp_bridge_data *bridge;
+	u64 *table;
+
+	bridge = mem->bridge;
+	if (!bridge)
+		return -EINVAL;
+
+	if (type != 0 || mem->type != 0) {
+		return -EINVAL;
+	}
+
+	table = (u64 *)bridge->gatt_table;
+
+	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+		table[i] = 0;
+	}
+
+	bridge->driver->tlb_flush(mem);
+	return 0;
+}
+
+static void sgi_tioca_cache_flush(void)
+{
+}
+
+/*
+ * Cleanup.  Nothing to do as the CA driver owns the GART.
+ */
+
+static void sgi_tioca_cleanup(void)
+{
+}
+
+static struct agp_bridge_data *sgi_tioca_find_bridge(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge;
+
+	list_for_each_entry(bridge, &agp_bridges, list) {
+		if (bridge->dev->bus == pdev->bus)
+			break;
+	}
+	return bridge;
+}
+
+const struct agp_bridge_driver sgi_tioca_driver = {
+	.owner = THIS_MODULE,
+	.size_type = U16_APER_SIZE,
+	.configure = sgi_tioca_configure,
+	.fetch_size = sgi_tioca_fetch_size,
+	.cleanup = sgi_tioca_cleanup,
+	.tlb_flush = sgi_tioca_tlbflush,
+	.mask_memory = sgi_tioca_mask_memory,
+	.agp_enable = sgi_tioca_agp_enable,
+	.cache_flush = sgi_tioca_cache_flush,
+	.create_gatt_table = sgi_tioca_create_gatt_table,
+	.free_gatt_table = sgi_tioca_free_gatt_table,
+	.insert_memory = sgi_tioca_insert_memory,
+	.remove_memory = sgi_tioca_remove_memory,
+	.alloc_by_type = agp_generic_alloc_by_type,
+	.free_by_type = agp_generic_free_by_type,
+	.agp_alloc_page = sgi_tioca_alloc_page,
+	.agp_destroy_page = agp_generic_destroy_page,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+	.cant_use_aperture = true,
+	.needs_scratch_page = false,
+	.num_aperture_sizes = 1,
+};
+
+static int agp_sgi_init(void)
+{
+	unsigned int j;
+	struct tioca_kernel *info;
+	struct pci_dev *pdev = NULL;
+
+	if (!tioca_gart_found)
+		return 0;
+
+	printk(KERN_INFO PFX "SGI TIO CA GART driver initialized.\n");
+
+	sgi_tioca_agp_bridges = kmalloc_array(tioca_gart_found,
+					      sizeof(struct agp_bridge_data *),
+					      GFP_KERNEL);
+	if (!sgi_tioca_agp_bridges)
+		return -ENOMEM;
+
+	j = 0;
+	list_for_each_entry(info, &tioca_list, ca_list) {
+		if (list_empty(info->ca_devices))
+			continue;
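+		/* Scan this CA's devices for an AGP-capable VGA card; note
+		 * that pdev is reused below as the bridge's device. */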
+		list_for_each_entry(pdev, info->ca_devices, bus_list) {
+			u8 cap_ptr;
+
+			if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
+				continue;
+			cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+			if (!cap_ptr)
+				continue;
+		}
+		sgi_tioca_agp_bridges[j] = agp_alloc_bridge();
+		printk(KERN_INFO PFX "bridge %d = 0x%p\n", j,
+		       sgi_tioca_agp_bridges[j]);
+		if (sgi_tioca_agp_bridges[j]) {
+			sgi_tioca_agp_bridges[j]->dev = pdev;
+			sgi_tioca_agp_bridges[j]->dev_private_data = info;
+			sgi_tioca_agp_bridges[j]->driver = &sgi_tioca_driver;
+			sgi_tioca_agp_bridges[j]->gart_bus_addr =
+			    info->ca_gfxap_base;
+			sgi_tioca_agp_bridges[j]->mode = (0x7D << 24) |	/* 126 requests */
+			    (0x1 << 9) |	/* SBA supported */
+			    (0x1 << 5) |	/* 64-bit addresses supported */
+			    (0x1 << 4) |	/* FW supported */
+			    (0x1 << 3) |	/* AGP 3.0 mode */
+			    0x2;	/* 8x transfer only */
+			sgi_tioca_agp_bridges[j]->current_size =
+			    sgi_tioca_agp_bridges[j]->previous_size =
+			    (void *)&sgi_tioca_sizes[0];
+			agp_add_bridge(sgi_tioca_agp_bridges[j]);
+		}
+		j++;
+	}
+
+	agp_find_bridge = &sgi_tioca_find_bridge;
+	return 0;
+}
+
+static void agp_sgi_cleanup(void)
+{
+	kfree(sgi_tioca_agp_bridges);
+	sgi_tioca_agp_bridges = NULL;
+}
+
+module_init(agp_sgi_init);
+module_exit(agp_sgi_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
new file mode 100644
index 0000000..14909fc
--- /dev/null
+++ b/drivers/char/agp/sis-agp.c
@@ -0,0 +1,452 @@
+/*
+ * SiS AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/delay.h>
+#include "agp.h"
+
+#define SIS_ATTBASE	0x90
+#define SIS_APSIZE	0x94
+#define SIS_TLBCNTRL	0x97
+#define SIS_TLBFLUSH	0x98
+
+#define PCI_DEVICE_ID_SI_662	0x0662
+#define PCI_DEVICE_ID_SI_671	0x0671
+
+static bool agp_sis_force_delay = false;
+static int agp_sis_agp_spec = -1;
+
+static int sis_fetch_size(void)
+{
+	u8 temp_size;
+	int i;
+	struct aper_size_info_8 *values;
+
+	pci_read_config_byte(agp_bridge->dev, SIS_APSIZE, &temp_size);
+	values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if ((temp_size == values[i].size_value) ||
+		    ((temp_size & ~(0x07)) ==
+		     (values[i].size_value & ~(0x07)))) {
+			agp_bridge->previous_size =
+			    agp_bridge->current_size = (void *) (values + i);
+
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+static void sis_tlbflush(struct agp_memory *mem)
+{
+	pci_write_config_byte(agp_bridge->dev, SIS_TLBFLUSH, 0x02);
+}
+
+static int sis_configure(void)
+{
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge->current_size);
+	pci_write_config_byte(agp_bridge->dev, SIS_TLBCNTRL, 0x05);
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+	pci_write_config_dword(agp_bridge->dev, SIS_ATTBASE,
+			       agp_bridge->gatt_bus_addr);
+	pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
+			      current_size->size_value);
+	return 0;
+}
+
+static void sis_cleanup(void)
+{
+	struct aper_size_info_8 *previous_size;
+
+	previous_size = A_SIZE_8(agp_bridge->previous_size);
+	pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
+			      (previous_size->size_value & ~(0x03)));
+}
+
+static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+	struct pci_dev *device = NULL;
+	u32 command;
+	int rate;
+
+	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
+		 agp_bridge->major_version, agp_bridge->minor_version);
+
+	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command);
+	command = agp_collect_device_status(bridge, mode, command);
+	command |= AGPSTAT_AGP_ENABLE;
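+	/* In an AGP3 command the low bits hold the rate code (1 = 4x,
+	 * 2 = 8x); the shift below turns it into the printable Nx value. */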
+	rate = (command & 0x7) << 2;
+
+	for_each_pci_dev(device) {
+		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
+		if (!agp)
+			continue;
+
+		dev_info(&agp_bridge->dev->dev, "putting AGP V3 device at %s into %dx mode\n",
+			 pci_name(device), rate);
+
+		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command);
+
+		/*
+		 * Weird: on some SiS chipsets, any rate change in the target
+		 * command register triggers a ~5 ms window during which the
+		 * master cannot be configured.
+		 */
+		if (device->device == bridge->dev->device) {
+			dev_info(&agp_bridge->dev->dev, "SiS delay workaround: giving bridge time to recover\n");
+			msleep(10);
+		}
+	}
+}
+
+static const struct aper_size_info_8 sis_generic_sizes[7] =
+{
+	{256, 65536, 6, 99},
+	{128, 32768, 5, 83},
+	{64, 16384, 4, 67},
+	{32, 8192, 3, 51},
+	{16, 4096, 2, 35},
+	{8, 2048, 1, 19},
+	{4, 1024, 0, 3}
+};
+
+static struct agp_bridge_driver sis_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= sis_generic_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
+	.configure		= sis_configure,
+	.fetch_size		= sis_fetch_size,
+	.cleanup		= sis_cleanup,
+	.tlb_flush		= sis_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= NULL,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+/* Chipsets that require the 'delay hack' */
+static int sis_broken_chipsets[] = {
+	PCI_DEVICE_ID_SI_648,
+	PCI_DEVICE_ID_SI_746,
+	0	/* terminator */
+};
+
+static void sis_get_driver(struct agp_bridge_data *bridge)
+{
+	int i;
+
+	for (i = 0; sis_broken_chipsets[i] != 0; ++i)
+		if (bridge->dev->device == sis_broken_chipsets[i])
+			break;
+
+	if (sis_broken_chipsets[i] || agp_sis_force_delay)
+		sis_driver.agp_enable = sis_delayed_enable;
+
+	/*
+	 * SiS chipsets that report less than AGP 3.5 are not actually
+	 * fully AGP3 compliant, so they keep the native SiS routines;
+	 * only bridges reporting 3.5 or better (or forced with
+	 * agp_sis_agp_spec=1) get the generic AGP3 code.
+	 */
+	if ((agp_bridge->major_version == 3 && agp_bridge->minor_version >= 5
+	     && agp_sis_agp_spec != 0) || agp_sis_agp_spec == 1) {
+		sis_driver.aperture_sizes = agp3_generic_sizes;
+		sis_driver.size_type = U16_APER_SIZE;
+		sis_driver.num_aperture_sizes = AGP_GENERIC_SIZES_ENTRIES;
+		sis_driver.configure = agp3_generic_configure;
+		sis_driver.fetch_size = agp3_generic_fetch_size;
+		sis_driver.cleanup = agp3_generic_cleanup;
+		sis_driver.tlb_flush = agp3_generic_tlbflush;
+	}
+}
+
+
+static int agp_sis_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct agp_bridge_data *bridge;
+	u8 cap_ptr;
+
+	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+	if (!cap_ptr)
+		return -ENODEV;
+
+	dev_info(&pdev->dev, "SiS chipset [%04x/%04x]\n",
+		 pdev->vendor, pdev->device);
+	bridge = agp_alloc_bridge();
+	if (!bridge)
+		return -ENOMEM;
+
+	bridge->driver = &sis_driver;
+	bridge->dev = pdev;
+	bridge->capndx = cap_ptr;
+
+	get_agp_version(bridge);
+
+	/* Fill in the mode register */
+	pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
+	sis_get_driver(bridge);
+
+	pci_set_drvdata(pdev, bridge);
+	return agp_add_bridge(bridge);
+}
+
+static void agp_sis_remove(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+	agp_remove_bridge(bridge);
+	agp_put_bridge(bridge);
+}
+
+#ifdef CONFIG_PM
+
+static int agp_sis_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+static int agp_sis_resume(struct pci_dev *pdev)
+{
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	return sis_driver.configure();
+}
+
+#endif /* CONFIG_PM */
+
+static const struct pci_device_id agp_sis_pci_table[] = {
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_5591,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_530,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_540,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_550,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_620,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_630,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_635,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_645,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_646,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_648,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_650,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_651,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_655,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_661,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_662,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_671,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_730,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_735,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_740,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_741,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_745,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+		.class_mask	= ~0,
+		.vendor		= PCI_VENDOR_ID_SI,
+		.device		= PCI_DEVICE_ID_SI_746,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_sis_pci_table);
+
+static struct pci_driver agp_sis_pci_driver = {
+	.name		= "agpgart-sis",
+	.id_table	= agp_sis_pci_table,
+	.probe		= agp_sis_probe,
+	.remove		= agp_sis_remove,
+#ifdef CONFIG_PM
+	.suspend	= agp_sis_suspend,
+	.resume		= agp_sis_resume,
+#endif
+};
+
+static int __init agp_sis_init(void)
+{
+	if (agp_off)
+		return -EINVAL;
+	return pci_register_driver(&agp_sis_pci_driver);
+}
+
+static void __exit agp_sis_cleanup(void)
+{
+	pci_unregister_driver(&agp_sis_pci_driver);
+}
+
+module_init(agp_sis_init);
+module_exit(agp_sis_cleanup);
+
+module_param(agp_sis_force_delay, bool, 0);
+MODULE_PARM_DESC(agp_sis_force_delay, "forces the SiS delay hack");
+module_param(agp_sis_agp_spec, int, 0);
+MODULE_PARM_DESC(agp_sis_agp_spec, "0=force sis init, 1=force generic agp3 init, default: autodetect");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
new file mode 100644
index 0000000..7729414
--- /dev/null
+++ b/drivers/char/agp/sworks-agp.c
@@ -0,0 +1,571 @@
+/*
+ * Serverworks AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/agp_backend.h>
+#include <asm/set_memory.h>
+#include "agp.h"
+
+#define SVWRKS_COMMAND		0x04
+#define SVWRKS_APSIZE		0x10
+#define SVWRKS_MMBASE		0x14
+#define SVWRKS_CACHING		0x4b
+#define SVWRKS_AGP_ENABLE	0x60
+#define SVWRKS_FEATURE		0x68
+
+#define SVWRKS_SIZE_MASK	0xfe000000
+
+/* Memory mapped registers */
+#define SVWRKS_GART_CACHE	0x02
+#define SVWRKS_GATTBASE		0x04
+#define SVWRKS_TLBFLUSH		0x10
+#define SVWRKS_POSTFLUSH	0x14
+#define SVWRKS_DIRFLUSH		0x0c
+
+
+struct serverworks_page_map {
+	unsigned long *real;
+	unsigned long __iomem *remapped;
+};
+
+static struct _serverworks_private {
+	struct pci_dev *svrwrks_dev;	/* device one */
+	volatile u8 __iomem *registers;
+	struct serverworks_page_map **gatt_pages;
+	int num_tables;
+	struct serverworks_page_map scratch_dir;
+
+	int gart_addr_ofs;
+	int mm_addr_ofs;
+} serverworks_private;
+
+static int serverworks_create_page_map(struct serverworks_page_map *page_map)
+{
+	int i;
+
+	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+	if (page_map->real == NULL) {
+		return -ENOMEM;
+	}
+
+	set_memory_uc((unsigned long)page_map->real, 1);
+	page_map->remapped = page_map->real;
+
+	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
+		writel(agp_bridge->scratch_page, page_map->remapped+i);
+	/* Red Pen: everyone else does a PCI posting flush here */
+
+	return 0;
+}
+
+static void serverworks_free_page_map(struct serverworks_page_map *page_map)
+{
+	set_memory_wb((unsigned long)page_map->real, 1);
+	free_page((unsigned long) page_map->real);
+}
+
+static void serverworks_free_gatt_pages(void)
+{
+	int i;
+	struct serverworks_page_map **tables;
+	struct serverworks_page_map *entry;
+
+	tables = serverworks_private.gatt_pages;
+	for (i = 0; i < serverworks_private.num_tables; i++) {
+		entry = tables[i];
+		if (entry != NULL) {
+			if (entry->real != NULL) {
+				serverworks_free_page_map(entry);
+			}
+			kfree(entry);
+		}
+	}
+	kfree(tables);
+}
+
+static int serverworks_create_gatt_pages(int nr_tables)
+{
+	struct serverworks_page_map **tables;
+	struct serverworks_page_map *entry;
+	int retval = 0;
+	int i;
+
+	tables = kcalloc(nr_tables + 1, sizeof(struct serverworks_page_map *),
+			 GFP_KERNEL);
+	if (tables == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < nr_tables; i++) {
+		entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
+		if (entry == NULL) {
+			retval = -ENOMEM;
+			break;
+		}
+		tables[i] = entry;
+		retval = serverworks_create_page_map(entry);
+		if (retval != 0)
+			break;
+	}
+	serverworks_private.num_tables = nr_tables;
+	serverworks_private.gatt_pages = tables;
+
+	if (retval != 0)
+		serverworks_free_gatt_pages();
+
+	return retval;
+}
+
+#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
+	GET_PAGE_DIR_IDX(addr)]->remapped)
+
+#ifndef GET_PAGE_DIR_OFF
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#endif
+
+#ifndef GET_PAGE_DIR_IDX
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#endif
+
+#ifndef GET_GATT_OFF
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#endif
+
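+/*
+ * The ServerWorks GART is two-level: a single page directory whose
+ * entries each point at a 4K GATT page covering 4M of aperture, with a
+ * scratch directory backing the unused directory slots.
+ */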
+static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	struct aper_size_info_lvl2 *value;
+	struct serverworks_page_map page_dir;
+	int retval;
+	u32 temp;
+	int i;
+
+	value = A_SIZE_LVL2(agp_bridge->current_size);
+	retval = serverworks_create_page_map(&page_dir);
+	if (retval != 0) {
+		return retval;
+	}
+	retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
+	if (retval != 0) {
+		serverworks_free_page_map(&page_dir);
+		return retval;
+	}
+	/* Create a fake scratch directory */
+	for (i = 0; i < 1024; i++) {
+		writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
+		writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
+	}
+
+	retval = serverworks_create_gatt_pages(value->num_entries / 1024);
+	if (retval != 0) {
+		serverworks_free_page_map(&page_dir);
+		serverworks_free_page_map(&serverworks_private.scratch_dir);
+		return retval;
+	}
+
+	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
+	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
+	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+
+	/* Get the address for the gart region.
+	 * This is a bus address even on the Alpha, because it's used to
+	 * program the AGP master, not the CPU.
+	 */
+
+	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
+	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* Point each page directory entry at its GATT page */
+	for (i = 0; i < value->num_entries / 1024; i++)
+		writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
+
+	return 0;
+}
+
+static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
+{
+	struct serverworks_page_map page_dir;
+
+	page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
+	page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
+
+	serverworks_free_gatt_pages();
+	serverworks_free_page_map(&page_dir);
+	serverworks_free_page_map(&serverworks_private.scratch_dir);
+	return 0;
+}
+
+static int serverworks_fetch_size(void)
+{
+	int i;
+	u32 temp;
+	u32 temp2;
+	struct aper_size_info_lvl2 *values;
+
+	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
+	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
+	pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,
+					SVWRKS_SIZE_MASK);
+	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp2);
+	pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,temp);
+	temp2 &= SVWRKS_SIZE_MASK;
+
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (temp2 == values[i].size_value) {
+			agp_bridge->previous_size =
+			    agp_bridge->current_size = (void *) (values + i);
+
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * This routine could be implemented by taking the addresses written to
+ * the GATT and flushing them individually.  However, it currently just
+ * flushes the whole table, which is probably more efficient, since
+ * agp_memory blocks can span a large number of entries.
+ */
+static void serverworks_tlbflush(struct agp_memory *temp)
+{
+	unsigned long timeout;
+
+	writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
+	timeout = jiffies + 3*HZ;
+	while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) {
+		cpu_relax();
+		if (time_after(jiffies, timeout)) {
+			dev_err(&serverworks_private.svrwrks_dev->dev,
+				"TLB post flush took more than 3 seconds\n");
+			break;
+		}
+	}
+
+	writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
+	timeout = jiffies + 3*HZ;
+	while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) {
+		cpu_relax();
+		if (time_after(jiffies, timeout)) {
+			dev_err(&serverworks_private.svrwrks_dev->dev,
+				"TLB Dir flush took more than 3 seconds\n");
+			break;
+		}
+	}
+}
+
+static int serverworks_configure(void)
+{
+	struct aper_size_info_lvl2 *current_size;
+	u32 temp;
+	u8 enable_reg;
+	u16 cap_reg;
+
+	current_size = A_SIZE_LVL2(agp_bridge->current_size);
+
+	/* Get the memory mapped registers */
+	pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
+	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
+	if (!serverworks_private.registers) {
+		dev_err(&agp_bridge->dev->dev, "can't ioremap(%#x)\n", temp);
+		return -ENOMEM;
+	}
+
+	writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
+	readb(serverworks_private.registers+SVWRKS_GART_CACHE);	/* PCI Posting. */
+
+	writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE);
+	readl(serverworks_private.registers+SVWRKS_GATTBASE);	/* PCI Posting. */
+
+	cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND);
+	cap_reg &= ~0x0007;
+	cap_reg |= 0x4;
+	writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND);
+	readw(serverworks_private.registers+SVWRKS_COMMAND);
+
+	pci_read_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, &enable_reg);
+	enable_reg |= 0x1; /* Agp Enable bit */
+	pci_write_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, enable_reg);
+	serverworks_tlbflush(NULL);
+
+	agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);
+
+	/* Fill in the mode register */
+	pci_read_config_dword(serverworks_private.svrwrks_dev,
+			      agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);
+
+	pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg);
+	enable_reg &= ~0x3;
+	pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg);
+
+	pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg);
+	enable_reg |= (1<<6);
+	pci_write_config_byte(agp_bridge->dev,SVWRKS_FEATURE, enable_reg);
+
+	return 0;
+}
+
+static void serverworks_cleanup(void)
+{
+	iounmap((void __iomem *) serverworks_private.registers);
+}
+
+static int serverworks_insert_memory(struct agp_memory *mem,
+			     off_t pg_start, int type)
+{
+	int i, j, num_entries;
+	unsigned long __iomem *cur_gatt;
+	unsigned long addr;
+
+	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+	if (type != 0 || mem->type != 0) {
+		return -EINVAL;
+	}
+	if ((pg_start + mem->page_count) > num_entries) {
+		return -EINVAL;
+	}
+
+	j = pg_start;
+	while (j < (pg_start + mem->page_count)) {
+		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+		cur_gatt = SVRWRKS_GET_GATT(addr);
+		if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
+			return -EBUSY;
+		j++;
+	}
+
+	if (!mem->is_flushed) {
+		global_cache_flush();
+		mem->is_flushed = true;
+	}
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+		cur_gatt = SVRWRKS_GET_GATT(addr);
+		writel(agp_bridge->driver->mask_memory(agp_bridge, 
+				page_to_phys(mem->pages[i]), mem->type),
+		       cur_gatt+GET_GATT_OFF(addr));
+	}
+	serverworks_tlbflush(mem);
+	return 0;
+}
+
+static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
+			     int type)
+{
+	int i;
+	unsigned long __iomem *cur_gatt;
+	unsigned long addr;
+
+	if (type != 0 || mem->type != 0) {
+		return -EINVAL;
+	}
+
+	global_cache_flush();
+	serverworks_tlbflush(mem);
+
+	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+		cur_gatt = SVRWRKS_GET_GATT(addr);
+		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+	}
+
+	serverworks_tlbflush(mem);
+	return 0;
+}
+
+static const struct gatt_mask serverworks_masks[] =
+{
+	{.mask = 1, .type = 0}
+};
+
+static const struct aper_size_info_lvl2 serverworks_sizes[7] =
+{
+	{2048, 524288, 0x80000000},
+	{1024, 262144, 0xc0000000},
+	{512, 131072, 0xe0000000},
+	{256, 65536, 0xf0000000},
+	{128, 32768, 0xf8000000},
+	{64, 16384, 0xfc000000},
+	{32, 8192, 0xfe000000}
+};
+
+static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+	u32 command;
+
+	pci_read_config_dword(serverworks_private.svrwrks_dev,
+			      bridge->capndx + PCI_AGP_STATUS,
+			      &command);
+
+	command = agp_collect_device_status(bridge, mode, command);
+
+	command &= ~0x10;	/* disable FW */
+	command &= ~0x08;
+
+	command |= 0x100;
+
+	pci_write_config_dword(serverworks_private.svrwrks_dev,
+			       bridge->capndx + PCI_AGP_COMMAND,
+			       command);
+
+	agp_device_command(command, false);
+}
+
+static const struct agp_bridge_driver sworks_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= serverworks_sizes,
+	.size_type		= LVL2_APER_SIZE,
+	.num_aperture_sizes	= 7,
+	.configure		= serverworks_configure,
+	.fetch_size		= serverworks_fetch_size,
+	.cleanup		= serverworks_cleanup,
+	.tlb_flush		= serverworks_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= serverworks_masks,
+	.agp_enable		= serverworks_agp_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= serverworks_create_gatt_table,
+	.free_gatt_table	= serverworks_free_gatt_table,
+	.insert_memory		= serverworks_insert_memory,
+	.remove_memory		= serverworks_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static int agp_serverworks_probe(struct pci_dev *pdev,
+				 const struct pci_device_id *ent)
+{
+	struct agp_bridge_data *bridge;
+	struct pci_dev *bridge_dev;
+	u32 temp, temp2;
+	u8 cap_ptr = 0;
+
+	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+
+	switch (pdev->device) {
+	case 0x0006:
+		dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n");
+		return -ENODEV;
+
+	case PCI_DEVICE_ID_SERVERWORKS_HE:
+	case PCI_DEVICE_ID_SERVERWORKS_LE:
+	case 0x0007:
+		break;
+
+	default:
+		if (cap_ptr)
+			dev_err(&pdev->dev, "unsupported Serverworks chipset "
+				"[%04x/%04x]\n", pdev->vendor, pdev->device);
+		return -ENODEV;
+	}
+
+	/* Everything is on func 1 here so we are hardcoding function one */
+	bridge_dev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+			(unsigned int)pdev->bus->number,
+			PCI_DEVFN(0, 1));
+	if (!bridge_dev) {
+		dev_info(&pdev->dev, "can't find secondary device\n");
+		return -ENODEV;
+	}
+
+	serverworks_private.svrwrks_dev = bridge_dev;
+	serverworks_private.gart_addr_ofs = 0x10;
+
+	pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
+	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+		pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
+		if (temp2 != 0) {
+			dev_info(&pdev->dev, "64 bit aperture address, "
+				 "but top bits are not zero; disabling AGP\n");
+			return -ENODEV;
+		}
+		serverworks_private.mm_addr_ofs = 0x18;
+	} else {
+		serverworks_private.mm_addr_ofs = 0x14;
+	}
+
+	pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
+	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+		pci_read_config_dword(pdev,
+				serverworks_private.mm_addr_ofs + 4, &temp2);
+		if (temp2 != 0) {
+			dev_info(&pdev->dev, "64 bit MMIO address, but top "
+				 "bits are not zero; disabling AGP\n");
+			return -ENODEV;
+		}
+	}
+
+	bridge = agp_alloc_bridge();
+	if (!bridge)
+		return -ENOMEM;
+
+	bridge->driver = &sworks_driver;
+	bridge->dev_private_data = &serverworks_private;
+	bridge->dev = pci_dev_get(pdev);
+
+	pci_set_drvdata(pdev, bridge);
+	return agp_add_bridge(bridge);
+}
+
+static void agp_serverworks_remove(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+	pci_dev_put(bridge->dev);
+	agp_remove_bridge(bridge);
+	agp_put_bridge(bridge);
+	pci_dev_put(serverworks_private.svrwrks_dev);
+	serverworks_private.svrwrks_dev = NULL;
+}
+
+static struct pci_device_id agp_serverworks_pci_table[] = {
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_SERVERWORKS,
+	.device		= PCI_ANY_ID,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);
+
+static struct pci_driver agp_serverworks_pci_driver = {
+	.name		= "agpgart-serverworks",
+	.id_table	= agp_serverworks_pci_table,
+	.probe		= agp_serverworks_probe,
+	.remove		= agp_serverworks_remove,
+};
+
+static int __init agp_serverworks_init(void)
+{
+	if (agp_off)
+		return -EINVAL;
+	return pci_register_driver(&agp_serverworks_pci_driver);
+}
+
+static void __exit agp_serverworks_cleanup(void)
+{
+	pci_unregister_driver(&agp_serverworks_pci_driver);
+}
+
+module_init(agp_serverworks_init);
+module_exit(agp_serverworks_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
new file mode 100644
index 0000000..31fcd04
--- /dev/null
+++ b/drivers/char/agp/uninorth-agp.c
@@ -0,0 +1,727 @@
+/*
+ * UniNorth AGPGART routines.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <asm/uninorth.h>
+#include <asm/prom.h>
+#include <asm/pmac_feature.h>
+#include "agp.h"
+
+/*
+ * NOTES on uninorth3 (G5 AGP) support:
+ *
+ * It may also be possible to use a bigger cache line size for
+ * AGP (see pmac_pci.c and look for cache line). This needs to be
+ * investigated by someone.
+ *
+ * The PAGE size is hardcoded, but this may change; see asm/page.h.
+ *
+ * Jerome Glisse <j.glisse@gmail.com>
+ */
+static int uninorth_rev;
+static int is_u3;
+static u32 scratch_value;
+
+#define DEFAULT_APERTURE_SIZE 256
+#define DEFAULT_APERTURE_STRING "256"
+static char *aperture = NULL;
+
+static int uninorth_fetch_size(void)
+{
+	int i, size = 0;
+	struct aper_size_info_32 *values =
+	    A_SIZE_32(agp_bridge->driver->aperture_sizes);
+
+	if (aperture) {
+		char *save = aperture;
+
+		size = memparse(aperture, &aperture) >> 20;
+		aperture = save;
+
+		for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++)
+			if (size == values[i].size)
+				break;
+
+		if (i == agp_bridge->driver->num_aperture_sizes) {
+			dev_err(&agp_bridge->dev->dev, "invalid aperture size, "
+				"using default\n");
+			size = 0;
+			aperture = NULL;
+		}
+	}
+
+	if (!size) {
+		for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++)
+			if (values[i].size == DEFAULT_APERTURE_SIZE)
+				break;
+	}
+
+	agp_bridge->previous_size =
+	    agp_bridge->current_size = (void *)(values + i);
+	agp_bridge->aperture_size_idx = i;
+	return values[i].size;
+}
+
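
uninorth_fetch_size() accepts the aperture= module parameter only when it
names an entry in the driver's size table, falling back to the 256 MB
default otherwise. A minimal userspace approximation of that lookup, with a
hand-rolled K/M/G suffix parse standing in for the kernel's memparse():

#include <stdio.h>
#include <stdlib.h>

static const int sizes_mb[] = { 256, 128, 64, 32, 16, 8, 4 };

static int pick_aperture(const char *arg)
{
	char *end;
	unsigned long bytes = strtoul(arg, &end, 0);
	unsigned int i;

	switch (*end) {		/* crude stand-in for memparse() */
	case 'G': case 'g': bytes <<= 30; break;
	case 'M': case 'm': bytes <<= 20; break;
	case 'K': case 'k': bytes <<= 10; break;
	}

	for (i = 0; i < sizeof(sizes_mb) / sizeof(sizes_mb[0]); i++)
		if ((bytes >> 20) == (unsigned long)sizes_mb[i])
			return sizes_mb[i];

	fprintf(stderr, "invalid aperture size, using default\n");
	return 256;		/* DEFAULT_APERTURE_SIZE */
}

int main(void)
{
	printf("%d MB\n", pick_aperture("64M"));  /* in the table: 64 */
	printf("%d MB\n", pick_aperture("48M"));  /* not in table: 256 */
	return 0;
}
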
+static void uninorth_tlbflush(struct agp_memory *mem)
+{
+	u32 ctrl = UNI_N_CFG_GART_ENABLE;
+
+	if (is_u3)
+		ctrl |= U3_N_CFG_GART_PERFRD;
+	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+			       ctrl | UNI_N_CFG_GART_INVAL);
+	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl);
+
+	if (!mem && uninorth_rev <= 0x30) {
+		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+				       ctrl | UNI_N_CFG_GART_2xRESET);
+		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+				       ctrl);
+	}
+}
+
+static void uninorth_cleanup(void)
+{
+	u32 tmp;
+
+	pci_read_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, &tmp);
+	if (!(tmp & UNI_N_CFG_GART_ENABLE))
+		return;
+	tmp |= UNI_N_CFG_GART_INVAL;
+	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, tmp);
+	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, 0);
+
+	if (uninorth_rev <= 0x30) {
+		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+				       UNI_N_CFG_GART_2xRESET);
+		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+				       0);
+	}
+}
+
+static int uninorth_configure(void)
+{
+	struct aper_size_info_32 *current_size;
+
+	current_size = A_SIZE_32(agp_bridge->current_size);
+
+	dev_info(&agp_bridge->dev->dev, "configuring for size idx: %d\n",
+		 current_size->size_value);
+
+	/* aperture size and gatt addr */
+	pci_write_config_dword(agp_bridge->dev,
+		UNI_N_CFG_GART_BASE,
+		(agp_bridge->gatt_bus_addr & 0xfffff000)
+			| current_size->size_value);
+
+	/* HACK ALERT
+	 * UniNorth seems to be buggy enough not to behave properly when
+	 * the AGP aperture isn't mapped at bus physical address 0
+	 */
+	agp_bridge->gart_bus_addr = 0;
+#ifdef CONFIG_PPC64
+	/* Assume U3 or later on PPC64 systems */
+	/* high 4 bits of GART physical address go in UNI_N_CFG_AGP_BASE */
+	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_AGP_BASE,
+			       (agp_bridge->gatt_bus_addr >> 32) & 0xf);
+#else
+	pci_write_config_dword(agp_bridge->dev,
+		UNI_N_CFG_AGP_BASE, agp_bridge->gart_bus_addr);
+#endif
+
+	if (is_u3) {
+		pci_write_config_dword(agp_bridge->dev,
+				       UNI_N_CFG_GART_DUMMY_PAGE,
+				       page_to_phys(agp_bridge->scratch_page_page) >> 12);
+	}
+
+	return 0;
+}
+
+static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+	int i, num_entries;
+	void *temp;
+	u32 *gp;
+	int mask_type;
+
+	if (type != mem->type)
+		return -EINVAL;
+
+	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+	if (mask_type != 0) {
+		/* We know nothing of memory types */
+		return -EINVAL;
+	}
+
+	if (mem->page_count == 0)
+		return 0;
+
+	temp = agp_bridge->current_size;
+	num_entries = A_SIZE_32(temp)->num_entries;
+
+	if ((pg_start + mem->page_count) > num_entries)
+		return -EINVAL;
+
+	gp = (u32 *) &agp_bridge->gatt_table[pg_start];
+	for (i = 0; i < mem->page_count; ++i) {
+		if (gp[i] != scratch_value) {
+			dev_info(&agp_bridge->dev->dev,
+				 "uninorth_insert_memory: entry 0x%x occupied (%x)\n",
+				 i, gp[i]);
+			return -EBUSY;
+		}
+	}
+
+	for (i = 0; i < mem->page_count; i++) {
+		if (is_u3)
+			gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL;
+		else
+			gp[i] =	cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) |
+					    0x1UL);
+		flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
+				   (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
+	}
+	mb();
+	uninorth_tlbflush(mem);
+
+	return 0;
+}
+
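
The two GATT encodings above differ: U3 stores the page frame number with
bit 31 as the valid bit, while older UniNorth stores the 4 KB-masked
physical address, little-endian, with bit 0 as the valid bit. A standalone
sketch of the same packing, assuming 4 KB pages (PAGE_SHIFT == 12):

#include <stdio.h>

#define PAGE_SHIFT 12

/* U3: page frame number, bit 31 = valid. */
static unsigned int u3_entry(unsigned long long phys)
{
	return (unsigned int)(phys >> PAGE_SHIFT) | 0x80000000u;
}

/* Older UniNorth: masked physical address, bit 0 = valid; the driver
 * applies cpu_to_le32(), a no-op on a little-endian host. */
static unsigned int uninorth_entry(unsigned long long phys)
{
	return ((unsigned int)phys & 0xFFFFF000u) | 0x1u;
}

int main(void)
{
	unsigned long long phys = 0x12345000ull;

	printf("u3:       0x%08x\n", u3_entry(phys));       /* 0x80012345 */
	printf("uninorth: 0x%08x\n", uninorth_entry(phys)); /* 0x12345001 */
	return 0;
}
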
+static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+	size_t i;
+	u32 *gp;
+	int mask_type;
+
+	if (type != mem->type)
+		return -EINVAL;
+
+	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+	if (mask_type != 0) {
+		/* We know nothing of memory types */
+		return -EINVAL;
+	}
+
+	if (mem->page_count == 0)
+		return 0;
+
+	gp = (u32 *) &agp_bridge->gatt_table[pg_start];
+	for (i = 0; i < mem->page_count; ++i) {
+		gp[i] = scratch_value;
+	}
+	mb();
+	uninorth_tlbflush(mem);
+
+	return 0;
+}
+
+static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+	u32 command, scratch, status;
+	int timeout;
+
+	pci_read_config_dword(bridge->dev,
+			      bridge->capndx + PCI_AGP_STATUS,
+			      &status);
+
+	command = agp_collect_device_status(bridge, mode, status);
+	command |= PCI_AGP_COMMAND_AGP;
+
+	if (uninorth_rev == 0x21) {
+		/*
+		 * Darwin disables AGP 4x on this revision, so we
+		 * may assume it's broken. This is an AGP2 controller.
+		 */
+		command &= ~AGPSTAT2_4X;
+	}
+
+	if ((uninorth_rev >= 0x30) && (uninorth_rev <= 0x33)) {
+		/*
+		 * We need to set REQ_DEPTH to 7 for U3 versions 1.0, 2.1,
+		 * 2.2 and 2.3; Darwin does so.
+		 */
+		if ((command >> AGPSTAT_RQ_DEPTH_SHIFT) > 7)
+			command = (command & ~AGPSTAT_RQ_DEPTH)
+				| (7 << AGPSTAT_RQ_DEPTH_SHIFT);
+	}
+
+	uninorth_tlbflush(NULL);
+
+	timeout = 0;
+	do {
+		pci_write_config_dword(bridge->dev,
+				       bridge->capndx + PCI_AGP_COMMAND,
+				       command);
+		pci_read_config_dword(bridge->dev,
+				      bridge->capndx + PCI_AGP_COMMAND,
+				       &scratch);
+	} while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000);
+	if ((scratch & PCI_AGP_COMMAND_AGP) == 0)
+		dev_err(&bridge->dev->dev, "can't write UniNorth AGP "
+			"command register\n");
+
+	if (uninorth_rev >= 0x30) {
+		/* This is an AGP V3 */
+		agp_device_command(command, (status & AGPSTAT_MODE_3_0) != 0);
+	} else {
+		/* AGP V2 */
+		agp_device_command(command, false);
+	}
+
+	uninorth_tlbflush(NULL);
+}
+
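
The REQ_DEPTH clamp above is plain field surgery on the AGP command word;
assuming, as in the kernel's drivers/char/agp/agp.h, that the RQ field is
the top byte (mask 0xff000000, shift 24), a standalone sketch of the clamp
looks like this:

#include <stdio.h>

#define RQ_DEPTH_MASK	0xff000000u
#define RQ_DEPTH_SHIFT	24

static unsigned int clamp_rq_depth(unsigned int command, unsigned int max)
{
	if ((command >> RQ_DEPTH_SHIFT) > max)
		command = (command & ~RQ_DEPTH_MASK)
			| (max << RQ_DEPTH_SHIFT);
	return command;
}

int main(void)
{
	/* A command word advertising a request depth of 31 is clamped
	 * to 7, leaving the low bits untouched. */
	printf("0x%08x\n", clamp_rq_depth(0x1f000302u, 7)); /* 0x07000302 */
	return 0;
}
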
+#ifdef CONFIG_PM
+/*
+ * These Power Management routines are _not_ called by the normal PCI PM layer,
+ * but directly by the video driver through function pointers in the device
+ * tree.
+ */
+static int agp_uninorth_suspend(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge;
+	u32 cmd;
+	u8 agp;
+	struct pci_dev *device = NULL;
+
+	bridge = agp_find_bridge(pdev);
+	if (bridge == NULL)
+		return -ENODEV;
+
+	/* Only one suspend supported */
+	if (bridge->dev_private_data)
+		return 0;
+
+	/* turn off AGP on the video chip, if it was enabled */
+	for_each_pci_dev(device) {
+		/* Don't touch the bridge yet, device first */
+		if (device == pdev)
+			continue;
+		/* Only deal with devices on the same bus here, no Mac has a P2P
+		 * bridge on the AGP port, and mucking around the entire PCI
+		 * tree is a source of problems on some machines because of a bug
+		 * in some versions of pci_find_capability() when hitting a dead
+		 * device
+		 */
+		if (device->bus != pdev->bus)
+			continue;
+		agp = pci_find_capability(device, PCI_CAP_ID_AGP);
+		if (!agp)
+			continue;
+		pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd);
+		if (!(cmd & PCI_AGP_COMMAND_AGP))
+			continue;
+		dev_info(&pdev->dev, "disabling AGP on device %s\n",
+			 pci_name(device));
+		cmd &= ~PCI_AGP_COMMAND_AGP;
+		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd);
+	}
+
+	/* turn off AGP on the bridge */
+	agp = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+	pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd);
+	bridge->dev_private_data = (void *)(long)cmd;
+	if (cmd & PCI_AGP_COMMAND_AGP) {
+		dev_info(&pdev->dev, "disabling AGP on bridge\n");
+		cmd &= ~PCI_AGP_COMMAND_AGP;
+		pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd);
+	}
+	/* turn off the GART */
+	uninorth_cleanup();
+
+	return 0;
+}
+
+static int agp_uninorth_resume(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge;
+	u32 command;
+
+	bridge = agp_find_bridge(pdev);
+	if (bridge == NULL)
+		return -ENODEV;
+
+	command = (long)bridge->dev_private_data;
+	bridge->dev_private_data = NULL;
+	if (!(command & PCI_AGP_COMMAND_AGP))
+		return 0;
+
+	uninorth_agp_enable(bridge, command);
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct {
+	struct page **pages_arr;
+} uninorth_priv;
+
+static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	char *table;
+	char *table_end;
+	int size;
+	int page_order;
+	int num_entries;
+	int i;
+	void *temp;
+	struct page *page;
+
+	/* We can't handle 2 level gatt's */
+	if (bridge->driver->size_type == LVL2_APER_SIZE)
+		return -EINVAL;
+
+	table = NULL;
+	i = bridge->aperture_size_idx;
+	temp = bridge->current_size;
+	size = page_order = num_entries = 0;
+
+	do {
+		size = A_SIZE_32(temp)->size;
+		page_order = A_SIZE_32(temp)->page_order;
+		num_entries = A_SIZE_32(temp)->num_entries;
+
+		table = (char *) __get_free_pages(GFP_KERNEL, page_order);
+
+		if (table == NULL) {
+			i++;
+			bridge->current_size = A_IDX32(bridge);
+		} else {
+			bridge->aperture_size_idx = i;
+		}
+	} while (!table && (i < bridge->driver->num_aperture_sizes));
+
+	if (table == NULL)
+		return -ENOMEM;
+
+	uninorth_priv.pages_arr = kmalloc_array(1 << page_order,
+						sizeof(struct page *),
+						GFP_KERNEL);
+	if (uninorth_priv.pages_arr == NULL)
+		goto enomem;
+
+	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+	for (page = virt_to_page(table), i = 0; page <= virt_to_page(table_end);
+	     page++, i++) {
+		SetPageReserved(page);
+		uninorth_priv.pages_arr[i] = page;
+	}
+
+	bridge->gatt_table_real = (u32 *) table;
+	/* Need to clear out any dirty data still sitting in caches */
+	flush_dcache_range((unsigned long)table,
+			   (unsigned long)table_end + 1);
+	bridge->gatt_table = vmap(uninorth_priv.pages_arr, (1 << page_order), 0, PAGE_KERNEL_NCG);
+
+	if (bridge->gatt_table == NULL)
+		goto enomem;
+
+	bridge->gatt_bus_addr = virt_to_phys(table);
+
+	if (is_u3)
+		scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL;
+	else
+		scratch_value =	cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) |
+				0x1UL);
+	for (i = 0; i < num_entries; i++)
+		bridge->gatt_table[i] = scratch_value;
+
+	return 0;
+
+enomem:
+	kfree(uninorth_priv.pages_arr);
+	if (table)
+		free_pages((unsigned long)table, page_order);
+	return -ENOMEM;
+}
+
+static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
+{
+	int page_order;
+	char *table, *table_end;
+	void *temp;
+	struct page *page;
+
+	temp = bridge->current_size;
+	page_order = A_SIZE_32(temp)->page_order;
+
+	/* Do not worry about freeing memory, because if this is
+	 * called, then all agp memory is deallocated and removed
+	 * from the table.
+	 */
+
+	vunmap(bridge->gatt_table);
+	kfree(uninorth_priv.pages_arr);
+	table = (char *) bridge->gatt_table_real;
+	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+		ClearPageReserved(page);
+
+	free_pages((unsigned long) bridge->gatt_table_real, page_order);
+
+	return 0;
+}
+
+static void null_cache_flush(void)
+{
+	mb();
+}
+
+/* Setup function */
+
+static const struct aper_size_info_32 uninorth_sizes[] =
+{
+	{256, 65536, 6, 64},
+	{128, 32768, 5, 32},
+	{64, 16384, 4, 16},
+	{32, 8192, 3, 8},
+	{16, 4096, 2, 4},
+	{8, 2048, 1, 2},
+	{4, 1024, 0, 1}
+};
+
+/*
+ * Not sure that u3 supports aperture sizes this high, but it
+ * would be strange if it did not :)
+ */
+static const struct aper_size_info_32 u3_sizes[] =
+{
+	{512, 131072, 7, 128},
+	{256, 65536, 6, 64},
+	{128, 32768, 5, 32},
+	{64, 16384, 4, 16},
+	{32, 8192, 3, 8},
+	{16, 4096, 2, 4},
+	{8, 2048, 1, 2},
+	{4, 1024, 0, 1}
+};
+
+const struct agp_bridge_driver uninorth_agp_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= (void *)uninorth_sizes,
+	.size_type		= U32_APER_SIZE,
+	.num_aperture_sizes	= ARRAY_SIZE(uninorth_sizes),
+	.configure		= uninorth_configure,
+	.fetch_size		= uninorth_fetch_size,
+	.cleanup		= uninorth_cleanup,
+	.tlb_flush		= uninorth_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= NULL,
+	.cache_flush		= null_cache_flush,
+	.agp_enable		= uninorth_agp_enable,
+	.create_gatt_table	= uninorth_create_gatt_table,
+	.free_gatt_table	= uninorth_free_gatt_table,
+	.insert_memory		= uninorth_insert_memory,
+	.remove_memory		= uninorth_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+	.cant_use_aperture	= true,
+	.needs_scratch_page	= true,
+};
+
+const struct agp_bridge_driver u3_agp_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= (void *)u3_sizes,
+	.size_type		= U32_APER_SIZE,
+	.num_aperture_sizes	= ARRAY_SIZE(u3_sizes),
+	.configure		= uninorth_configure,
+	.fetch_size		= uninorth_fetch_size,
+	.cleanup		= uninorth_cleanup,
+	.tlb_flush		= uninorth_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= NULL,
+	.cache_flush		= null_cache_flush,
+	.agp_enable		= uninorth_agp_enable,
+	.create_gatt_table	= uninorth_create_gatt_table,
+	.free_gatt_table	= uninorth_free_gatt_table,
+	.insert_memory		= uninorth_insert_memory,
+	.remove_memory		= uninorth_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+	.cant_use_aperture	= true,
+	.needs_scratch_page	= true,
+};
+
+static struct agp_device_ids uninorth_agp_device_ids[] = {
+	{
+		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP,
+		.chipset_name	= "UniNorth",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP_P,
+		.chipset_name	= "UniNorth/Pangea",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP15,
+		.chipset_name	= "UniNorth 1.5",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP2,
+		.chipset_name	= "UniNorth 2",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_APPLE_U3_AGP,
+		.chipset_name	= "U3",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_APPLE_U3L_AGP,
+		.chipset_name	= "U3L",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_APPLE_U3H_AGP,
+		.chipset_name	= "U3H",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_APPLE_IPID2_AGP,
+		.chipset_name	= "UniNorth/Intrepid2",
+	},
+};
+
+static int agp_uninorth_probe(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+	struct agp_device_ids *devs = uninorth_agp_device_ids;
+	struct agp_bridge_data *bridge;
+	struct device_node *uninorth_node;
+	u8 cap_ptr;
+	int j;
+
+	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+	if (cap_ptr == 0)
+		return -ENODEV;
+
+	/* probe for known chipsets */
+	for (j = 0; devs[j].chipset_name != NULL; ++j) {
+		if (pdev->device == devs[j].device_id) {
+			dev_info(&pdev->dev, "Apple %s chipset\n",
+				 devs[j].chipset_name);
+			goto found;
+		}
+	}
+
+	dev_err(&pdev->dev, "unsupported Apple chipset [%04x/%04x]\n",
+		pdev->vendor, pdev->device);
+	return -ENODEV;
+
+ found:
+	/* Set revision to 0 if we could not read it. */
+	uninorth_rev = 0;
+	is_u3 = 0;
+	/* Locate core99 Uni-N */
+	uninorth_node = of_find_node_by_name(NULL, "uni-n");
+	/* Locate G5 u3 */
+	if (uninorth_node == NULL) {
+		is_u3 = 1;
+		uninorth_node = of_find_node_by_name(NULL, "u3");
+	}
+	if (uninorth_node) {
+		const int *revprop = of_get_property(uninorth_node,
+				"device-rev", NULL);
+		if (revprop != NULL)
+			uninorth_rev = *revprop & 0x3f;
+		of_node_put(uninorth_node);
+	}
+
+#ifdef CONFIG_PM
+	/* Inform platform of our suspend/resume caps */
+	pmac_register_agp_pm(pdev, agp_uninorth_suspend, agp_uninorth_resume);
+#endif
+
+	/* Allocate & setup our driver */
+	bridge = agp_alloc_bridge();
+	if (!bridge)
+		return -ENOMEM;
+
+	if (is_u3)
+		bridge->driver = &u3_agp_driver;
+	else
+		bridge->driver = &uninorth_agp_driver;
+
+	bridge->dev = pdev;
+	bridge->capndx = cap_ptr;
+	bridge->flags = AGP_ERRATA_FASTWRITES;
+
+	/* Fill in the mode register */
+	pci_read_config_dword(pdev, cap_ptr+PCI_AGP_STATUS, &bridge->mode);
+
+	pci_set_drvdata(pdev, bridge);
+	return agp_add_bridge(bridge);
+}
+
+static void agp_uninorth_remove(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+#ifdef CONFIG_PM
+	/* Inform platform of our suspend/resume caps */
+	pmac_register_agp_pm(pdev, NULL, NULL);
+#endif
+
+	agp_remove_bridge(bridge);
+	agp_put_bridge(bridge);
+}
+
+static const struct pci_device_id agp_uninorth_pci_table[] = {
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_APPLE,
+	.device		= PCI_ANY_ID,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_uninorth_pci_table);
+
+static struct pci_driver agp_uninorth_pci_driver = {
+	.name		= "agpgart-uninorth",
+	.id_table	= agp_uninorth_pci_table,
+	.probe		= agp_uninorth_probe,
+	.remove		= agp_uninorth_remove,
+};
+
+static int __init agp_uninorth_init(void)
+{
+	if (agp_off)
+		return -EINVAL;
+	return pci_register_driver(&agp_uninorth_pci_driver);
+}
+
+static void __exit agp_uninorth_cleanup(void)
+{
+	pci_unregister_driver(&agp_uninorth_pci_driver);
+}
+
+module_init(agp_uninorth_init);
+module_exit(agp_uninorth_cleanup);
+
+module_param(aperture, charp, 0);
+MODULE_PARM_DESC(aperture,
+		 "Aperture size, must be power of two between 4MB and an\n"
+		 "\t\tupper limit specific to the UniNorth revision.\n"
+		 "\t\tDefault: " DEFAULT_APERTURE_STRING "M");
+
+MODULE_AUTHOR("Ben Herrenschmidt & Paul Mackerras");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
new file mode 100644
index 0000000..a4961d3
--- /dev/null
+++ b/drivers/char/agp/via-agp.c
@@ -0,0 +1,598 @@
+/*
+ * VIA AGPGART routines.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+static const struct pci_device_id agp_via_pci_table[];
+
+#define VIA_GARTCTRL	0x80
+#define VIA_APSIZE	0x84
+#define VIA_ATTBASE	0x88
+
+#define VIA_AGP3_GARTCTRL	0x90
+#define VIA_AGP3_APSIZE		0x94
+#define VIA_AGP3_ATTBASE	0x98
+#define VIA_AGPSEL		0xfd
+
+static int via_fetch_size(void)
+{
+	int i;
+	u8 temp;
+	struct aper_size_info_8 *values;
+
+	values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+	pci_read_config_byte(agp_bridge->dev, VIA_APSIZE, &temp);
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge->previous_size =
+			    agp_bridge->current_size = (void *) (values + i);
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+	printk(KERN_ERR PFX "Unknown aperture size from AGP bridge (0x%x)\n", temp);
+	return 0;
+}
+
+
+static int via_configure(void)
+{
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge->current_size);
+	/* aperture size */
+	pci_write_config_byte(agp_bridge->dev, VIA_APSIZE,
+			      current_size->size_value);
+	/* address to map to */
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+	/* GART control register */
+	pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000000f);
+
+	/* attbase - aperture GATT base */
+	pci_write_config_dword(agp_bridge->dev, VIA_ATTBASE,
+			    (agp_bridge->gatt_bus_addr & 0xfffff000) | 3);
+	return 0;
+}
+
+
+static void via_cleanup(void)
+{
+	struct aper_size_info_8 *previous_size;
+
+	previous_size = A_SIZE_8(agp_bridge->previous_size);
+	pci_write_config_byte(agp_bridge->dev, VIA_APSIZE,
+			      previous_size->size_value);
+	/* Do not disable by writing 0 to VIA_ATTBASE; it screws things up
+	 * during reinitialization.
+	 */
+}
+
+
+static void via_tlbflush(struct agp_memory *mem)
+{
+	u32 temp;
+
+	pci_read_config_dword(agp_bridge->dev, VIA_GARTCTRL, &temp);
+	temp |= (1<<7);
+	pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
+	temp &= ~(1<<7);
+	pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
+}
+
+
+static const struct aper_size_info_8 via_generic_sizes[9] =
+{
+	{256, 65536, 6, 0},
+	{128, 32768, 5, 128},
+	{64, 16384, 4, 192},
+	{32, 8192, 3, 224},
+	{16, 4096, 2, 240},
+	{8, 2048, 1, 248},
+	{4, 1024, 0, 252},
+	{2, 512, 0, 254},
+	{1, 256, 0, 255}
+};
+
+
+static int via_fetch_size_agp3(void)
+{
+	int i;
+	u16 temp;
+	struct aper_size_info_16 *values;
+
+	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
+	pci_read_config_word(agp_bridge->dev, VIA_AGP3_APSIZE, &temp);
+	temp &= 0xfff;
+
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge->previous_size =
+				agp_bridge->current_size = (void *) (values + i);
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+	return 0;
+}
+
+
+static int via_configure_agp3(void)
+{
+	u32 temp;
+	struct aper_size_info_16 *current_size;
+
+	current_size = A_SIZE_16(agp_bridge->current_size);
+
+	/* address to map to */
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+	/* attbase - aperture GATT base */
+	pci_write_config_dword(agp_bridge->dev, VIA_AGP3_ATTBASE,
+		agp_bridge->gatt_bus_addr & 0xfffff000);
+
+	/* 1. Enable GTLB in RX90<7>; all AGP aperture accesses need to fetch
+	 *    the translation table first.
+	 * 2. Enable AGP aperture in RX91<0>. This bit controls the enabling of the
+	 *    graphics AGP aperture for the AGP3.0 port.
+	 */
+	pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp);
+	pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp | (3<<7));
+	return 0;
+}
+
+
+static void via_cleanup_agp3(void)
+{
+	struct aper_size_info_16 *previous_size;
+
+	previous_size = A_SIZE_16(agp_bridge->previous_size);
+	pci_write_config_byte(agp_bridge->dev, VIA_APSIZE, previous_size->size_value);
+}
+
+
+static void via_tlbflush_agp3(struct agp_memory *mem)
+{
+	u32 temp;
+
+	pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp);
+	pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp & ~(1<<7));
+	pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp);
+}
+
+
+static const struct agp_bridge_driver via_agp3_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= agp3_generic_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 10,
+	.needs_scratch_page	= true,
+	.configure		= via_configure_agp3,
+	.fetch_size		= via_fetch_size_agp3,
+	.cleanup		= via_cleanup_agp3,
+	.tlb_flush		= via_tlbflush_agp3,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= NULL,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver via_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= via_generic_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 9,
+	.needs_scratch_page	= true,
+	.configure		= via_configure,
+	.fetch_size		= via_fetch_size,
+	.cleanup		= via_cleanup,
+	.tlb_flush		= via_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= NULL,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static struct agp_device_ids via_agp_device_ids[] =
+{
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_82C597_0,
+		.chipset_name	= "Apollo VP3",
+	},
+
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_82C598_0,
+		.chipset_name	= "Apollo MVP3",
+	},
+
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8501_0,
+		.chipset_name	= "Apollo MVP4",
+	},
+
+	/* VT8601 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8601_0,
+		.chipset_name	= "Apollo ProMedia/PLE133Ta",
+	},
+
+	/* VT82C693A / VT28C694T */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_82C691_0,
+		.chipset_name	= "Apollo Pro 133",
+	},
+
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8371_0,
+		.chipset_name	= "KX133",
+	},
+
+	/* VT8633 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8633_0,
+		.chipset_name	= "Pro 266",
+	},
+
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_XN266,
+		.chipset_name	= "Apollo Pro266",
+	},
+
+	/* VT8361 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8361,
+		.chipset_name	= "KLE133",
+	},
+
+	/* VT8365 / VT8362 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8363_0,
+		.chipset_name	= "Twister-K/KT133x/KM133",
+	},
+
+	/* VT8753A */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8753_0,
+		.chipset_name	= "P4X266",
+	},
+
+	/* VT8366 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8367_0,
+		.chipset_name	= "KT266/KY266x/KT333",
+	},
+
+	/* VT8633 (for CuMine/ Celeron) */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8653_0,
+		.chipset_name	= "Pro266T",
+	},
+
+	/* KM266 / PM266 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_XM266,
+		.chipset_name	= "PM266/KM266",
+	},
+
+	/* CLE266 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_862X_0,
+		.chipset_name	= "CLE266",
+	},
+
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8377_0,
+		.chipset_name	= "KT400/KT400A/KT600",
+	},
+
+	/* VT8604 / VT8605 / VT8603
+	 * (Apollo Pro133A chipset with S3 Savage4) */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8605_0,
+		.chipset_name	= "ProSavage PM133/PL133/PN133"
+	},
+
+	/* P4M266x/P4N266 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8703_51_0,
+		.chipset_name	= "P4M266x/P4N266",
+	},
+
+	/* VT8754 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8754C_0,
+		.chipset_name	= "PT800",
+	},
+
+	/* P4X600 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8763_0,
+		.chipset_name	= "P4X600"
+	},
+
+	/* KM400 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8378_0,
+		.chipset_name	= "KM400/KM400A",
+	},
+
+	/* PT880 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_PT880,
+		.chipset_name	= "PT880",
+	},
+
+	/* PT880 Ultra */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_PT880ULTRA,
+		.chipset_name	= "PT880 Ultra",
+	},
+
+	/* PT890 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8783_0,
+		.chipset_name	= "PT890",
+	},
+
+	/* PM800/PN800/PM880/PN880 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_PX8X0_0,
+		.chipset_name	= "PM800/PN800/PM880/PN880",
+	},
+	/* KT880 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_3269_0,
+		.chipset_name	= "KT880",
+	},
+	/* KTxxx/Px8xx */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_83_87XX_1,
+		.chipset_name	= "VT83xx/VT87xx/KTxxx/Px8xx",
+	},
+	/* P4M800 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_3296_0,
+		.chipset_name	= "P4M800",
+	},
+	/* P4M800CE */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_P4M800CE,
+		.chipset_name	= "VT3314",
+	},
+	/* VT3324 / CX700 */
+	{
+		.device_id  = PCI_DEVICE_ID_VIA_VT3324,
+		.chipset_name   = "CX700",
+	},
+	/* VT3336 - this is a chipset for AMD Athlon/K8 CPUs. Due to K8's
+	 * unique architecture, the AGP resource and behavior differ from
+	 * traditional AGP, which resides only in the chipset. AGP is used
+	 * by the 3D driver, which wasn't available for the VT3336 and
+	 * VT3364 generation until now.  Unfortunately, testing shows that
+	 * VT3364 works but VT3336 doesn't - explanation from VIA. Just
+	 * leave this as a placeholder to avoid future patches adding it
+	 * back in.
+	 */
+#if 0
+	{
+		.device_id  = PCI_DEVICE_ID_VIA_VT3336,
+		.chipset_name   = "VT3336",
+	},
+#endif
+	/* P4M890 */
+	{
+		.device_id  = PCI_DEVICE_ID_VIA_P4M890,
+		.chipset_name   = "P4M890",
+	},
+	/* P4M900 */
+	{
+		.device_id  = PCI_DEVICE_ID_VIA_VT3364,
+		.chipset_name   = "P4M900",
+	},
+	{ }, /* dummy final entry, always present */
+};
+
+
+/*
+ * VIA's AGP3 chipsets do magic to make the AGP bridge compliant
+ * with the same standards version as the graphics card.
+ */
+static void check_via_agp3 (struct agp_bridge_data *bridge)
+{
+	u8 reg;
+
+	pci_read_config_byte(bridge->dev, VIA_AGPSEL, &reg);
+	/* Check AGP 2.0 compatibility mode. */
+	if ((reg & (1<<1))==0)
+		bridge->driver = &via_agp3_driver;
+}
+
+
+static int agp_via_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct agp_device_ids *devs = via_agp_device_ids;
+	struct agp_bridge_data *bridge;
+	int j = 0;
+	u8 cap_ptr;
+
+	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+	if (!cap_ptr)
+		return -ENODEV;
+
+	j = ent - agp_via_pci_table;
+	printk(KERN_INFO PFX "Detected VIA %s chipset\n", devs[j].chipset_name);
+
+	bridge = agp_alloc_bridge();
+	if (!bridge)
+		return -ENOMEM;
+
+	bridge->dev = pdev;
+	bridge->capndx = cap_ptr;
+	bridge->driver = &via_driver;
+
+	/*
+	 * Garg, there are KT400s with KT266 IDs.
+	 */
+	if (pdev->device == PCI_DEVICE_ID_VIA_8367_0) {
+		/* Is there a KT400 subsystem? */
+		if (pdev->subsystem_device == PCI_DEVICE_ID_VIA_8377_0) {
+			printk(KERN_INFO PFX "Found KT400 in disguise as a KT266.\n");
+			check_via_agp3(bridge);
+		}
+	}
+
+	/* If this is an AGP3 bridge, check which mode it's in and adjust. */
+	get_agp_version(bridge);
+	if (bridge->major_version >= 3)
+		check_via_agp3(bridge);
+
+	/* Fill in the mode register */
+	pci_read_config_dword(pdev,
+			bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
+
+	pci_set_drvdata(pdev, bridge);
+	return agp_add_bridge(bridge);
+}
+
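
agp_via_probe() leans on j = ent - agp_via_pci_table: the matched
pci_device_id's offset doubles as an index into via_agp_device_ids, which is
why the id table further down must stay in the same order as the name table.
A small sketch of that parallel-table idiom (types and values invented for
illustration):

#include <stdio.h>

struct id { unsigned int device; };

/* These two tables must stay in the same order. */
static const struct id ids[] = { { 0x0597 }, { 0x0598 }, { 0x8501 } };
static const char *names[]   = { "Apollo VP3", "Apollo MVP3", "Apollo MVP4" };

static const struct id *match(unsigned int device)
{
	unsigned int i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		if (ids[i].device == device)
			return &ids[i];
	return NULL;
}

int main(void)
{
	const struct id *ent = match(0x0598);

	/* Pointer subtraction yields the array index, exactly like
	 * j = ent - agp_via_pci_table in the probe above. */
	if (ent)
		printf("Detected VIA %s chipset\n", names[ent - ids]);
	return 0;
}
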
+static void agp_via_remove(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+	agp_remove_bridge(bridge);
+	agp_put_bridge(bridge);
+}
+
+#ifdef CONFIG_PM
+
+static int agp_via_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+static int agp_via_resume(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	if (bridge->driver == &via_agp3_driver)
+		return via_configure_agp3();
+	else if (bridge->driver == &via_driver)
+		return via_configure();
+
+	return 0;
+}
+
+#endif /* CONFIG_PM */
+
+/* must be the same order as name table above */
+static const struct pci_device_id agp_via_pci_table[] = {
+#define ID(x) \
+	{						\
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),	\
+	.class_mask	= ~0,				\
+	.vendor		= PCI_VENDOR_ID_VIA,		\
+	.device		= x,				\
+	.subvendor	= PCI_ANY_ID,			\
+	.subdevice	= PCI_ANY_ID,			\
+	}
+	ID(PCI_DEVICE_ID_VIA_82C597_0),
+	ID(PCI_DEVICE_ID_VIA_82C598_0),
+	ID(PCI_DEVICE_ID_VIA_8501_0),
+	ID(PCI_DEVICE_ID_VIA_8601_0),
+	ID(PCI_DEVICE_ID_VIA_82C691_0),
+	ID(PCI_DEVICE_ID_VIA_8371_0),
+	ID(PCI_DEVICE_ID_VIA_8633_0),
+	ID(PCI_DEVICE_ID_VIA_XN266),
+	ID(PCI_DEVICE_ID_VIA_8361),
+	ID(PCI_DEVICE_ID_VIA_8363_0),
+	ID(PCI_DEVICE_ID_VIA_8753_0),
+	ID(PCI_DEVICE_ID_VIA_8367_0),
+	ID(PCI_DEVICE_ID_VIA_8653_0),
+	ID(PCI_DEVICE_ID_VIA_XM266),
+	ID(PCI_DEVICE_ID_VIA_862X_0),
+	ID(PCI_DEVICE_ID_VIA_8377_0),
+	ID(PCI_DEVICE_ID_VIA_8605_0),
+	ID(PCI_DEVICE_ID_VIA_8703_51_0),
+	ID(PCI_DEVICE_ID_VIA_8754C_0),
+	ID(PCI_DEVICE_ID_VIA_8763_0),
+	ID(PCI_DEVICE_ID_VIA_8378_0),
+	ID(PCI_DEVICE_ID_VIA_PT880),
+	ID(PCI_DEVICE_ID_VIA_PT880ULTRA),
+	ID(PCI_DEVICE_ID_VIA_8783_0),
+	ID(PCI_DEVICE_ID_VIA_PX8X0_0),
+	ID(PCI_DEVICE_ID_VIA_3269_0),
+	ID(PCI_DEVICE_ID_VIA_83_87XX_1),
+	ID(PCI_DEVICE_ID_VIA_3296_0),
+	ID(PCI_DEVICE_ID_VIA_P4M800CE),
+	ID(PCI_DEVICE_ID_VIA_VT3324),
+	ID(PCI_DEVICE_ID_VIA_P4M890),
+	ID(PCI_DEVICE_ID_VIA_VT3364),
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_via_pci_table);
+
+
+static struct pci_driver agp_via_pci_driver = {
+	.name		= "agpgart-via",
+	.id_table	= agp_via_pci_table,
+	.probe		= agp_via_probe,
+	.remove		= agp_via_remove,
+#ifdef CONFIG_PM
+	.suspend	= agp_via_suspend,
+	.resume		= agp_via_resume,
+#endif
+};
+
+
+static int __init agp_via_init(void)
+{
+	if (agp_off)
+		return -EINVAL;
+	return pci_register_driver(&agp_via_pci_driver);
+}
+
+static void __exit agp_via_cleanup(void)
+{
+	pci_unregister_driver(&agp_via_pci_driver);
+}
+
+module_init(agp_via_init);
+module_exit(agp_via_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dave Jones");
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
new file mode 100644
index 0000000..53436c0
--- /dev/null
+++ b/drivers/char/apm-emulation.c
@@ -0,0 +1,725 @@
+/*
+ * bios-less APM driver for ARM Linux
+ *  Jamey Hicks <jamey@crl.dec.com>
+ *  adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com)
+ *
+ * APM 1.2 Reference:
+ *   Intel Corporation, Microsoft Corporation. Advanced Power Management
+ *   (APM) BIOS Interface Specification, Revision 1.2, February 1996.
+ *
+ * This document is available from Microsoft at:
+ *    http://www.microsoft.com/whdc/archive/amp_12.mspx
+ */
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+#include <linux/apm_bios.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <linux/apm-emulation.h>
+#include <linux/freezer.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+
+/*
+ * One option can be changed at boot time as follows:
+ *	apm=on/off			enable/disable APM
+ */
+
+/*
+ * Maximum number of events stored
+ */
+#define APM_MAX_EVENTS		16
+
+struct apm_queue {
+	unsigned int		event_head;
+	unsigned int		event_tail;
+	apm_event_t		events[APM_MAX_EVENTS];
+};
+
+/*
+ * thread states (for threads using a writable /dev/apm_bios fd):
+ *
+ * SUSPEND_NONE:	nothing happening
+ * SUSPEND_PENDING:	suspend event queued for thread and pending to be read
+ * SUSPEND_READ:	suspend event read, pending acknowledgement
+ * SUSPEND_ACKED:	acknowledgement received from thread (via ioctl),
+ *			waiting for resume
+ * SUSPEND_ACKTO:	acknowledgement timeout
+ * SUSPEND_DONE:	thread had acked suspend and is now notified of
+ *			resume
+ *
+ * SUSPEND_WAIT:	this thread invoked suspend and is waiting for resume
+ *
+ * A thread migrates in one of three paths:
+ *	NONE -1-> PENDING -2-> READ -3-> ACKED -4-> DONE -5-> NONE
+ *				    -6-> ACKTO -7-> NONE
+ *	NONE -8-> WAIT -9-> NONE
+ *
+ * While in PENDING or READ, the thread is accounted for in the
+ * suspend_acks_pending counter.
+ *
+ * The transitions are invoked as follows:
+ *	1: suspend event is signalled from the core PM code
+ *	2: the suspend event is read from the fd by the userspace thread
+ *	3: userspace thread issues the APM_IOC_SUSPEND ioctl (as ack)
+ *	4: core PM code signals that we have resumed
+ *	5: APM_IOC_SUSPEND ioctl returns
+ *
+ *	6: the notifier invoked from the core PM code timed out waiting
+ *	   for all relevant threads to enter ACKED state and puts those
+ *	   that haven't into ACKTO
+ *	7: those threads issue APM_IOC_SUSPEND ioctl too late,
+ *	   get an error
+ *
+ *	8: userspace thread issues the APM_IOC_SUSPEND ioctl (to suspend),
+ *	   ioctl code invokes pm_suspend()
+ *	9: pm_suspend() returns indicating resume
+ */
+enum apm_suspend_state {
+	SUSPEND_NONE,
+	SUSPEND_PENDING,
+	SUSPEND_READ,
+	SUSPEND_ACKED,
+	SUSPEND_ACKTO,
+	SUSPEND_WAIT,
+	SUSPEND_DONE,
+};
+
+/*
+ * The per-file APM data
+ */
+struct apm_user {
+	struct list_head	list;
+
+	unsigned int		suser: 1;
+	unsigned int		writer: 1;
+	unsigned int		reader: 1;
+
+	int			suspend_result;
+	enum apm_suspend_state	suspend_state;
+
+	struct apm_queue	queue;
+};
+
+/*
+ * Local variables
+ */
+static atomic_t suspend_acks_pending = ATOMIC_INIT(0);
+static atomic_t userspace_notification_inhibit = ATOMIC_INIT(0);
+static int apm_disabled;
+static struct task_struct *kapmd_tsk;
+
+static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
+static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
+
+/*
+ * This is a list of everyone who has opened /dev/apm_bios
+ */
+static DECLARE_RWSEM(user_list_lock);
+static LIST_HEAD(apm_user_list);
+
+/*
+ * kapmd info.  kapmd provides us with a process context to handle
+ * "APM" events within - specifically necessary if we're going
+ * to be suspending the system.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
+static DEFINE_SPINLOCK(kapmd_queue_lock);
+static struct apm_queue kapmd_queue;
+
+static DEFINE_MUTEX(state_lock);
+
+static const char driver_version[] = "1.13";	/* no spaces */
+
+
+
+/*
+ * Compatibility cruft until the IPAQ people move over to the new
+ * interface.
+ */
+static void __apm_get_power_status(struct apm_power_info *info)
+{
+}
+
+/*
+ * This allows machines to provide their own "apm get power status" function.
+ */
+void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
+EXPORT_SYMBOL(apm_get_power_status);
+
+
+/*
+ * APM event queue management.
+ */
+static inline int queue_empty(struct apm_queue *q)
+{
+	return q->event_head == q->event_tail;
+}
+
+static inline apm_event_t queue_get_event(struct apm_queue *q)
+{
+	q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
+	return q->events[q->event_tail];
+}
+
+static void queue_add_event(struct apm_queue *q, apm_event_t event)
+{
+	q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
+	if (q->event_head == q->event_tail) {
+		static int notified;
+
+		if (notified++ == 0)
+		    printk(KERN_ERR "apm: an event queue overflowed\n");
+		q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
+	}
+	q->events[q->event_head] = event;
+}
+
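
queue_add_event() advances the head before writing and, when the ring fills,
silently drops the oldest event by bumping the tail; with this
head == tail means empty convention, APM_MAX_EVENTS slots hold at most
APM_MAX_EVENTS - 1 events. A self-contained sketch of the same discipline:

#include <stdio.h>

#define MAX_EVENTS 4	/* small on purpose; the driver uses 16 */

struct ring {
	unsigned int head, tail;
	int events[MAX_EVENTS];
};

static int ring_empty(const struct ring *q)
{
	return q->head == q->tail;
}

static void ring_add(struct ring *q, int ev)
{
	q->head = (q->head + 1) % MAX_EVENTS;
	if (q->head == q->tail)			/* full: drop the oldest */
		q->tail = (q->tail + 1) % MAX_EVENTS;
	q->events[q->head] = ev;
}

static int ring_get(struct ring *q)
{
	q->tail = (q->tail + 1) % MAX_EVENTS;
	return q->events[q->tail];
}

int main(void)
{
	struct ring q = { 0, 0, { 0 } };
	int i;

	for (i = 1; i <= 5; i++)	/* two more than the ring holds */
		ring_add(&q, i);
	while (!ring_empty(&q))
		printf("%d ", ring_get(&q));	/* prints: 3 4 5 */
	printf("\n");
	return 0;
}
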
+static void queue_event(apm_event_t event)
+{
+	struct apm_user *as;
+
+	down_read(&user_list_lock);
+	list_for_each_entry(as, &apm_user_list, list) {
+		if (as->reader)
+			queue_add_event(&as->queue, event);
+	}
+	up_read(&user_list_lock);
+	wake_up_interruptible(&apm_waitqueue);
+}
+
+static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
+{
+	struct apm_user *as = fp->private_data;
+	apm_event_t event;
+	int i = count, ret = 0;
+
+	if (count < sizeof(apm_event_t))
+		return -EINVAL;
+
+	if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
+		return -EAGAIN;
+
+	wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));
+
+	while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
+		event = queue_get_event(&as->queue);
+
+		ret = -EFAULT;
+		if (copy_to_user(buf, &event, sizeof(event)))
+			break;
+
+		mutex_lock(&state_lock);
+		if (as->suspend_state == SUSPEND_PENDING &&
+		    (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND))
+			as->suspend_state = SUSPEND_READ;
+		mutex_unlock(&state_lock);
+
+		buf += sizeof(event);
+		i -= sizeof(event);
+	}
+
+	if (i < count)
+		ret = count - i;
+
+	return ret;
+}
+
+static __poll_t apm_poll(struct file *fp, poll_table * wait)
+{
+	struct apm_user *as = fp->private_data;
+
+	poll_wait(fp, &apm_waitqueue, wait);
+	return queue_empty(&as->queue) ? 0 : EPOLLIN | EPOLLRDNORM;
+}
+
+/*
+ * apm_ioctl - handle APM ioctl
+ *
+ * APM_IOC_SUSPEND
+ *   This IOCTL is overloaded, and performs two functions.  It is used to:
+ *     - initiate a suspend
+ *     - acknowledge a suspend read from /dev/apm_bios.
+ *   Only when everyone who has opened /dev/apm_bios with write permission
+ *   has acknowledged does the actual suspend happen.
+ */
+static long
+apm_ioctl(struct file *filp, u_int cmd, u_long arg)
+{
+	struct apm_user *as = filp->private_data;
+	int err = -EINVAL;
+
+	if (!as->suser || !as->writer)
+		return -EPERM;
+
+	switch (cmd) {
+	case APM_IOC_SUSPEND:
+		mutex_lock(&state_lock);
+
+		as->suspend_result = -EINTR;
+
+		switch (as->suspend_state) {
+		case SUSPEND_READ:
+			/*
+			 * If we read a suspend command from /dev/apm_bios,
+			 * then the corresponding APM_IOC_SUSPEND ioctl is
+			 * interpreted as an acknowledge.
+			 */
+			as->suspend_state = SUSPEND_ACKED;
+			atomic_dec(&suspend_acks_pending);
+			mutex_unlock(&state_lock);
+
+			/*
+			 * suspend_acks_pending changed, the notifier needs to
+			 * be woken up for this
+			 */
+			wake_up(&apm_suspend_waitqueue);
+
+			/*
+			 * Wait for the suspend/resume to complete.  If there
+			 * are pending acknowledges, we wait here for them.
+			 * wait_event_freezable() is interruptible and a pending
+			 * signal can cause busy looping.  We aren't doing
+			 * anything critical, so chill a bit on each iteration.
+			 */
+			while (wait_event_freezable(apm_suspend_waitqueue,
+					as->suspend_state != SUSPEND_ACKED))
+				msleep(10);
+			break;
+		case SUSPEND_ACKTO:
+			as->suspend_result = -ETIMEDOUT;
+			mutex_unlock(&state_lock);
+			break;
+		default:
+			as->suspend_state = SUSPEND_WAIT;
+			mutex_unlock(&state_lock);
+
+			/*
+			 * Otherwise it is a request to suspend the system.
+			 * Just invoke pm_suspend(), we'll handle it from
+			 * there via the notifier.
+			 */
+			as->suspend_result = pm_suspend(PM_SUSPEND_MEM);
+		}
+
+		mutex_lock(&state_lock);
+		err = as->suspend_result;
+		as->suspend_state = SUSPEND_NONE;
+		mutex_unlock(&state_lock);
+		break;
+	}
+
+	return err;
+}
+
+static int apm_release(struct inode * inode, struct file * filp)
+{
+	struct apm_user *as = filp->private_data;
+
+	filp->private_data = NULL;
+
+	down_write(&user_list_lock);
+	list_del(&as->list);
+	up_write(&user_list_lock);
+
+	/*
+	 * We are now unhooked from the chain.  As far as new
+	 * events are concerned, we no longer exist.
+	 */
+	mutex_lock(&state_lock);
+	if (as->suspend_state == SUSPEND_PENDING ||
+	    as->suspend_state == SUSPEND_READ)
+		atomic_dec(&suspend_acks_pending);
+	mutex_unlock(&state_lock);
+
+	wake_up(&apm_suspend_waitqueue);
+
+	kfree(as);
+	return 0;
+}
+
+static int apm_open(struct inode * inode, struct file * filp)
+{
+	struct apm_user *as;
+
+	as = kzalloc(sizeof(*as), GFP_KERNEL);
+	if (as) {
+		/*
+		 * XXX - this is a tiny bit broken, when we consider BSD
+		 * process accounting. If the device is opened by root, we
+		 * instantly flag that we used superuser privs. Who knows,
+		 * we might close the device immediately without doing a
+		 * privileged operation -- cevans
+		 */
+		as->suser = capable(CAP_SYS_ADMIN);
+		as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
+		as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;
+
+		down_write(&user_list_lock);
+		list_add(&as->list, &apm_user_list);
+		up_write(&user_list_lock);
+
+		filp->private_data = as;
+	}
+
+	return as ? 0 : -ENOMEM;
+}
+
+static const struct file_operations apm_bios_fops = {
+	.owner		= THIS_MODULE,
+	.read		= apm_read,
+	.poll		= apm_poll,
+	.unlocked_ioctl	= apm_ioctl,
+	.open		= apm_open,
+	.release	= apm_release,
+	.llseek		= noop_llseek,
+};
+
+static struct miscdevice apm_device = {
+	.minor		= APM_MINOR_DEV,
+	.name		= "apm_bios",
+	.fops		= &apm_bios_fops
+};
+
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Arguments, with symbols from linux/apm_bios.h.
+ *
+ *   0) Linux driver version (this will change if format changes)
+ *   1) APM BIOS Version.  Usually 1.0, 1.1 or 1.2.
+ *   2) APM flags from APM Installation Check (0x00):
+ *	bit 0: APM_16_BIT_SUPPORT
+ *	bit 1: APM_32_BIT_SUPPORT
+ *	bit 2: APM_IDLE_SLOWS_CLOCK
+ *	bit 3: APM_BIOS_DISABLED
+ *	bit 4: APM_BIOS_DISENGAGED
+ *   3) AC line status
+ *	0x00: Off-line
+ *	0x01: On-line
+ *	0x02: On backup power (BIOS >= 1.1 only)
+ *	0xff: Unknown
+ *   4) Battery status
+ *	0x00: High
+ *	0x01: Low
+ *	0x02: Critical
+ *	0x03: Charging
+ *	0x04: Selected battery not present (BIOS >= 1.2 only)
+ *	0xff: Unknown
+ *   5) Battery flag
+ *	bit 0: High
+ *	bit 1: Low
+ *	bit 2: Critical
+ *	bit 3: Charging
+ *	bit 7: No system battery
+ *	0xff: Unknown
+ *   6) Remaining battery life (percentage of charge):
+ *	0-100: valid
+ *	-1: Unknown
+ *   7) Remaining battery life (time units):
+ *	Number of remaining minutes or seconds
+ *	-1: Unknown
+ *   8) min = minutes; sec = seconds
+ */
+static int proc_apm_show(struct seq_file *m, void *v)
+{
+	struct apm_power_info info;
+	char *units;
+
+	info.ac_line_status = 0xff;
+	info.battery_status = 0xff;
+	info.battery_flag   = 0xff;
+	info.battery_life   = -1;
+	info.time	    = -1;
+	info.units	    = -1;
+
+	if (apm_get_power_status)
+		apm_get_power_status(&info);
+
+	switch (info.units) {
+	default:	units = "?";	break;
+	case 0: 	units = "min";	break;
+	case 1: 	units = "sec";	break;
+	}
+
+	seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
+		     driver_version, APM_32_BIT_SUPPORT,
+		     info.ac_line_status, info.battery_status,
+		     info.battery_flag, info.battery_life,
+		     info.time, units);
+
+	return 0;
+}
+#endif
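
Putting the field list above together, one /proc/apm line in this format
might read "1.13 1.2 0x02 0x01 0x03 0x08 98% -1 ?" (field values purely
illustrative, not taken from any real machine). A userspace sketch of
pulling such a line apart:

#include <stdio.h>

int main(void)
{
	/* Illustrative only; real field values depend on the platform. */
	const char *line = "1.13 1.2 0x02 0x01 0x03 0x08 98% -1 ?";
	char drv[16], bios[16], units[8];
	unsigned int flags, ac, batt, bflag;
	int life, time;

	if (sscanf(line, "%15s %15s 0x%x 0x%x 0x%x 0x%x %d%% %d %7s",
		   drv, bios, &flags, &ac, &batt, &bflag,
		   &life, &time, units) == 9)
		printf("driver %s, AC %s, battery %d%%\n", drv,
		       ac == 0x01 ? "on-line" : "off-line", life);
	return 0;
}
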
+
+static int kapmd(void *arg)
+{
+	do {
+		apm_event_t event;
+
+		wait_event_interruptible(kapmd_wait,
+				!queue_empty(&kapmd_queue) || kthread_should_stop());
+
+		if (kthread_should_stop())
+			break;
+
+		spin_lock_irq(&kapmd_queue_lock);
+		event = 0;
+		if (!queue_empty(&kapmd_queue))
+			event = queue_get_event(&kapmd_queue);
+		spin_unlock_irq(&kapmd_queue_lock);
+
+		switch (event) {
+		case 0:
+			break;
+
+		case APM_LOW_BATTERY:
+		case APM_POWER_STATUS_CHANGE:
+			queue_event(event);
+			break;
+
+		case APM_USER_SUSPEND:
+		case APM_SYS_SUSPEND:
+			pm_suspend(PM_SUSPEND_MEM);
+			break;
+
+		case APM_CRITICAL_SUSPEND:
+			atomic_inc(&userspace_notification_inhibit);
+			pm_suspend(PM_SUSPEND_MEM);
+			atomic_dec(&userspace_notification_inhibit);
+			break;
+		}
+	} while (1);
+
+	return 0;
+}
+
+static int apm_suspend_notifier(struct notifier_block *nb,
+				unsigned long event,
+				void *dummy)
+{
+	struct apm_user *as;
+	int err;
+	unsigned long apm_event;
+
+	/* short-cut emergency suspends */
+	if (atomic_read(&userspace_notification_inhibit))
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+	case PM_HIBERNATION_PREPARE:
+		apm_event = (event == PM_SUSPEND_PREPARE) ?
+			APM_USER_SUSPEND : APM_USER_HIBERNATION;
+		/*
+		 * Queue an event to all "writer" users that we want
+		 * to suspend and need their ack.
+		 */
+		mutex_lock(&state_lock);
+		down_read(&user_list_lock);
+
+		list_for_each_entry(as, &apm_user_list, list) {
+			if (as->suspend_state != SUSPEND_WAIT && as->reader &&
+			    as->writer && as->suser) {
+				as->suspend_state = SUSPEND_PENDING;
+				atomic_inc(&suspend_acks_pending);
+				queue_add_event(&as->queue, apm_event);
+			}
+		}
+
+		up_read(&user_list_lock);
+		mutex_unlock(&state_lock);
+		wake_up_interruptible(&apm_waitqueue);
+
+		/*
+		 * Wait for the suspend_acks_pending variable to drop to
+		 * zero, meaning everybody acked the suspend event (or the
+		 * process was killed.)
+		 *
+		 * If the app won't answer within a short while we assume it
+		 * locked up and ignore it.
+		 */
+		err = wait_event_interruptible_timeout(
+			apm_suspend_waitqueue,
+			atomic_read(&suspend_acks_pending) == 0,
+			5*HZ);
+
+		/* timed out */
+		if (err == 0) {
+			/*
+			 * Move anybody who timed out to "ack timeout" state.
+			 *
+			 * We could time out and userspace could do the ACK
+			 * right after we time out but before we enter the
+			 * locked section here, but that's fine.
+			 */
+			mutex_lock(&state_lock);
+			down_read(&user_list_lock);
+			list_for_each_entry(as, &apm_user_list, list) {
+				if (as->suspend_state == SUSPEND_PENDING ||
+				    as->suspend_state == SUSPEND_READ) {
+					as->suspend_state = SUSPEND_ACKTO;
+					atomic_dec(&suspend_acks_pending);
+				}
+			}
+			up_read(&user_list_lock);
+			mutex_unlock(&state_lock);
+		}
+
+		/* let suspend proceed */
+		if (err >= 0)
+			return NOTIFY_OK;
+
+		/* interrupted by signal */
+		return notifier_from_errno(err);
+
+	case PM_POST_SUSPEND:
+	case PM_POST_HIBERNATION:
+		apm_event = (event == PM_POST_SUSPEND) ?
+			APM_NORMAL_RESUME : APM_HIBERNATION_RESUME;
+		/*
+		 * Anyone on the APM queues will think we're still suspended.
+		 * Send a message so everyone knows we're now awake again.
+		 */
+		queue_event(apm_event);
+
+		/*
+		 * Finally, wake up anyone who is sleeping on the suspend.
+		 */
+		mutex_lock(&state_lock);
+		down_read(&user_list_lock);
+		list_for_each_entry(as, &apm_user_list, list) {
+			if (as->suspend_state == SUSPEND_ACKED) {
+				/*
+				 * TODO: maybe grab error code, needs core
+				 * changes to push the error to the notifier
+				 * chain (could use the second parameter if
+				 * implemented)
+				 */
+				as->suspend_result = 0;
+				as->suspend_state = SUSPEND_DONE;
+			}
+		}
+		up_read(&user_list_lock);
+		mutex_unlock(&state_lock);
+
+		wake_up(&apm_suspend_waitqueue);
+		return NOTIFY_OK;
+
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block apm_notif_block = {
+	.notifier_call = apm_suspend_notifier,
+};
+
+static int __init apm_init(void)
+{
+	int ret;
+
+	if (apm_disabled) {
+		printk(KERN_NOTICE "apm: disabled on user request.\n");
+		return -ENODEV;
+	}
+
+	kapmd_tsk = kthread_create(kapmd, NULL, "kapmd");
+	if (IS_ERR(kapmd_tsk)) {
+		ret = PTR_ERR(kapmd_tsk);
+		kapmd_tsk = NULL;
+		goto out;
+	}
+	wake_up_process(kapmd_tsk);
+
+#ifdef CONFIG_PROC_FS
+	proc_create_single("apm", 0, NULL, proc_apm_show);
+#endif
+
+	ret = misc_register(&apm_device);
+	if (ret)
+		goto out_stop;
+
+	ret = register_pm_notifier(&apm_notif_block);
+	if (ret)
+		goto out_unregister;
+
+	return 0;
+
+ out_unregister:
+	misc_deregister(&apm_device);
+ out_stop:
+	remove_proc_entry("apm", NULL);
+	kthread_stop(kapmd_tsk);
+ out:
+	return ret;
+}
+
+static void __exit apm_exit(void)
+{
+	unregister_pm_notifier(&apm_notif_block);
+	misc_deregister(&apm_device);
+	remove_proc_entry("apm", NULL);
+
+	kthread_stop(kapmd_tsk);
+}
+
+module_init(apm_init);
+module_exit(apm_exit);
+
+MODULE_AUTHOR("Stephen Rothwell");
+MODULE_DESCRIPTION("Advanced Power Management");
+MODULE_LICENSE("GPL");
+
+#ifndef MODULE
+static int __init apm_setup(char *str)
+{
+	while ((str != NULL) && (*str != '\0')) {
+		if (strncmp(str, "off", 3) == 0)
+			apm_disabled = 1;
+		if (strncmp(str, "on", 2) == 0)
+			apm_disabled = 0;
+		str = strchr(str, ',');
+		if (str != NULL)
+			str += strspn(str, ", \t");
+	}
+	return 1;
+}
+
+__setup("apm=", apm_setup);
+#endif
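
For reference, apm_setup() means the kernel command line can carry a
comma-separated list such as apm=off,on, with later options overriding
earlier ones. A userspace re-run of the same walk makes that
last-option-wins behaviour visible:

#include <stdio.h>
#include <string.h>

static int parse_apm(const char *arg)
{
	int disabled = 0;

	while (arg && *arg) {
		if (strncmp(arg, "off", 3) == 0)
			disabled = 1;
		if (strncmp(arg, "on", 2) == 0)
			disabled = 0;
		arg = strchr(arg, ',');
		if (arg)
			arg += strspn(arg, ", \t");
	}
	return disabled;
}

int main(void)
{
	printf("apm=off    -> disabled=%d\n", parse_apm("off"));    /* 1 */
	printf("apm=off,on -> disabled=%d\n", parse_apm("off,on")); /* 0 */
	return 0;
}
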
+
+/**
+ * apm_queue_event - queue an APM event for kapmd
+ * @event: APM event
+ *
+ * Queue an APM event for kapmd to process and ultimately take the
+ * appropriate action.  Only a subset of events are handled:
+ *   %APM_LOW_BATTERY
+ *   %APM_POWER_STATUS_CHANGE
+ *   %APM_USER_SUSPEND
+ *   %APM_SYS_SUSPEND
+ *   %APM_CRITICAL_SUSPEND
+ */
+void apm_queue_event(apm_event_t event)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kapmd_queue_lock, flags);
+	queue_add_event(&kapmd_queue, event);
+	spin_unlock_irqrestore(&kapmd_queue_lock, flags);
+
+	wake_up_interruptible(&kapmd_wait);
+}
+EXPORT_SYMBOL(apm_queue_event);
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
new file mode 100644
index 0000000..c0a5b1f
--- /dev/null
+++ b/drivers/char/applicom.c
@@ -0,0 +1,842 @@
+/* Derived from Applicom driver ac.c for SCO Unix                            */
+/* Ported by David Woodhouse, Axiom (Cambridge) Ltd.                         */
+/* dwmw2@infradead.org 30/8/98                                               */
+/* $Id: ac.c,v 1.30 2000/03/22 16:03:57 dwmw2 Exp $			     */
+/* This module is for Linux 2.1 and 2.2 series kernels.                      */
+/*****************************************************************************/
+/* J PAGET 18/02/94: move to V2.4.2; ioctl code 2 resets the interrupts,    */
+/* so that we reset correctly after a wild exit                             */
+/* J PAGET 02/05/94: move to V2.4.3; in the interrupt handling code,        */
+/* LoopCount was not initialised to 0.                                      */
+/* F LAFORSE 04/07/95: version V2.6.0; dummy read after accessing a board   */
+/*           to release the bus                                             */
+/* J.PAGET 19/11/95: version V2.6.1; the number, address and irq are no     */
+/* longer configured and passed as arguments to acinit, but are probed on   */
+/* the bus to adapt to the number of boards present. IOCTL 6 showed V2.4.3  */
+/* F.LAFORSE 28/11/95: creation of acXX.o files with the boards' various    */
+/* base addresses; IOCTL 6 made more complete                               */
+/* J.PAGET 19/08/96: copy of version V2.6 to V2.8.0 with no code change     */
+/* other than the text V2.6.1 to V2.8.0                                     */
+/*****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/miscdevice.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+
+#include <asm/io.h>
+#include <linux/uaccess.h>
+
+#include "applicom.h"
+
+
+/* NOTE: We use for loops with {write,read}b() instead of 
+   memcpy_{from,to}io throughout this driver. This is because
+   the board doesn't correctly handle word accesses - only
+   bytes. 
+*/
+
+
+#undef DEBUG
+
+#define MAX_BOARD 8		/* maximum number of PC boards possible */
+#define MAX_ISA_BOARD 4
+#define LEN_RAM_IO 0x800
+#define AC_MINOR 157
+
+#ifndef PCI_VENDOR_ID_APPLICOM
+#define PCI_VENDOR_ID_APPLICOM                0x1389
+#define PCI_DEVICE_ID_APPLICOM_PCIGENERIC     0x0001
+#define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002
+#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB     0x0003
+#endif
+
+static DEFINE_MUTEX(ac_mutex);
+static char *applicom_pci_devnames[] = {
+	"PCI board",
+	"PCI2000IBS / PCI2000CAN",
+	"PCI2000PFB"
+};
+
+static const struct pci_device_id applicom_pci_tbl[] = {
+	{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCIGENERIC) },
+	{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN) },
+	{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000PFB) },
+	{ 0 }
+};
+MODULE_DEVICE_TABLE(pci, applicom_pci_tbl);
+
+MODULE_AUTHOR("David Woodhouse & Applicom International");
+MODULE_DESCRIPTION("Driver for Applicom Profibus card");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(AC_MINOR);
+
+MODULE_SUPPORTED_DEVICE("ac");
+
+
+static struct applicom_board {
+	unsigned long PhysIO;
+	void __iomem *RamIO;
+	wait_queue_head_t FlagSleepSend;
+	long irq;
+	spinlock_t mutex;
+} apbs[MAX_BOARD];
+
+static unsigned int irq = 0;	/* interrupt number IRQ       */
+static unsigned long mem = 0;	/* physical segment of board  */
+
+module_param_hw(irq, uint, irq, 0);
+MODULE_PARM_DESC(irq, "IRQ of the Applicom board");
+module_param_hw(mem, ulong, iomem, 0);
+MODULE_PARM_DESC(mem, "Shared Memory Address of Applicom board");
+
+static unsigned int numboards;	/* number of installed boards */
+static volatile unsigned char Dummy;
+static DECLARE_WAIT_QUEUE_HEAD(FlagSleepRec);
+static unsigned int WriteErrorCount;	/* number of write errors     */
+static unsigned int ReadErrorCount;	/* number of read errors      */
+static unsigned int DeviceErrorCount;	/* number of device errors    */
+
+static ssize_t ac_read (struct file *, char __user *, size_t, loff_t *);
+static ssize_t ac_write (struct file *, const char __user *, size_t, loff_t *);
+static long ac_ioctl(struct file *, unsigned int, unsigned long);
+static irqreturn_t ac_interrupt(int, void *);
+
+static const struct file_operations ac_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.read = ac_read,
+	.write = ac_write,
+	.unlocked_ioctl = ac_ioctl,
+};
+
+static struct miscdevice ac_miscdev = {
+	AC_MINOR,
+	"ac",
+	&ac_fops
+};
+
+static int dummy;	/* dev_id for request_irq() */
+
+static int ac_register_board(unsigned long physloc, void __iomem *loc, 
+		      unsigned char boardno)
+{
+	volatile unsigned char byte_reset_it;
+
+	if((readb(loc + CONF_END_TEST)     != 0x00) ||
+	   (readb(loc + CONF_END_TEST + 1) != 0x55) ||
+	   (readb(loc + CONF_END_TEST + 2) != 0xAA) ||
+	   (readb(loc + CONF_END_TEST + 3) != 0xFF))
+		return 0;
+
+	if (!boardno)
+		boardno = readb(loc + NUMCARD_OWNER_TO_PC);
+
+	if (!boardno || boardno > MAX_BOARD) {
+		printk(KERN_WARNING "Board #%d (at 0x%lx) is out of range (1 <= x <= %d).\n",
+		       boardno, physloc, MAX_BOARD);
+		return 0;
+	}
+
+	if (apbs[boardno - 1].RamIO) {
+		printk(KERN_WARNING "Board #%d (at 0x%lx) conflicts with previous board #%d (at 0x%lx)\n", 
+		       boardno, physloc, boardno, apbs[boardno-1].PhysIO);
+		return 0;
+	}
+
+	boardno--;
+
+	apbs[boardno].PhysIO = physloc;
+	apbs[boardno].RamIO = loc;
+	init_waitqueue_head(&apbs[boardno].FlagSleepSend);
+	spin_lock_init(&apbs[boardno].mutex);
+	byte_reset_it = readb(loc + RAM_IT_TO_PC);
+
+	numboards++;
+	return boardno + 1;
+}
+
+static void __exit applicom_exit(void)
+{
+	unsigned int i;
+
+	misc_deregister(&ac_miscdev);
+
+	for (i = 0; i < MAX_BOARD; i++) {
+
+		if (!apbs[i].RamIO)
+			continue;
+
+		if (apbs[i].irq)
+			free_irq(apbs[i].irq, &dummy);
+
+		iounmap(apbs[i].RamIO);
+	}
+}
+
+static int __init applicom_init(void)
+{
+	int i, numisa = 0;
+	struct pci_dev *dev = NULL;
+	void __iomem *RamIO;
+	int boardno, ret;
+
+	printk(KERN_INFO "Applicom driver: $Id: ac.c,v 1.30 2000/03/22 16:03:57 dwmw2 Exp $\n");
+
+	/* Check for PCI cards first, whether or not mem/irq were given */
+
+	while ( (dev = pci_get_class(PCI_CLASS_OTHERS << 16, dev))) {
+
+		if (!pci_match_id(applicom_pci_tbl, dev))
+			continue;
+		
+		if (pci_enable_device(dev))
+			return -EIO;
+
+		RamIO = ioremap_nocache(pci_resource_start(dev, 0), LEN_RAM_IO);
+
+		if (!RamIO) {
+			printk(KERN_INFO "ac.o: Failed to ioremap PCI memory "
+				"space at 0x%llx\n",
+				(unsigned long long)pci_resource_start(dev, 0));
+			pci_disable_device(dev);
+			return -EIO;
+		}
+
+		printk(KERN_INFO "Applicom %s found at mem 0x%llx, irq %d\n",
+		       applicom_pci_devnames[dev->device-1],
+			   (unsigned long long)pci_resource_start(dev, 0),
+		       dev->irq);
+
+		boardno = ac_register_board(pci_resource_start(dev, 0),
+				RamIO, 0);
+		if (!boardno) {
+			printk(KERN_INFO "ac.o: PCI Applicom device doesn't have correct signature.\n");
+			iounmap(RamIO);
+			pci_disable_device(dev);
+			continue;
+		}
+
+		if (request_irq(dev->irq, &ac_interrupt, IRQF_SHARED, "Applicom PCI", &dummy)) {
+			printk(KERN_INFO "Could not allocate IRQ %d for PCI Applicom device.\n", dev->irq);
+			iounmap(RamIO);
+			pci_disable_device(dev);
+			apbs[boardno - 1].RamIO = NULL;
+			continue;
+		}
+
+		/* Enable interrupts. */
+
+		writeb(0x40, apbs[boardno - 1].RamIO + RAM_IT_FROM_PC);
+
+		apbs[boardno - 1].irq = dev->irq;
+	}
+
+	/* Finished with PCI cards. If none registered, 
+	 * and there was no mem/irq specified, exit */
+
+	if (!mem || !irq) {
+		if (numboards)
+			goto fin;
+		else {
+			printk(KERN_INFO "ac.o: No PCI boards found.\n");
+			printk(KERN_INFO "ac.o: For an ISA board you must supply memory and irq parameters.\n");
+			return -ENXIO;
+		}
+	}
+
+	/* Now try the specified ISA cards */
+
+	for (i = 0; i < MAX_ISA_BOARD; i++) {
+		RamIO = ioremap_nocache(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
+
+		if (!RamIO) {
+			printk(KERN_INFO "ac.o: Failed to ioremap the ISA card's memory space (slot #%d)\n", i + 1);
+			continue;
+		}
+
+		boardno = ac_register_board((unsigned long)mem + (LEN_RAM_IO * i),
+					    RamIO, i + 1);
+		if (!boardno) {
+			iounmap(RamIO);
+			continue;
+		}
+
+		printk(KERN_NOTICE "Applicom ISA card found at mem 0x%lx, irq %d\n", mem + (LEN_RAM_IO*i), irq);
+
+		if (!numisa) {
+			if (request_irq(irq, &ac_interrupt, IRQF_SHARED, "Applicom ISA", &dummy)) {
+				printk(KERN_WARNING "Could not allocate IRQ %d for ISA Applicom device.\n", irq);
+				iounmap(RamIO);
+				apbs[boardno - 1].RamIO = NULL;
+			}
+			else
+				apbs[boardno - 1].irq = irq;
+		}
+		else
+			apbs[boardno - 1].irq = 0;
+
+		numisa++;
+	}
+
+	if (!numisa)
+		printk(KERN_WARNING "ac.o: No valid ISA Applicom boards found "
+				"at mem 0x%lx\n", mem);
+
+ fin:
+	init_waitqueue_head(&FlagSleepRec);
+
+	WriteErrorCount = 0;
+	ReadErrorCount = 0;
+	DeviceErrorCount = 0;
+
+	if (numboards) {
+		ret = misc_register(&ac_miscdev);
+		if (ret) {
+			printk(KERN_WARNING "ac.o: Unable to register misc device\n");
+			goto out;
+		}
+		for (i = 0; i < MAX_BOARD; i++) {
+			int serial;
+			char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1];
+
+			if (!apbs[i].RamIO)
+				continue;
+
+			for (serial = 0; serial < SERIAL_NUMBER - TYPE_CARD; serial++)
+				boardname[serial] = readb(apbs[i].RamIO + TYPE_CARD + serial);
+
+			boardname[serial] = 0;
+
+
+			printk(KERN_INFO "Applicom board %d: %s, PROM V%d.%d",
+			       i+1, boardname,
+			       (int)(readb(apbs[i].RamIO + VERS) >> 4),
+			       (int)(readb(apbs[i].RamIO + VERS) & 0xF));
+			
+			serial = (readb(apbs[i].RamIO + SERIAL_NUMBER) << 16) + 
+				(readb(apbs[i].RamIO + SERIAL_NUMBER + 1) << 8) + 
+				(readb(apbs[i].RamIO + SERIAL_NUMBER + 2) );
+
+			if (serial != 0)
+				printk(" S/N %d\n", serial);
+			else
+				printk("\n");
+		}
+		return 0;
+	}
+
+	return -ENXIO;
+
+out:
+	for (i = 0; i < MAX_BOARD; i++) {
+		if (!apbs[i].RamIO)
+			continue;
+		if (apbs[i].irq)
+			free_irq(apbs[i].irq, &dummy);
+		iounmap(apbs[i].RamIO);
+	}
+	return ret;
+}
+
+module_init(applicom_init);
+module_exit(applicom_exit);
+
+
+static ssize_t ac_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos)
+{
+	unsigned int NumCard;	/* Board number 1 -> 8           */
+	unsigned int IndexCard;	/* Index board number 0 -> 7     */
+	unsigned char TicCard;	/* Board TIC to send             */
+	unsigned long flags;	/* Current priority              */
+	struct st_ram_io st_loc;
+	struct mailbox tmpmailbox;
+#ifdef DEBUG
+	int c;
+#endif
+	DECLARE_WAITQUEUE(wait, current);
+
+	if (count != sizeof(struct st_ram_io) + sizeof(struct mailbox)) {
+		static int warncount = 5;
+		if (warncount) {
+			printk(KERN_INFO "Hmmm. write() of Applicom card, length %zd != expected %zd\n",
+			       count, sizeof(struct st_ram_io) + sizeof(struct mailbox));
+			warncount--;
+		}
+		return -EINVAL;
+	}
+
+	if(copy_from_user(&st_loc, buf, sizeof(struct st_ram_io))) 
+		return -EFAULT;
+	
+	if(copy_from_user(&tmpmailbox, &buf[sizeof(struct st_ram_io)],
+			  sizeof(struct mailbox))) 
+		return -EFAULT;
+
+	NumCard = st_loc.num_card;	/* board number to send          */
+	TicCard = st_loc.tic_des_from_pc;	/* tic number to send            */
+	IndexCard = NumCard - 1;
+
+	if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
+		return -EINVAL;
+
+#ifdef DEBUG
+	printk("Write to applicom card #%d. struct st_ram_io follows:",
+	       IndexCard+1);
+
+	for (c = 0; c < sizeof(struct st_ram_io);) {
+		printk("\n%5.5X: %2.2X", c, ((unsigned char *) &st_loc)[c]);
+
+		for (c++; c % 8 && c < sizeof(struct st_ram_io); c++) {
+			printk(" %2.2X", ((unsigned char *) &st_loc)[c]);
+		}
+	}
+
+	printk("\nstruct mailbox follows:");
+
+	for (c = 0; c < sizeof(struct mailbox);) {
+		printk("\n%5.5X: %2.2X", c, ((unsigned char *) &tmpmailbox)[c]);
+
+		for (c++; c % 8 && c < sizeof(struct mailbox); c++) {
+			printk(" %2.2X", ((unsigned char *) &tmpmailbox)[c]);
+		}
+	}
+
+	printk("\n");
+#endif
+
+	spin_lock_irqsave(&apbs[IndexCard].mutex, flags);
+
+	/* Check that the ready byte is valid */
+	if(readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) > 2) { 
+		Dummy = readb(apbs[IndexCard].RamIO + VERS);
+		spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
+		printk(KERN_WARNING "APPLICOM driver write error board %d, DataFromPcReady = %d\n",
+		       IndexCard,(int)readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY));
+		DeviceErrorCount++;
+		return -EIO;
+	}
+	
+	/* Place ourselves on the wait queue */
+	set_current_state(TASK_INTERRUPTIBLE);
+	add_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait);
+
+	/* Check whether the card is ready for us */
+	while (readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) != 0) {
+		Dummy = readb(apbs[IndexCard].RamIO + VERS);
+		/* It's busy. Sleep. */
+
+		spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
+		schedule();
+		if (signal_pending(current)) {
+			remove_wait_queue(&apbs[IndexCard].FlagSleepSend,
+					  &wait);
+			return -EINTR;
+		}
+		spin_lock_irqsave(&apbs[IndexCard].mutex, flags);
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+
+	/* We may not have actually slept */
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait);
+
+	writeb(1, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
+
+	/* Which is best - lock down the pages with rawio and then
+	   copy directly, or use bounce buffers? For now we do the latter 
+	   because it works with 2.2 still */
+	{
+		unsigned char *from = (unsigned char *) &tmpmailbox;
+		void __iomem *to = apbs[IndexCard].RamIO + RAM_FROM_PC;
+		int c;
+
+		for (c = 0; c < sizeof(struct mailbox); c++)
+			writeb(*(from++), to++);
+	}
+
+	writeb(0x20, apbs[IndexCard].RamIO + TIC_OWNER_FROM_PC);
+	writeb(0xff, apbs[IndexCard].RamIO + NUMCARD_OWNER_FROM_PC);
+	writeb(TicCard, apbs[IndexCard].RamIO + TIC_DES_FROM_PC);
+	writeb(NumCard, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC);
+	writeb(2, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
+	writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
+	Dummy = readb(apbs[IndexCard].RamIO + VERS);
+	spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
+	return 0;
+}
+
+static int do_ac_read(int IndexCard, char __user *buf,
+		struct st_ram_io *st_loc, struct mailbox *mailbox)
+{
+	void __iomem *from = apbs[IndexCard].RamIO + RAM_TO_PC;
+	unsigned char *to = (unsigned char *)mailbox;
+#ifdef DEBUG
+	int c;
+#endif
+
+	st_loc->tic_owner_to_pc = readb(apbs[IndexCard].RamIO + TIC_OWNER_TO_PC);
+	st_loc->numcard_owner_to_pc = readb(apbs[IndexCard].RamIO + NUMCARD_OWNER_TO_PC);
+
+
+	{
+		int c;
+
+		for (c = 0; c < sizeof(struct mailbox); c++)
+			*(to++) = readb(from++);
+	}
+	writeb(1, apbs[IndexCard].RamIO + ACK_FROM_PC_READY);
+	writeb(1, apbs[IndexCard].RamIO + TYP_ACK_FROM_PC);
+	writeb(IndexCard+1, apbs[IndexCard].RamIO + NUMCARD_ACK_FROM_PC);
+	writeb(readb(apbs[IndexCard].RamIO + TIC_OWNER_TO_PC), 
+	       apbs[IndexCard].RamIO + TIC_ACK_FROM_PC);
+	writeb(2, apbs[IndexCard].RamIO + ACK_FROM_PC_READY);
+	writeb(0, apbs[IndexCard].RamIO + DATA_TO_PC_READY);
+	writeb(2, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
+	Dummy = readb(apbs[IndexCard].RamIO + VERS);
+
+#ifdef DEBUG
+		printk("Read from applicom card #%d. struct st_ram_io follows:", NumCard);
+
+		for (c = 0; c < sizeof(struct st_ram_io);) {
+			printk("\n%5.5X: %2.2X", c, ((unsigned char *)st_loc)[c]);
+
+			for (c++; c % 8 && c < sizeof(struct st_ram_io); c++) {
+				printk(" %2.2X", ((unsigned char *)st_loc)[c]);
+			}
+		}
+
+		printk("\nstruct mailbox follows:");
+
+		for (c = 0; c < sizeof(struct mailbox);) {
+			printk("\n%5.5X: %2.2X", c, ((unsigned char *)mailbox)[c]);
+
+			for (c++; c % 8 && c < sizeof(struct mailbox); c++) {
+				printk(" %2.2X", ((unsigned char *)mailbox)[c]);
+			}
+		}
+		printk("\n");
+#endif
+	return (sizeof(struct st_ram_io) + sizeof(struct mailbox));
+}
+
+static ssize_t ac_read (struct file *filp, char __user *buf, size_t count, loff_t *ptr)
+{
+	unsigned long flags;
+	unsigned int i;
+	unsigned char tmp;
+	int ret = 0;
+	DECLARE_WAITQUEUE(wait, current);
+#ifdef DEBUG
+	int loopcount=0;
+#endif
+	/* No need to ratelimit this. Only root can trigger it anyway */
+	if (count != sizeof(struct st_ram_io) + sizeof(struct mailbox)) {
+		printk( KERN_WARNING "Hmmm. read() of Applicom card, length %zd != expected %zd\n",
+			count,sizeof(struct st_ram_io) + sizeof(struct mailbox));
+		return -EINVAL;
+	}
+	
+	while(1) {
+		/* Stick ourself on the wait queue */
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue(&FlagSleepRec, &wait);
+		
+		/* Scan each board, looking for one which has a packet for us */
+		for (i=0; i < MAX_BOARD; i++) {
+			if (!apbs[i].RamIO)
+				continue;
+			spin_lock_irqsave(&apbs[i].mutex, flags);
+			
+			tmp = readb(apbs[i].RamIO + DATA_TO_PC_READY);
+			
+			if (tmp == 2) {
+				struct st_ram_io st_loc;
+				struct mailbox mailbox;
+
+				/* Got a packet for us */
+				memset(&st_loc, 0, sizeof(st_loc));
+				ret = do_ac_read(i, buf, &st_loc, &mailbox);
+				spin_unlock_irqrestore(&apbs[i].mutex, flags);
+				set_current_state(TASK_RUNNING);
+				remove_wait_queue(&FlagSleepRec, &wait);
+
+				if (copy_to_user(buf, &st_loc, sizeof(st_loc)))
+					return -EFAULT;
+				if (copy_to_user(buf + sizeof(st_loc), &mailbox, sizeof(mailbox)))
+					return -EFAULT;
+				return ret;
+			}
+			
+			if (tmp > 2) {
+				/* Got an error */
+				Dummy = readb(apbs[i].RamIO + VERS);
+				
+				spin_unlock_irqrestore(&apbs[i].mutex, flags);
+				set_current_state(TASK_RUNNING);
+				remove_wait_queue(&FlagSleepRec, &wait);
+				
+				printk(KERN_WARNING "APPLICOM driver read error board %d, DataToPcReady = %d\n",
+				       i,(int)readb(apbs[i].RamIO + DATA_TO_PC_READY));
+				DeviceErrorCount++;
+				return -EIO;
+			}
+			
+			/* Nothing for us. Try the next board */
+			Dummy = readb(apbs[i].RamIO + VERS);
+			spin_unlock_irqrestore(&apbs[i].mutex, flags);
+			
+		} /* per board */
+
+		/* OK - No boards had data for us. Sleep now */
+
+		schedule();
+		remove_wait_queue(&FlagSleepRec, &wait);
+
+		if (signal_pending(current))
+			return -EINTR;
+
+#ifdef DEBUG
+		if (loopcount++ > 2) {
+			printk(KERN_DEBUG "Looping in ac_read. loopcount %d\n", loopcount);
+		}
+#endif
+	} 
+}
+
+static irqreturn_t ac_interrupt(int vec, void *dev_instance)
+{
+	unsigned int i;
+	unsigned int FlagInt;
+	unsigned int LoopCount;
+	int handled = 0;
+
+	//    printk("Applicom interrupt on IRQ %d occurred\n", vec);
+
+	LoopCount = 0;
+
+	do {
+		FlagInt = 0;
+		for (i = 0; i < MAX_BOARD; i++) {
+			
+			/* Skip if this board doesn't exist */
+			if (!apbs[i].RamIO)
+				continue;
+
+			spin_lock(&apbs[i].mutex);
+
+			/* Skip if this board doesn't want attention */
+			if(readb(apbs[i].RamIO + RAM_IT_TO_PC) == 0) {
+				spin_unlock(&apbs[i].mutex);
+				continue;
+			}
+
+			handled = 1;
+			FlagInt = 1;
+			writeb(0, apbs[i].RamIO + RAM_IT_TO_PC);
+
+			if (readb(apbs[i].RamIO + DATA_TO_PC_READY) > 2) {
+				printk(KERN_WARNING "APPLICOM driver interrupt err board %d, DataToPcReady = %d\n",
+				       i+1,(int)readb(apbs[i].RamIO + DATA_TO_PC_READY));
+				DeviceErrorCount++;
+			}
+
+			if((readb(apbs[i].RamIO + DATA_FROM_PC_READY) > 2) && 
+			   (readb(apbs[i].RamIO + DATA_FROM_PC_READY) != 6)) {
+				
+				printk(KERN_WARNING "APPLICOM driver interrupt err board %d, DataFromPcReady = %d\n",
+				       i+1,(int)readb(apbs[i].RamIO + DATA_FROM_PC_READY));
+				DeviceErrorCount++;
+			}
+
+			if (readb(apbs[i].RamIO + DATA_TO_PC_READY) == 2) {	/* mailbox sent by the card?    */
+				if (waitqueue_active(&FlagSleepRec))
+					wake_up_interruptible(&FlagSleepRec);
+			}
+
+			if (readb(apbs[i].RamIO + DATA_FROM_PC_READY) == 0) {	/* ram i/o free for write by pc ? */
+				if (waitqueue_active(&apbs[i].FlagSleepSend)) {	/* process sleep during read ?    */
+					wake_up_interruptible(&apbs[i].FlagSleepSend);
+				}
+			}
+			Dummy = readb(apbs[i].RamIO + VERS);
+
+			if(readb(apbs[i].RamIO + RAM_IT_TO_PC)) {
+				/* There's another int waiting on this card */
+				spin_unlock(&apbs[i].mutex);
+				i--;
+			} else {
+				spin_unlock(&apbs[i].mutex);
+			}
+		}
+		if (FlagInt)
+			LoopCount = 0;
+		else
+			LoopCount++;
+	} while(LoopCount < 2);
+	return IRQ_RETVAL(handled);
+}
+
+
+
+static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{				/* @ ADG or ATO depending on the case */
+	int i;
+	unsigned char IndexCard;
+	void __iomem *pmem;
+	int ret = 0;
+	volatile unsigned char byte_reset_it;
+	struct st_ram_io *adgl;
+	void __user *argp = (void __user *)arg;
+
+	/* In general, the device is only openable by root anyway, so we're not
+	   particularly concerned that bogus ioctls can flood the console. */
+
+	adgl = memdup_user(argp, sizeof(struct st_ram_io));
+	if (IS_ERR(adgl))
+		return PTR_ERR(adgl);
+
+	mutex_lock(&ac_mutex);	
+	IndexCard = adgl->num_card-1;
+	 
+	if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
+		static int warncount = 10;
+		if (warncount) {
+			printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
+			warncount--;
+		}
+		kfree(adgl);
+		mutex_unlock(&ac_mutex);
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+		
+	case 0:
+		pmem = apbs[IndexCard].RamIO;
+		for (i = 0; i < sizeof(struct st_ram_io); i++)
+			((unsigned char *)adgl)[i]=readb(pmem++);
+		if (copy_to_user(argp, adgl, sizeof(struct st_ram_io)))
+			ret = -EFAULT;
+		break;
+	case 1:
+		pmem = apbs[IndexCard].RamIO + CONF_END_TEST;
+		for (i = 0; i < 4; i++)
+			adgl->conf_end_test[i] = readb(pmem++);
+		for (i = 0; i < 2; i++)
+			adgl->error_code[i] = readb(pmem++);
+		for (i = 0; i < 4; i++)
+			adgl->parameter_error[i] = readb(pmem++);
+		pmem = apbs[IndexCard].RamIO + VERS;
+		adgl->vers = readb(pmem);
+		pmem = apbs[IndexCard].RamIO + TYPE_CARD;
+		for (i = 0; i < 20; i++)
+			adgl->reserv1[i] = readb(pmem++);
+		*(int *)&adgl->reserv1[20] =  
+			(readb(apbs[IndexCard].RamIO + SERIAL_NUMBER) << 16) + 
+			(readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 1) << 8) + 
+			(readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 2) );
+
+		if (copy_to_user(argp, adgl, sizeof(struct st_ram_io)))
+			ret = -EFAULT;
+		break;
+	case 2:
+		pmem = apbs[IndexCard].RamIO + CONF_END_TEST;
+		for (i = 0; i < 10; i++)
+			writeb(0xff, pmem++);
+		writeb(adgl->data_from_pc_ready, 
+		       apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
+
+		writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
+		
+		for (i = 0; i < MAX_BOARD; i++) {
+			if (apbs[i].RamIO) {
+				byte_reset_it = readb(apbs[i].RamIO + RAM_IT_TO_PC);
+			}
+		}
+		break;
+	case 3:
+		pmem = apbs[IndexCard].RamIO + TIC_DES_FROM_PC;
+		writeb(adgl->tic_des_from_pc, pmem);
+		break;
+	case 4:
+		pmem = apbs[IndexCard].RamIO + TIC_OWNER_TO_PC;
+		adgl->tic_owner_to_pc     = readb(pmem++);
+		adgl->numcard_owner_to_pc = readb(pmem);
+		if (copy_to_user(argp, adgl,sizeof(struct st_ram_io)))
+			ret = -EFAULT;
+		break;
+	case 5:
+		writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_OWNER_TO_PC);
+		writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC);
+		writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_ACK_FROM_PC);
+		writeb(4, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
+		writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
+		break;
+	case 6:
+		printk(KERN_INFO "APPLICOM driver release .... V2.8.0 ($Revision: 1.30 $)\n");
+		printk(KERN_INFO "Number of installed boards . %d\n", (int) numboards);
+		printk(KERN_INFO "Segment of board ........... %X\n", (int) mem);
+		printk(KERN_INFO "Interrupt IRQ number ....... %d\n", (int) irq);
+		for (i = 0; i < MAX_BOARD; i++) {
+			int serial;
+			char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1];
+
+			if (!apbs[i].RamIO)
+				continue;
+
+			for (serial = 0; serial < SERIAL_NUMBER - TYPE_CARD; serial++)
+				boardname[serial] = readb(apbs[i].RamIO + TYPE_CARD + serial);
+			boardname[serial] = 0;
+
+			printk(KERN_INFO "Prom version board %d ....... V%d.%d %s",
+			       i+1,
+			       (int)(readb(apbs[i].RamIO + VERS) >> 4),
+			       (int)(readb(apbs[i].RamIO + VERS) & 0xF),
+			       boardname);
+
+
+			serial = (readb(apbs[i].RamIO + SERIAL_NUMBER) << 16) + 
+				(readb(apbs[i].RamIO + SERIAL_NUMBER + 1) << 8) + 
+				(readb(apbs[i].RamIO + SERIAL_NUMBER + 2) );
+
+			if (serial != 0)
+				printk(" S/N %d\n", serial);
+			else
+				printk("\n");
+		}
+		if (DeviceErrorCount != 0)
+			printk(KERN_INFO "DeviceErrorCount ........... %d\n", DeviceErrorCount);
+		if (ReadErrorCount != 0)
+			printk(KERN_INFO "ReadErrorCount ............. %d\n", ReadErrorCount);
+		if (WriteErrorCount != 0)
+			printk(KERN_INFO "WriteErrorCount ............ %d\n", WriteErrorCount);
+		if (waitqueue_active(&FlagSleepRec))
+			printk(KERN_INFO "Process in read pending\n");
+		for (i = 0; i < MAX_BOARD; i++) {
+			if (apbs[i].RamIO && waitqueue_active(&apbs[i].FlagSleepSend))
+				printk(KERN_INFO "Process in write pending board %d\n",i+1);
+		}
+		break;
+	default:
+		ret = -ENOTTY;
+		break;
+	}
+	if (IndexCard < MAX_BOARD && apbs[IndexCard].RamIO)
+		Dummy = readb(apbs[IndexCard].RamIO + VERS);
+	kfree(adgl);
+	mutex_unlock(&ac_mutex);
+	return ret;
+}
+
diff --git a/drivers/char/applicom.h b/drivers/char/applicom.h
new file mode 100644
index 0000000..282e08f
--- /dev/null
+++ b/drivers/char/applicom.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* $Id: applicom.h,v 1.2 1999/08/28 15:09:49 dwmw2 Exp $ */
+
+
+#ifndef __LINUX_APPLICOM_H__
+#define __LINUX_APPLICOM_H__
+
+
+#define DATA_TO_PC_READY      0x00
+#define TIC_OWNER_TO_PC       0x01
+#define NUMCARD_OWNER_TO_PC   0x02
+#define TIC_DES_TO_PC         0x03
+#define NUMCARD_DES_TO_PC     0x04
+#define DATA_FROM_PC_READY    0x05
+#define TIC_OWNER_FROM_PC     0x06
+#define NUMCARD_OWNER_FROM_PC 0x07
+#define TIC_DES_FROM_PC       0x08
+#define NUMCARD_DES_FROM_PC   0x09
+#define ACK_FROM_PC_READY     0x0E
+#define TIC_ACK_FROM_PC       0x0F
+#define NUMCARD_ACK_FROM_PC   0x010
+#define TYP_ACK_FROM_PC       0x011
+#define CONF_END_TEST         0x012
+#define ERROR_CODE            0x016 
+#define PARAMETER_ERROR       0x018 
+#define VERS                  0x01E 
+#define RAM_TO_PC             0x040
+#define RAM_FROM_PC           0x0170
+#define TYPE_CARD             0x03C0
+#define SERIAL_NUMBER         0x03DA
+#define RAM_IT_FROM_PC        0x03FE
+#define RAM_IT_TO_PC          0x03FF
+
+struct mailbox {
+	u16  stjb_codef;		/* offset 00 */
+	s16  stjb_status;     		/* offset 02 */
+	u16  stjb_ticuser_root;		/* offset 04 */
+	u8   stjb_piduser[4];		/* offset 06 */
+	u16  stjb_mode;			/* offset 0A */
+	u16  stjb_time;			/* offset 0C */
+	u16  stjb_stop;			/* offset 0E */
+	u16  stjb_nfonc;		/* offset 10 */
+	u16  stjb_ncard;		/* offset 12 */
+	u16  stjb_nchan;		/* offset 14 */
+	u16  stjb_nes;			/* offset 16 */
+	u16  stjb_nb;			/* offset 18 */
+	u16  stjb_typvar;		/* offset 1A */
+	u32  stjb_adr;			/* offset 1C */
+	u16  stjb_ticuser_dispcyc;	/* offset 20 */
+	u16  stjb_ticuser_protocol;	/* offset 22 */
+	u8   stjb_filler[12];		/* offset 24 */
+	u8   stjb_data[256];		/* offset 30 */
+};
+
+struct st_ram_io 
+{
+	unsigned char data_to_pc_ready;
+	unsigned char tic_owner_to_pc;
+	unsigned char numcard_owner_to_pc;
+	unsigned char tic_des_to_pc;
+	unsigned char numcard_des_to_pc;
+	unsigned char data_from_pc_ready;
+	unsigned char tic_owner_from_pc;
+	unsigned char numcard_owner_from_pc;
+	unsigned char tic_des_from_pc;
+	unsigned char numcard_des_from_pc;
+	unsigned char ack_to_pc_ready;
+	unsigned char tic_ack_to_pc;
+	unsigned char numcard_ack_to_pc;
+	unsigned char typ_ack_to_pc;
+	unsigned char ack_from_pc_ready;
+	unsigned char tic_ack_from_pc;
+	unsigned char numcard_ack_from_pc;
+	unsigned char typ_ack_from_pc;
+	unsigned char conf_end_test[4];
+	unsigned char error_code[2];
+	unsigned char parameter_error[4];
+	unsigned char time_base;
+	unsigned char nul_inc;
+	unsigned char vers;
+	unsigned char num_card;
+	unsigned char reserv1[32];
+};
+
+
+#endif /* __LINUX_APPLICOM_H__ */
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
new file mode 100644
index 0000000..a6cef54
--- /dev/null
+++ b/drivers/char/bsr.c
@@ -0,0 +1,361 @@
+/* IBM POWER Barrier Synchronization Register Driver
+ *
+ * Copyright IBM Corporation 2008
+ *
+ * Author: Sonny Rao <sonnyrao@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+
+/*
+ * This driver exposes a special register which can be used for fast
+ * synchronization across a large SMP machine.  The hardware is exposed
+ * as an array of bytes; each process writes to one of the bytes to
+ * indicate that it has finished the current stage, and this update is
+ * broadcast to all processors without having to bounce a cacheline
+ * between them.  On POWER5 and POWER6 there is one of these registers
+ * per SMP, but it is presented in two forms: first as a single whole
+ * register, and then as a number of smaller registers which alias to
+ * parts of that whole register.  This can potentially allow multiple
+ * groups of processes to each have their own private synchronization
+ * device.
+ *
+ * Note that this hardware *must* be written to using *only* single-byte
+ * writes.  It may be read using 1-, 2-, 4-, or 8-byte loads, which must
+ * be aligned since this region is treated as cache-inhibited.  Processes
+ * should also use a full sync before and after writing to the BSR to
+ * ensure that all stores and the BSR update have made it to all chips
+ * in the system.
+ */
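+
+/*
+ * A minimal userspace sketch of the intended usage, assuming a device
+ * node such as /dev/bsr8_0 (the node name, the 4k mapping length and
+ * the byte index are illustrative, not mandated by this driver):
+ *
+ *	int fd = open("/dev/bsr8_0", O_RDWR);
+ *	volatile unsigned char *bsr = mmap(NULL, 4096,
+ *			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *
+ *	__asm__ __volatile__("sync");	   order all prior stores
+ *	bsr[my_byte] = 1;		   single-byte store only
+ *	__asm__ __volatile__("sync");	   propagate the BSR update
+ */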
+
+/* This is an arbitrary number; up to POWER6 it has been 17 or fewer */
+#define BSR_MAX_DEVS (32)
+
+struct bsr_dev {
+	u64      bsr_addr;     /* Real address */
+	u64      bsr_len;      /* length of mem region we can map */
+	unsigned bsr_bytes;    /* size of the BSR reg itself */
+	unsigned bsr_stride;   /* interval at which BSR repeats in the page */
+	unsigned bsr_type;     /* maps to enum below */
+	unsigned bsr_num;      /* bsr id number for its type */
+	int      bsr_minor;
+
+	struct list_head bsr_list;
+
+	dev_t    bsr_dev;
+	struct cdev bsr_cdev;
+	struct device *bsr_device;
+	char     bsr_name[32];
+
+};
+
+static unsigned total_bsr_devs;
+static struct list_head bsr_devs = LIST_HEAD_INIT(bsr_devs);
+static struct class *bsr_class;
+static int bsr_major;
+
+enum {
+	BSR_8    = 0,
+	BSR_16   = 1,
+	BSR_64   = 2,
+	BSR_128  = 3,
+	BSR_4096 = 4,
+	BSR_UNKNOWN = 5,
+	BSR_MAX  = 6,
+};
+
+static unsigned bsr_types[BSR_MAX];
+
+static ssize_t
+bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
+	return sprintf(buf, "%u\n", bsr_dev->bsr_bytes);
+}
+static DEVICE_ATTR_RO(bsr_size);
+
+static ssize_t
+bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
+	return sprintf(buf, "%u\n", bsr_dev->bsr_stride);
+}
+static DEVICE_ATTR_RO(bsr_stride);
+
+static ssize_t
+bsr_length_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
+	return sprintf(buf, "%llu\n", bsr_dev->bsr_len);
+}
+static DEVICE_ATTR_RO(bsr_length);
+
+static struct attribute *bsr_dev_attrs[] = {
+	&dev_attr_bsr_size.attr,
+	&dev_attr_bsr_stride.attr,
+	&dev_attr_bsr_length.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(bsr_dev);
+
+static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long size   = vma->vm_end - vma->vm_start;
+	struct bsr_dev *dev = filp->private_data;
+	int ret;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	/* check for the case of a small BSR device and map one 4k page for it */
+	if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
+		ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
+				   vma->vm_page_prot);
+	else if (size <= dev->bsr_len)
+		ret = io_remap_pfn_range(vma, vma->vm_start,
+					 dev->bsr_addr >> PAGE_SHIFT,
+					 size, vma->vm_page_prot);
+	else
+		return -EINVAL;
+
+	if (ret)
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int bsr_open(struct inode * inode, struct file * filp)
+{
+	struct cdev *cdev = inode->i_cdev;
+	struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev);
+
+	filp->private_data = dev;
+	return 0;
+}
+
+static const struct file_operations bsr_fops = {
+	.owner = THIS_MODULE,
+	.mmap  = bsr_mmap,
+	.open  = bsr_open,
+	.llseek = noop_llseek,
+};
+
+static void bsr_cleanup_devs(void)
+{
+	struct bsr_dev *cur, *n;
+
+	list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) {
+		if (cur->bsr_device) {
+			cdev_del(&cur->bsr_cdev);
+			device_del(cur->bsr_device);
+		}
+		list_del(&cur->bsr_list);
+		kfree(cur);
+	}
+}
+
+static int bsr_add_node(struct device_node *bn)
+{
+	int bsr_stride_len, bsr_bytes_len, num_bsr_devs;
+	const u32 *bsr_stride;
+	const u32 *bsr_bytes;
+	unsigned i;
+	int ret = -ENODEV;
+
+	bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len);
+	bsr_bytes  = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len);
+
+	if (!bsr_stride || !bsr_bytes ||
+	    (bsr_stride_len != bsr_bytes_len)) {
+		printk(KERN_ERR "bsr of-node has missing/incorrect property\n");
+		return ret;
+	}
+
+	num_bsr_devs = bsr_bytes_len / sizeof(u32);
+
+	for (i = 0 ; i < num_bsr_devs; i++) {
+		struct bsr_dev *cur = kzalloc(sizeof(struct bsr_dev),
+					      GFP_KERNEL);
+		struct resource res;
+		int result;
+
+		if (!cur) {
+			printk(KERN_ERR "Unable to alloc bsr dev\n");
+			ret = -ENOMEM;
+			goto out_err;
+		}
+
+		result = of_address_to_resource(bn, i, &res);
+		if (result < 0) {
+			printk(KERN_ERR "bsr of-node has invalid reg property, skipping\n");
+			kfree(cur);
+			continue;
+		}
+
+		cur->bsr_minor  = i + total_bsr_devs;
+		cur->bsr_addr   = res.start;
+		cur->bsr_len    = resource_size(&res);
+		cur->bsr_bytes  = bsr_bytes[i];
+		cur->bsr_stride = bsr_stride[i];
+		cur->bsr_dev    = MKDEV(bsr_major, i + total_bsr_devs);
+
+		/* If bsr_len is > 4k and less than PAGE_SIZE (64k pages), */
+		/* we can only map 4k of it, so only advertise the 4k in sysfs */
+		if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
+			cur->bsr_len = 4096;
+
+		switch(cur->bsr_bytes) {
+		case 8:
+			cur->bsr_type = BSR_8;
+			break;
+		case 16:
+			cur->bsr_type = BSR_16;
+			break;
+		case 64:
+			cur->bsr_type = BSR_64;
+			break;
+		case 128:
+			cur->bsr_type = BSR_128;
+			break;
+		case 4096:
+			cur->bsr_type = BSR_4096;
+			break;
+		default:
+			cur->bsr_type = BSR_UNKNOWN;
+		}
+
+		cur->bsr_num = bsr_types[cur->bsr_type];
+		snprintf(cur->bsr_name, 32, "bsr%d_%d",
+			 cur->bsr_bytes, cur->bsr_num);
+
+		cdev_init(&cur->bsr_cdev, &bsr_fops);
+		result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1);
+		if (result) {
+			kfree(cur);
+			goto out_err;
+		}
+
+		cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev,
+						cur, "%s", cur->bsr_name);
+		if (IS_ERR(cur->bsr_device)) {
+			printk(KERN_ERR "device_create failed for %s\n",
+			       cur->bsr_name);
+			cdev_del(&cur->bsr_cdev);
+			kfree(cur);
+			goto out_err;
+		}
+
+		bsr_types[cur->bsr_type] = cur->bsr_num + 1;
+		list_add_tail(&cur->bsr_list, &bsr_devs);
+	}
+
+	total_bsr_devs += num_bsr_devs;
+
+	return 0;
+
+ out_err:
+
+	bsr_cleanup_devs();
+	return ret;
+}
+
+static int bsr_create_devs(struct device_node *bn)
+{
+	int ret;
+
+	while (bn) {
+		ret = bsr_add_node(bn);
+		if (ret) {
+			of_node_put(bn);
+			return ret;
+		}
+		bn = of_find_compatible_node(bn, NULL, "ibm,bsr");
+	}
+	return 0;
+}
+
+static int __init bsr_init(void)
+{
+	struct device_node *np;
+	dev_t bsr_dev;
+	int ret = -ENODEV;
+
+	np = of_find_compatible_node(NULL, NULL, "ibm,bsr");
+	if (!np)
+		goto out_err;
+
+	bsr_class = class_create(THIS_MODULE, "bsr");
+	if (IS_ERR(bsr_class)) {
+		printk(KERN_ERR "class_create() failed for bsr_class\n");
+		ret = PTR_ERR(bsr_class);
+		goto out_err_1;
+	}
+	bsr_class->dev_groups = bsr_dev_groups;
+
+	ret = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
+	bsr_major = MAJOR(bsr_dev);
+	if (ret < 0) {
+		printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n");
+		goto out_err_2;
+	}
+
+	ret = bsr_create_devs(np);
+	if (ret < 0) {
+		np = NULL;
+		goto out_err_3;
+	}
+
+	return 0;
+
+ out_err_3:
+	unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS);
+
+ out_err_2:
+	class_destroy(bsr_class);
+
+ out_err_1:
+	of_node_put(np);
+
+ out_err:
+
+	return ret;
+}
+
+static void __exit  bsr_exit(void)
+{
+
+	bsr_cleanup_devs();
+
+	if (bsr_class)
+		class_destroy(bsr_class);
+
+	if (bsr_major)
+		unregister_chrdev_region(MKDEV(bsr_major, 0), BSR_MAX_DEVS);
+}
+
+module_init(bsr_init);
+module_exit(bsr_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>");
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
new file mode 100644
index 0000000..a5ecf6d
--- /dev/null
+++ b/drivers/char/ds1620.c
@@ -0,0 +1,423 @@
+/*
+ * linux/drivers/char/ds1620.c: Dallas Semiconductor DS1620
+ *   thermometer driver (as used in the Rebel.com NetWinder)
+ */
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/capability.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <linux/uaccess.h>
+#include <asm/therm.h>
+
+#ifdef CONFIG_PROC_FS
+/* define for /proc interface */
+#define THERM_USE_PROC
+#endif
+
+/* Definitions for DS1620 chip */
+#define THERM_START_CONVERT	0xee
+#define THERM_RESET		0xaf
+#define THERM_READ_CONFIG	0xac
+#define THERM_READ_TEMP		0xaa
+#define THERM_READ_TL		0xa2
+#define THERM_READ_TH		0xa1
+#define THERM_WRITE_CONFIG	0x0c
+#define THERM_WRITE_TL		0x02
+#define THERM_WRITE_TH		0x01
+
+#define CFG_CPU			2
+#define CFG_1SHOT		1
+
+static DEFINE_MUTEX(ds1620_mutex);
+static const char *fan_state[] = { "off", "on", "on (hardwired)" };
+
+/*
+ * Start of NetWinder specifics
+ *  Note!  We have to hold the gpio lock with IRQs disabled over the
+ *  whole of our transaction to the Dallas chip, since there is a
+ *  chance that the WaveArtist driver could touch these bits to
+ *  enable or disable the speaker.
+ */
+extern unsigned int system_rev;
+
+static inline void netwinder_ds1620_set_clk(int clk)
+{
+	nw_gpio_modify_op(GPIO_DSCLK, clk ? GPIO_DSCLK : 0);
+}
+
+static inline void netwinder_ds1620_set_data(int dat)
+{
+	nw_gpio_modify_op(GPIO_DATA, dat ? GPIO_DATA : 0);
+}
+
+static inline int netwinder_ds1620_get_data(void)
+{
+	return nw_gpio_read() & GPIO_DATA;
+}
+
+static inline void netwinder_ds1620_set_data_dir(int dir)
+{
+	nw_gpio_modify_io(GPIO_DATA, dir ? GPIO_DATA : 0);
+}
+
+static inline void netwinder_ds1620_reset(void)
+{
+	nw_cpld_modify(CPLD_DS_ENABLE, 0);
+	nw_cpld_modify(CPLD_DS_ENABLE, CPLD_DS_ENABLE);
+}
+
+static inline void netwinder_lock(unsigned long *flags)
+{
+	raw_spin_lock_irqsave(&nw_gpio_lock, *flags);
+}
+
+static inline void netwinder_unlock(unsigned long *flags)
+{
+	raw_spin_unlock_irqrestore(&nw_gpio_lock, *flags);
+}
+
+static inline void netwinder_set_fan(int i)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+	nw_gpio_modify_op(GPIO_FAN, i ? GPIO_FAN : 0);
+	raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+}
+
+static inline int netwinder_get_fan(void)
+{
+	if ((system_rev & 0xf000) == 0x4000)
+		return FAN_ALWAYS_ON;
+
+	return (nw_gpio_read() & GPIO_FAN) ? FAN_ON : FAN_OFF;
+}
+
+/*
+ * End of NetWinder specifics
+ */
+
+static void ds1620_send_bits(int nr, int value)
+{
+	int i;
+
+	for (i = 0; i < nr; i++) {
+		netwinder_ds1620_set_data(value & 1);
+		netwinder_ds1620_set_clk(0);
+		udelay(1);
+		netwinder_ds1620_set_clk(1);
+		udelay(1);
+
+		value >>= 1;
+	}
+}
+
+static unsigned int ds1620_recv_bits(int nr)
+{
+	unsigned int value = 0, mask = 1;
+	int i;
+
+	netwinder_ds1620_set_data(0);
+
+	for (i = 0; i < nr; i++) {
+		netwinder_ds1620_set_clk(0);
+		udelay(1);
+
+		if (netwinder_ds1620_get_data())
+			value |= mask;
+
+		mask <<= 1;
+
+		netwinder_ds1620_set_clk(1);
+		udelay(1);
+	}
+
+	return value;
+}
+
+static void ds1620_out(int cmd, int bits, int value)
+{
+	unsigned long flags;
+
+	netwinder_lock(&flags);
+	netwinder_ds1620_set_clk(1);
+	netwinder_ds1620_set_data_dir(0);
+	netwinder_ds1620_reset();
+
+	udelay(1);
+
+	ds1620_send_bits(8, cmd);
+	if (bits)
+		ds1620_send_bits(bits, value);
+
+	udelay(1);
+
+	netwinder_ds1620_reset();
+	netwinder_unlock(&flags);
+
+	msleep(20);
+}
+
+static unsigned int ds1620_in(int cmd, int bits)
+{
+	unsigned long flags;
+	unsigned int value;
+
+	netwinder_lock(&flags);
+	netwinder_ds1620_set_clk(1);
+	netwinder_ds1620_set_data_dir(0);
+	netwinder_ds1620_reset();
+
+	udelay(1);
+
+	ds1620_send_bits(8, cmd);
+
+	netwinder_ds1620_set_data_dir(1);
+	value = ds1620_recv_bits(bits);
+
+	netwinder_ds1620_reset();
+	netwinder_unlock(&flags);
+
+	return value;
+}
+
+static int cvt_9_to_int(unsigned int val)
+{
+	if (val & 0x100)
+		val |= 0xfffffe00;
+
+	return val;
+}
+
+static void ds1620_write_state(struct therm *therm)
+{
+	ds1620_out(THERM_WRITE_CONFIG, 8, CFG_CPU);
+	ds1620_out(THERM_WRITE_TL, 9, therm->lo);
+	ds1620_out(THERM_WRITE_TH, 9, therm->hi);
+	ds1620_out(THERM_START_CONVERT, 0, 0);
+}
+
+static void ds1620_read_state(struct therm *therm)
+{
+	therm->lo = cvt_9_to_int(ds1620_in(THERM_READ_TL, 9));
+	therm->hi = cvt_9_to_int(ds1620_in(THERM_READ_TH, 9));
+}
+
+static int ds1620_open(struct inode *inode, struct file *file)
+{
+	return nonseekable_open(inode, file);
+}
+
+static ssize_t
+ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr)
+{
+	signed int cur_temp;
+	signed char cur_temp_degF;
+
+	cur_temp = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9)) >> 1;
+
+	/* convert to Fahrenheit, as per wdt.c */
+	cur_temp_degF = (cur_temp * 9) / 5 + 32;
+
+	if (copy_to_user(buf, &cur_temp_degF, 1))
+		return -EFAULT;
+
+	return 1;
+}
+
+static int
+ds1620_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct therm therm;
+	union {
+		struct therm __user *therm;
+		int __user *i;
+	} uarg;
+	int i;
+
+	uarg.i = (int __user *)arg;
+
+	switch(cmd) {
+	case CMD_SET_THERMOSTATE:
+	case CMD_SET_THERMOSTATE2:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (cmd == CMD_SET_THERMOSTATE) {
+			if (get_user(therm.hi, uarg.i))
+				return -EFAULT;
+			therm.lo = therm.hi - 3;
+		} else {
+			if (copy_from_user(&therm, uarg.therm, sizeof(therm)))
+				return -EFAULT;
+		}
+
+		therm.lo <<= 1;
+		therm.hi <<= 1;
+
+		ds1620_write_state(&therm);
+		break;
+
+	case CMD_GET_THERMOSTATE:
+	case CMD_GET_THERMOSTATE2:
+		ds1620_read_state(&therm);
+
+		therm.lo >>= 1;
+		therm.hi >>= 1;
+
+		if (cmd == CMD_GET_THERMOSTATE) {
+			if (put_user(therm.hi, uarg.i))
+				return -EFAULT;
+		} else {
+			if (copy_to_user(uarg.therm, &therm, sizeof(therm)))
+				return -EFAULT;
+		}
+		break;
+
+	case CMD_GET_TEMPERATURE:
+	case CMD_GET_TEMPERATURE2:
+		i = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9));
+
+		if (cmd == CMD_GET_TEMPERATURE)
+			i >>= 1;
+
+		return put_user(i, uarg.i) ? -EFAULT : 0;
+
+	case CMD_GET_STATUS:
+		i = ds1620_in(THERM_READ_CONFIG, 8) & 0xe3;
+
+		return put_user(i, uarg.i) ? -EFAULT : 0;
+
+	case CMD_GET_FAN:
+		i = netwinder_get_fan();
+
+		return put_user(i, uarg.i) ? -EFAULT : 0;
+
+	case CMD_SET_FAN:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (get_user(i, uarg.i))
+			return -EFAULT;
+
+		netwinder_set_fan(i);
+		break;
+		
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+	return 0;
+}
+
+static long
+ds1620_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	mutex_lock(&ds1620_mutex);
+	ret = ds1620_ioctl(file, cmd, arg);
+	mutex_unlock(&ds1620_mutex);
+
+	return ret;
+}
+
+#ifdef THERM_USE_PROC
+static int ds1620_proc_therm_show(struct seq_file *m, void *v)
+{
+	struct therm th;
+	int temp;
+
+	ds1620_read_state(&th);
+	temp =  cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9));
+
+	seq_printf(m, "Thermostat: HI %i.%i, LOW %i.%i; temperature: %i.%i C, fan %s\n",
+		   th.hi >> 1, th.hi & 1 ? 5 : 0,
+		   th.lo >> 1, th.lo & 1 ? 5 : 0,
+		   temp  >> 1, temp  & 1 ? 5 : 0,
+		   fan_state[netwinder_get_fan()]);
+	return 0;
+}
+#endif
+
+static const struct file_operations ds1620_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ds1620_open,
+	.read		= ds1620_read,
+	.unlocked_ioctl	= ds1620_unlocked_ioctl,
+	.llseek		= no_llseek,
+};
+
+static struct miscdevice ds1620_miscdev = {
+	TEMP_MINOR,
+	"temp",
+	&ds1620_fops
+};
+
+static int __init ds1620_init(void)
+{
+	int ret;
+	struct therm th, th_start;
+
+	if (!machine_is_netwinder())
+		return -ENODEV;
+
+	ds1620_out(THERM_RESET, 0, 0);
+	ds1620_out(THERM_WRITE_CONFIG, 8, CFG_CPU);
+	ds1620_out(THERM_START_CONVERT, 0, 0);
+
+	/*
+	 * Trigger the fan to start by setting
+	 * temperature high point low.  This kicks
+	 * the fan into action.
+	 */
+	ds1620_read_state(&th);
+	th_start.lo = 0;
+	th_start.hi = 1;
+	ds1620_write_state(&th_start);
+
+	msleep(2000);
+
+	ds1620_write_state(&th);
+
+	ret = misc_register(&ds1620_miscdev);
+	if (ret < 0)
+		return ret;
+
+#ifdef THERM_USE_PROC
+	if (!proc_create_single("therm", 0, NULL, ds1620_proc_therm_show))
+		printk(KERN_ERR "therm: unable to register /proc/therm\n");
+#endif
+
+	ds1620_read_state(&th);
+	ret = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9));
+
+	printk(KERN_INFO "Thermostat: high %i.%i, low %i.%i, "
+	       "current %i.%i C, fan %s.\n",
+	       th.hi >> 1, th.hi & 1 ? 5 : 0,
+	       th.lo >> 1, th.lo & 1 ? 5 : 0,
+	       ret   >> 1, ret   & 1 ? 5 : 0,
+	       fan_state[netwinder_get_fan()]);
+
+	return 0;
+}
+
+static void __exit ds1620_exit(void)
+{
+#ifdef THERM_USE_PROC
+	remove_proc_entry("therm", NULL);
+#endif
+	misc_deregister(&ds1620_miscdev);
+}
+
+module_init(ds1620_init);
+module_exit(ds1620_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
new file mode 100644
index 0000000..06749e2
--- /dev/null
+++ b/drivers/char/dsp56k.c
@@ -0,0 +1,534 @@
+/*
+ * The DSP56001 Device Driver, saviour of the Free World(tm)
+ *
+ * Authors: Fredrik Noring   <noring@nocrew.org>
+ *          lars brinkhoff   <lars@nocrew.org>
+ *          Tomas Berndtsson <tomas@nocrew.org>
+ *
+ * First version May 1996
+ *
+ * History:
+ *  97-01-29   Tomas Berndtsson,
+ *               Integrated with Linux 2.1.21 kernel sources.
+ *  97-02-15   Tomas Berndtsson,
+ *               Fixed for kernel 2.1.26
+ *
+ * BUGS:
+ *  Hmm... there must be something here :)
+ *
+ * Copyright (C) 1996,1997 Fredrik Noring, lars brinkhoff & Tomas Berndtsson
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/major.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h>	/* guess what */
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>	/* For put_user and get_user */
+
+#include <asm/atarihw.h>
+#include <asm/traps.h>
+
+#include <asm/dsp56k.h>
+
+/* minor devices */
+#define DSP56K_DEV_56001        0    /* The only device so far */
+
+#define TIMEOUT    10   /* Host port timeout in number of tries */
+#define MAXIO    2048   /* Maximum number of words before sleep */
+#define DSP56K_MAX_BINARY_LENGTH (3*64*1024)
+
+#define DSP56K_TX_INT_ON	dsp56k_host_interface.icr |=  DSP56K_ICR_TREQ
+#define DSP56K_RX_INT_ON	dsp56k_host_interface.icr |=  DSP56K_ICR_RREQ
+#define DSP56K_TX_INT_OFF	dsp56k_host_interface.icr &= ~DSP56K_ICR_TREQ
+#define DSP56K_RX_INT_OFF	dsp56k_host_interface.icr &= ~DSP56K_ICR_RREQ
+
+#define DSP56K_TRANSMIT		(dsp56k_host_interface.isr & DSP56K_ISR_TXDE)
+#define DSP56K_RECEIVE		(dsp56k_host_interface.isr & DSP56K_ISR_RXDF)
+
+#define handshake(count, maxio, timeout, ENABLE, f) \
+{ \
+	long i, t, m; \
+	while (count > 0) { \
+		m = min_t(unsigned long, count, maxio); \
+		for (i = 0; i < m; i++) { \
+			for (t = 0; t < timeout && !ENABLE; t++) \
+				msleep(20); \
+			if(!ENABLE) \
+				return -EIO; \
+			f; \
+		} \
+		count -= m; \
+		if (m == maxio) msleep(20); \
+	} \
+}
+
+#define tx_wait(n) \
+{ \
+	int t; \
+	for(t = 0; t < n && !DSP56K_TRANSMIT; t++) \
+		msleep(10); \
+	if(!DSP56K_TRANSMIT) { \
+		return -EIO; \
+	} \
+}
+
+#define rx_wait(n) \
+{ \
+	int t; \
+	for(t = 0; t < n && !DSP56K_RECEIVE; t++) \
+		msleep(10); \
+	if(!DSP56K_RECEIVE) { \
+		return -EIO; \
+	} \
+}
+
+static DEFINE_MUTEX(dsp56k_mutex);
+static struct dsp56k_device {
+	unsigned long in_use;
+	long maxio, timeout;
+	int tx_wsize, rx_wsize;
+} dsp56k;
+
+static struct class *dsp56k_class;
+
+static int dsp56k_reset(void)
+{
+	u_char status;
+	
+	/* Power down the DSP */
+	sound_ym.rd_data_reg_sel = 14;
+	status = sound_ym.rd_data_reg_sel & 0xef;
+	sound_ym.wd_data = status;
+	sound_ym.wd_data = status | 0x10;
+  
+	udelay(10);
+  
+	/* Power up the DSP */
+	sound_ym.rd_data_reg_sel = 14;
+	sound_ym.wd_data = sound_ym.rd_data_reg_sel & 0xef;
+
+	return 0;
+}
+
+static int dsp56k_upload(u_char __user *bin, int len)
+{
+	struct platform_device *pdev;
+	const struct firmware *fw;
+	const char fw_name[] = "dsp56k/bootstrap.bin";
+	int err;
+	int i;
+
+	dsp56k_reset();
+
+	pdev = platform_device_register_simple("dsp56k", 0, NULL, 0);
+	if (IS_ERR(pdev)) {
+		printk(KERN_ERR "Failed to register device for \"%s\"\n",
+		       fw_name);
+		return -EINVAL;
+	}
+	err = request_firmware(&fw, fw_name, &pdev->dev);
+	platform_device_unregister(pdev);
+	if (err) {
+		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
+		       fw_name, err);
+		return err;
+	}
+	if (fw->size % 3) {
+		printk(KERN_ERR "Bogus length %d in image \"%s\"\n",
+		       fw->size, fw_name);
+		release_firmware(fw);
+		return -EINVAL;
+	}
+	for (i = 0; i < fw->size; i = i + 3) {
+		/* tx_wait(10); */
+		dsp56k_host_interface.data.b[1] = fw->data[i];
+		dsp56k_host_interface.data.b[2] = fw->data[i + 1];
+		dsp56k_host_interface.data.b[3] = fw->data[i + 2];
+	}
+	release_firmware(fw);
+	for (; i < 512; i++) {
+		/* tx_wait(10); */
+		dsp56k_host_interface.data.b[1] = 0;
+		dsp56k_host_interface.data.b[2] = 0;
+		dsp56k_host_interface.data.b[3] = 0;
+	}
+  
+	for (i = 0; i < len; i++) {
+		tx_wait(10);
+		get_user(dsp56k_host_interface.data.b[1], bin++);
+		get_user(dsp56k_host_interface.data.b[2], bin++);
+		get_user(dsp56k_host_interface.data.b[3], bin++);
+	}
+
+	tx_wait(10);
+	dsp56k_host_interface.data.l = 3;    /* Magic execute */
+
+	return 0;
+}
+
+static ssize_t dsp56k_read(struct file *file, char __user *buf, size_t count,
+			   loff_t *ppos)
+{
+	struct inode *inode = file_inode(file);
+	int dev = iminor(inode) & 0x0f;
+
+	switch(dev)
+	{
+	case DSP56K_DEV_56001:
+	{
+
+		long n;
+
+		/* Don't do anything if nothing is to be done */
+		if (!count) return 0;
+
+		n = 0;
+		switch (dsp56k.rx_wsize) {
+		case 1:  /* 8 bit */
+		{
+			handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
+				  put_user(dsp56k_host_interface.data.b[3], buf+n++));
+			return n;
+		}
+		case 2:  /* 16 bit */
+		{
+			short __user *data;
+
+			count /= 2;
+			data = (short __user *) buf;
+			handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
+				  put_user(dsp56k_host_interface.data.w[1], data+n++));
+			return 2*n;
+		}
+		case 3:  /* 24 bit */
+		{
+			count /= 3;
+			handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
+				  put_user(dsp56k_host_interface.data.b[1], buf+n++);
+				  put_user(dsp56k_host_interface.data.b[2], buf+n++);
+				  put_user(dsp56k_host_interface.data.b[3], buf+n++));
+			return 3*n;
+		}
+		case 4:  /* 32 bit */
+		{
+			long __user *data;
+
+			count /= 4;
+			data = (long __user *) buf;
+			handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
+				  put_user(dsp56k_host_interface.data.l, data+n++));
+			return 4*n;
+		}
+		}
+		return -EFAULT;
+	}
+
+	default:
+		printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev);
+		return -ENXIO;
+	}
+}
+
+static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t count,
+			    loff_t *ppos)
+{
+	struct inode *inode = file_inode(file);
+	int dev = iminor(inode) & 0x0f;
+
+	switch(dev)
+	{
+	case DSP56K_DEV_56001:
+	{
+		long n;
+
+		/* Don't do anything if nothing is to be done */
+		if (!count) return 0;
+
+		n = 0;
+		switch (dsp56k.tx_wsize) {
+		case 1:  /* 8 bit */
+		{
+			handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
+				  get_user(dsp56k_host_interface.data.b[3], buf+n++));
+			return n;
+		}
+		case 2:  /* 16 bit */
+		{
+			const short __user *data;
+
+			count /= 2;
+			data = (const short __user *)buf;
+			handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
+				  get_user(dsp56k_host_interface.data.w[1], data+n++));
+			return 2*n;
+		}
+		case 3:  /* 24 bit */
+		{
+			count /= 3;
+			handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
+				  get_user(dsp56k_host_interface.data.b[1], buf+n++);
+				  get_user(dsp56k_host_interface.data.b[2], buf+n++);
+				  get_user(dsp56k_host_interface.data.b[3], buf+n++));
+			return 3*n;
+		}
+		case 4:  /* 32 bit */
+		{
+			const long __user *data;
+
+			count /= 4;
+			data = (const long __user *)buf;
+			handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
+				  get_user(dsp56k_host_interface.data.l, data+n++));
+			return 4*n;
+		}
+		}
+
+		return -EFAULT;
+	}
+	default:
+		printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev);
+		return -ENXIO;
+	}
+}
+
+static long dsp56k_ioctl(struct file *file, unsigned int cmd,
+			 unsigned long arg)
+{
+	int dev = iminor(file_inode(file)) & 0x0f;
+	void __user *argp = (void __user *)arg;
+
+	switch(dev)
+	{
+	case DSP56K_DEV_56001:
+
+		switch(cmd) {
+		case DSP56K_UPLOAD:
+		{
+			char __user *bin;
+			int r, len;
+			struct dsp56k_upload __user *binary = argp;
+    
+			if(get_user(len, &binary->len) < 0)
+				return -EFAULT;
+			if(get_user(bin, &binary->bin) < 0)
+				return -EFAULT;
+		
+			if (len <= 0) {
+				return -EINVAL;      /* nothing to upload?!? */
+			}
+			if (len > DSP56K_MAX_BINARY_LENGTH) {
+				return -EINVAL;
+			}
+			mutex_lock(&dsp56k_mutex);
+			r = dsp56k_upload(bin, len);
+			mutex_unlock(&dsp56k_mutex);
+			if (r < 0) {
+				return r;
+			}
+    
+			break;
+		}
+		case DSP56K_SET_TX_WSIZE:
+			if (arg > 4 || arg < 1)
+				return -EINVAL;
+			mutex_lock(&dsp56k_mutex);
+			dsp56k.tx_wsize = (int) arg;
+			mutex_unlock(&dsp56k_mutex);
+			break;
+		case DSP56K_SET_RX_WSIZE:
+			if (arg > 4 || arg < 1)
+				return -EINVAL;
+			mutex_lock(&dsp56k_mutex);
+			dsp56k.rx_wsize = (int) arg;
+			mutex_unlock(&dsp56k_mutex);
+			break;
+		case DSP56K_HOST_FLAGS:
+		{
+			int dir, out, status;
+			struct dsp56k_host_flags __user *hf = argp;
+    
+			if(get_user(dir, &hf->dir) < 0)
+				return -EFAULT;
+			if(get_user(out, &hf->out) < 0)
+				return -EFAULT;
+
+			mutex_lock(&dsp56k_mutex);
+			if ((dir & 0x1) && (out & 0x1))
+				dsp56k_host_interface.icr |= DSP56K_ICR_HF0;
+			else if (dir & 0x1)
+				dsp56k_host_interface.icr &= ~DSP56K_ICR_HF0;
+			if ((dir & 0x2) && (out & 0x2))
+				dsp56k_host_interface.icr |= DSP56K_ICR_HF1;
+			else if (dir & 0x2)
+				dsp56k_host_interface.icr &= ~DSP56K_ICR_HF1;
+
+			status = 0;
+			if (dsp56k_host_interface.icr & DSP56K_ICR_HF0) status |= 0x1;
+			if (dsp56k_host_interface.icr & DSP56K_ICR_HF1) status |= 0x2;
+			if (dsp56k_host_interface.isr & DSP56K_ISR_HF2) status |= 0x4;
+			if (dsp56k_host_interface.isr & DSP56K_ISR_HF3) status |= 0x8;
+			mutex_unlock(&dsp56k_mutex);
+			return put_user(status, &hf->status);
+		}
+		case DSP56K_HOST_CMD:
+			if (arg > 31)
+				return -EINVAL;
+			mutex_lock(&dsp56k_mutex);
+			dsp56k_host_interface.cvr = (u_char)((arg & DSP56K_CVR_HV_MASK) |
+							     DSP56K_CVR_HC);
+			mutex_unlock(&dsp56k_mutex);
+			break;
+		default:
+			return -EINVAL;
+		}
+		return 0;
+
+	default:
+		printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev);
+		return -ENXIO;
+	}
+}
+
+/* As of 2.1.26 this should be dsp56k_poll,
+ * but how do I then check device minor number?
+ * Do I need this function at all???
+ */
+#if 0
+static __poll_t dsp56k_poll(struct file *file, poll_table *wait)
+{
+	int dev = iminor(file_inode(file)) & 0x0f;
+
+	switch(dev)
+	{
+	case DSP56K_DEV_56001:
+		/* poll_wait(file, ???, wait); */
+		return EPOLLIN | EPOLLRDNORM | EPOLLOUT;
+
+	default:
+		printk("DSP56k driver: Unknown minor device: %d\n", dev);
+		return 0;
+	}
+}
+#endif
+
+static int dsp56k_open(struct inode *inode, struct file *file)
+{
+	int dev = iminor(inode) & 0x0f;
+	int ret = 0;
+
+	mutex_lock(&dsp56k_mutex);
+	switch(dev)
+	{
+	case DSP56K_DEV_56001:
+
+		if (test_and_set_bit(0, &dsp56k.in_use)) {
+			ret = -EBUSY;
+			goto out;
+		}
+
+		dsp56k.timeout = TIMEOUT;
+		dsp56k.maxio = MAXIO;
+		dsp56k.rx_wsize = dsp56k.tx_wsize = 4; 
+
+		DSP56K_TX_INT_OFF;
+		DSP56K_RX_INT_OFF;
+
+		/* Zero host flags */
+		dsp56k_host_interface.icr &= ~DSP56K_ICR_HF0;
+		dsp56k_host_interface.icr &= ~DSP56K_ICR_HF1;
+
+		break;
+
+	default:
+		ret = -ENODEV;
+	}
+out:
+	mutex_unlock(&dsp56k_mutex);
+	return ret;
+}
+
+static int dsp56k_release(struct inode *inode, struct file *file)
+{
+	int dev = iminor(inode) & 0x0f;
+
+	switch(dev)
+	{
+	case DSP56K_DEV_56001:
+		clear_bit(0, &dsp56k.in_use);
+		break;
+	default:
+		printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev);
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+static const struct file_operations dsp56k_fops = {
+	.owner		= THIS_MODULE,
+	.read		= dsp56k_read,
+	.write		= dsp56k_write,
+	.unlocked_ioctl	= dsp56k_ioctl,
+	.open		= dsp56k_open,
+	.release	= dsp56k_release,
+	.llseek		= noop_llseek,
+};
+
+
+/****** Init and module functions ******/
+
+static const char banner[] __initconst = KERN_INFO "DSP56k driver installed\n";
+
+static int __init dsp56k_init_driver(void)
+{
+	int err = 0;
+
+	if(!MACH_IS_ATARI || !ATARIHW_PRESENT(DSP56K)) {
+		printk("DSP56k driver: Hardware not present\n");
+		return -ENODEV;
+	}
+
+	if(register_chrdev(DSP56K_MAJOR, "dsp56k", &dsp56k_fops)) {
+		printk("DSP56k driver: Unable to register driver\n");
+		return -ENODEV;
+	}
+	dsp56k_class = class_create(THIS_MODULE, "dsp56k");
+	if (IS_ERR(dsp56k_class)) {
+		err = PTR_ERR(dsp56k_class);
+		goto out_chrdev;
+	}
+	device_create(dsp56k_class, NULL, MKDEV(DSP56K_MAJOR, 0), NULL,
+		      "dsp56k");
+
+	printk(banner);
+	goto out;
+
+out_chrdev:
+	unregister_chrdev(DSP56K_MAJOR, "dsp56k");
+out:
+	return err;
+}
+module_init(dsp56k_init_driver);
+
+static void __exit dsp56k_cleanup_driver(void)
+{
+	device_destroy(dsp56k_class, MKDEV(DSP56K_MAJOR, 0));
+	class_destroy(dsp56k_class);
+	unregister_chrdev(DSP56K_MAJOR, "dsp56k");
+}
+module_exit(dsp56k_cleanup_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("dsp56k/bootstrap.bin");
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
new file mode 100644
index 0000000..f882460
--- /dev/null
+++ b/drivers/char/dtlk.c
@@ -0,0 +1,663 @@
+/*                                              -*- linux-c -*-
+ * dtlk.c - DoubleTalk PC driver for Linux
+ *
+ * Original author: Chris Pallotta <chris@allmedia.com>
+ * Current maintainer: Jim Van Zandt <jrv@vanzandt.mv.com>
+ * 
+ * 2000-03-18 Jim Van Zandt: Fix polling.
+ *  Eliminate dtlk_timer_active flag and separate dtlk_stop_timer
+ *  function.  Don't restart timer in dtlk_timer_tick.  Restart timer
+ *  in dtlk_poll after every poll.  dtlk_poll returns mask (duh).
+ *  Eliminate unused function dtlk_write_byte.  Misc. code cleanups.
+ */
+
+/* This driver is for the DoubleTalk PC, a speech synthesizer
+   manufactured by RC Systems (http://www.rcsys.com/).  It was written
+   based on documentation in their User's Manual file and Developer's
+   Tools disk.
+
+   The DoubleTalk PC contains four voice synthesizers: text-to-speech
+   (TTS), linear predictive coding (LPC), PCM/ADPCM, and CVSD.  It
+   also has a tone generator.  Output data for LPC are written to the
+   LPC port, and output data for the other modes are written to the
+   TTS port.
+
+   Two kinds of data can be read from the DoubleTalk: status
+   information (in response to the "\001?" interrogation command) is
+   read from the TTS port, and index markers (which mark the progress
+   of the speech) are read from the LPC port.  Not all models of the
+   DoubleTalk PC implement index markers.  Both the TTS and LPC ports
+   can also display status flags.
+
+   The DoubleTalk PC generates no interrupts.
+
+   These characteristics are mapped into the Unix stream I/O model as
+   follows:
+
+   "write" sends bytes to the TTS port.  It is the responsibility of
+   the user program to switch modes among TTS, PCM/ADPCM, and CVSD.
+   This driver was written for use with the text-to-speech
+   synthesizer.  If LPC output is needed some day, other minor device
+   numbers can be used to select among output modes.
+
+   "read" gets index markers from the LPC port.  If the device does
+   not implement index markers, the read will fail with error EINVAL.
+
+   Status information is available using the DTLK_INTERROGATE ioctl.
+
+ */
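+
+/* A minimal user-space sketch of the interface just described; it is
+   illustrative only and compiled out.  The /dev/dtlk node name is an
+   assumption -- only write() to the TTS port and the DTLK_INTERROGATE
+   ioctl come from this driver. */
+#if 0
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <linux/dtlk.h>
+
+int main(void)
+{
+	struct dtlk_settings st;
+	int fd = open("/dev/dtlk", O_RDWR);	/* assumed device node */
+
+	if (fd < 0)
+		return 1;
+	/* bytes written land on the TTS port and are spoken */
+	write(fd, "hello world\r", 12);
+	/* snapshot of the synthesizer state, as filled in by
+	   dtlk_interrogate() below */
+	if (ioctl(fd, DTLK_INTERROGATE, &st) == 0)
+		printf("ROM %s, serial %u\n", st.rom_version,
+		       st.serial_number);
+	close(fd);
+	return 0;
+}
+#endif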
+
+#include <linux/module.h>
+
+#define KERNEL
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/errno.h>	/* for -EBUSY */
+#include <linux/ioport.h>	/* for request_region */
+#include <linux/delay.h>	/* for loops_per_jiffy */
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <asm/io.h>		/* for inb_p, outb_p, inb, outb, etc. */
+#include <linux/uaccess.h>	/* for get_user, etc. */
+#include <linux/wait.h>		/* for wait_queue */
+#include <linux/init.h>		/* for __init, module_{init,exit} */
+#include <linux/poll.h>		/* for EPOLLIN, etc. */
+#include <linux/dtlk.h>		/* local header file for DoubleTalk values */
+
+#ifdef TRACING
+#define TRACE_TEXT(str) printk(str);
+#define TRACE_RET printk(")")
+#else				/* !TRACING */
+#define TRACE_TEXT(str) ((void) 0)
+#define TRACE_RET ((void) 0)
+#endif				/* TRACING */
+
+static DEFINE_MUTEX(dtlk_mutex);
+static void dtlk_timer_tick(struct timer_list *unused);
+
+static int dtlk_major;
+static int dtlk_port_lpc;
+static int dtlk_port_tts;
+static int dtlk_busy;
+static int dtlk_has_indexing;
+static unsigned int dtlk_portlist[] =
+{0x25e, 0x29e, 0x2de, 0x31e, 0x35e, 0x39e, 0};
+static wait_queue_head_t dtlk_process_list;
+static DEFINE_TIMER(dtlk_timer, dtlk_timer_tick);
+
+/* prototypes for file_operations struct */
+static ssize_t dtlk_read(struct file *, char __user *,
+			 size_t nbytes, loff_t * ppos);
+static ssize_t dtlk_write(struct file *, const char __user *,
+			  size_t nbytes, loff_t * ppos);
+static __poll_t dtlk_poll(struct file *, poll_table *);
+static int dtlk_open(struct inode *, struct file *);
+static int dtlk_release(struct inode *, struct file *);
+static long dtlk_ioctl(struct file *file,
+		       unsigned int cmd, unsigned long arg);
+
+static const struct file_operations dtlk_fops =
+{
+	.owner		= THIS_MODULE,
+	.read		= dtlk_read,
+	.write		= dtlk_write,
+	.poll		= dtlk_poll,
+	.unlocked_ioctl	= dtlk_ioctl,
+	.open		= dtlk_open,
+	.release	= dtlk_release,
+	.llseek		= no_llseek,
+};
+
+/* local prototypes */
+static int dtlk_dev_probe(void);
+static struct dtlk_settings *dtlk_interrogate(void);
+static int dtlk_readable(void);
+static char dtlk_read_lpc(void);
+static char dtlk_read_tts(void);
+static int dtlk_writeable(void);
+static char dtlk_write_bytes(const char *buf, int n);
+static char dtlk_write_tts(char);
+/*
+   static void dtlk_handle_error(char, char, unsigned int);
+ */
+
+static ssize_t dtlk_read(struct file *file, char __user *buf,
+			 size_t count, loff_t * ppos)
+{
+	unsigned int minor = iminor(file_inode(file));
+	char ch;
+	int i = 0, retries;
+
+	TRACE_TEXT("(dtlk_read");
+	/*  printk("DoubleTalk PC - dtlk_read()\n"); */
+
+	if (minor != DTLK_MINOR || !dtlk_has_indexing)
+		return -EINVAL;
+
+	for (retries = 0; retries < loops_per_jiffy; retries++) {
+		while (i < count && dtlk_readable()) {
+			ch = dtlk_read_lpc();
+			/*        printk("dtlk_read() reads 0x%02x\n", ch); */
+			if (put_user(ch, buf++))
+				return -EFAULT;
+			i++;
+		}
+		if (i)
+			return i;
+		if (file->f_flags & O_NONBLOCK)
+			break;
+		msleep_interruptible(100);
+	}
+	if (retries == loops_per_jiffy)
+		printk(KERN_ERR "dtlk_read times out\n");
+	TRACE_RET;
+	return -EAGAIN;
+}
+
+static ssize_t dtlk_write(struct file *file, const char __user *buf,
+			  size_t count, loff_t * ppos)
+{
+	int i = 0, retries = 0, ch;
+
+	TRACE_TEXT("(dtlk_write");
+#ifdef TRACING
+	printk(" \"");
+	{
+		int i, ch;
+		for (i = 0; i < count; i++) {
+			if (get_user(ch, buf + i))
+				return -EFAULT;
+			if (' ' <= ch && ch <= '~')
+				printk("%c", ch);
+			else
+				printk("\\%03o", ch);
+		}
+		printk("\"");
+	}
+#endif
+
+	if (iminor(file_inode(file)) != DTLK_MINOR)
+		return -EINVAL;
+
+	while (1) {
+		while (i < count && !get_user(ch, buf) &&
+		       (ch == DTLK_CLEAR || dtlk_writeable())) {
+			dtlk_write_tts(ch);
+			buf++;
+			i++;
+			if (i % 5 == 0)
+				/* We yield our time until scheduled
+				   again.  This reduces the transfer
+				   rate to 500 bytes/sec, but that's
+				   still enough to keep up with the
+				   speech synthesizer. */
+				msleep_interruptible(1);
+			else {
+				/* the RDY bit goes zero 2-3 usec
+				   after writing, and goes 1 again
+				   180-190 usec later.  Here, we wait
+				   up to 250 usec for the RDY bit to
+				   go nonzero. */
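+				/* (the 250 usec bound follows from
+				   loops_per_jiffy: a jiffy is 1/HZ s,
+				   so loops_per_jiffy/(4000/HZ)
+				   iterations span roughly 1/4000 s,
+				   ignoring the cost of inb_p itself) */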
+				for (retries = 0;
+				     retries < loops_per_jiffy / (4000/HZ);
+				     retries++)
+					if (inb_p(dtlk_port_tts) &
+					    TTS_WRITABLE)
+						break;
+			}
+			retries = 0;
+		}
+		if (i == count)
+			return i;
+		if (file->f_flags & O_NONBLOCK)
+			break;
+
+		msleep_interruptible(1);
+
+		if (++retries > 10 * HZ) { /* wait no more than 10 sec
+					      from last write */
+			printk("dtlk: write timeout.  "
+			       "inb_p(dtlk_port_tts) = 0x%02x\n",
+			       inb_p(dtlk_port_tts));
+			TRACE_RET;
+			return -EBUSY;
+		}
+	}
+	TRACE_RET;
+	return -EAGAIN;
+}
+
+static __poll_t dtlk_poll(struct file *file, poll_table * wait)
+{
+	__poll_t mask = 0;
+	unsigned long expires;
+
+	TRACE_TEXT(" dtlk_poll");
+	/*
+	   static long int j;
+	   printk(".");
+	   printk("<%ld>", jiffies-j);
+	   j=jiffies;
+	 */
+	poll_wait(file, &dtlk_process_list, wait);
+
+	if (dtlk_has_indexing && dtlk_readable()) {
+	        del_timer(&dtlk_timer);
+		mask = EPOLLIN | EPOLLRDNORM;
+	}
+	if (dtlk_writeable()) {
+	        del_timer(&dtlk_timer);
+		mask |= EPOLLOUT | EPOLLWRNORM;
+	}
+	/* there are no exception conditions */
+
+	/* There won't be any interrupts, so we set a timer instead. */
+	expires = jiffies + 3*HZ / 100;
+	mod_timer(&dtlk_timer, expires);
+
+	return mask;
+}
+
+static void dtlk_timer_tick(struct timer_list *unused)
+{
+	TRACE_TEXT(" dtlk_timer_tick");
+	wake_up_interruptible(&dtlk_process_list);
+}
+
+static long dtlk_ioctl(struct file *file,
+		       unsigned int cmd,
+		       unsigned long arg)
+{
+	char __user *argp = (char __user *)arg;
+	struct dtlk_settings *sp;
+	char portval;
+	TRACE_TEXT(" dtlk_ioctl");
+
+	switch (cmd) {
+
+	case DTLK_INTERROGATE:
+		mutex_lock(&dtlk_mutex);
+		sp = dtlk_interrogate();
+		mutex_unlock(&dtlk_mutex);
+		if (copy_to_user(argp, sp, sizeof(struct dtlk_settings)))
+			return -EFAULT;
+		return 0;
+
+	case DTLK_STATUS:
+		portval = inb_p(dtlk_port_tts);
+		return put_user(portval, argp);
+
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Note that nobody ever sets dtlk_busy... */
+static int dtlk_open(struct inode *inode, struct file *file)
+{
+	TRACE_TEXT("(dtlk_open");
+
+	switch (iminor(inode)) {
+	case DTLK_MINOR:
+		if (dtlk_busy)
+			return -EBUSY;
+		return nonseekable_open(inode, file);
+
+	default:
+		return -ENXIO;
+	}
+}
+
+static int dtlk_release(struct inode *inode, struct file *file)
+{
+	TRACE_TEXT("(dtlk_release");
+
+	switch (iminor(inode)) {
+	case DTLK_MINOR:
+		break;
+
+	default:
+		break;
+	}
+	TRACE_RET;
+
+	del_timer_sync(&dtlk_timer);
+
+	return 0;
+}
+
+static int __init dtlk_init(void)
+{
+	int err;
+
+	dtlk_port_lpc = 0;
+	dtlk_port_tts = 0;
+	dtlk_busy = 0;
+	dtlk_major = register_chrdev(0, "dtlk", &dtlk_fops);
+	if (dtlk_major < 0) {
+		printk(KERN_ERR "DoubleTalk PC - cannot register device\n");
+		return dtlk_major;
+	}
+	err = dtlk_dev_probe();
+	if (err) {
+		unregister_chrdev(dtlk_major, "dtlk");
+		return err;
+	}
+	printk(", MAJOR %d\n", dtlk_major);
+
+	init_waitqueue_head(&dtlk_process_list);
+
+	return 0;
+}
+
+static void __exit dtlk_cleanup (void)
+{
+	dtlk_write_bytes("goodbye", 8);
+	msleep_interruptible(500);		/* nap 0.50 sec but
+						   could be awakened
+						   earlier by
+						   signals... */
+
+	dtlk_write_tts(DTLK_CLEAR);
+	unregister_chrdev(dtlk_major, "dtlk");
+	release_region(dtlk_port_lpc, DTLK_IO_EXTENT);
+}
+
+module_init(dtlk_init);
+module_exit(dtlk_cleanup);
+
+/* ------------------------------------------------------------------------ */
+
+static int dtlk_readable(void)
+{
+#ifdef TRACING
+	printk(" dtlk_readable=%u@%u", inb_p(dtlk_port_lpc) != 0x7f, jiffies);
+#endif
+	return inb_p(dtlk_port_lpc) != 0x7f;
+}
+
+static int dtlk_writeable(void)
+{
+	/* TRACE_TEXT(" dtlk_writeable"); */
+#ifdef TRACINGMORE
+	printk(" dtlk_writeable=%u", (inb_p(dtlk_port_tts) & TTS_WRITABLE)!=0);
+#endif
+	return inb_p(dtlk_port_tts) & TTS_WRITABLE;
+}
+
+static int __init dtlk_dev_probe(void)
+{
+	unsigned int testval = 0;
+	int i = 0;
+	struct dtlk_settings *sp;
+
+	if (dtlk_port_lpc | dtlk_port_tts)
+		return -EBUSY;
+
+	for (i = 0; dtlk_portlist[i]; i++) {
+#if 0
+		printk("DoubleTalk PC - Port %03x = %04x\n",
+		       dtlk_portlist[i], (testval = inw_p(dtlk_portlist[i])));
+#endif
+
+		if (!request_region(dtlk_portlist[i], DTLK_IO_EXTENT, 
+			       "dtlk"))
+			continue;
+		testval = inw_p(dtlk_portlist[i]);
+		if ((testval &= 0xfbff) == 0x107f) {
+			dtlk_port_lpc = dtlk_portlist[i];
+			dtlk_port_tts = dtlk_port_lpc + 1;
+
+			sp = dtlk_interrogate();
+			printk("DoubleTalk PC at %03x-%03x, "
+			       "ROM version %s, serial number %u",
+			       dtlk_portlist[i], dtlk_portlist[i] +
+			       DTLK_IO_EXTENT - 1,
+			       sp->rom_version, sp->serial_number);
+
+                        /* put LPC port into known state, so
+			   dtlk_readable() gives valid result */
+			outb_p(0xff, dtlk_port_lpc); 
+
+                        /* INIT string and index marker */
+			dtlk_write_bytes("\036\1@\0\0012I\r", 8);
+			/* posting an index takes 18 msec.  Here, we
+			   wait up to 100 msec to see whether it
+			   appears. */
+			msleep_interruptible(100);
+			dtlk_has_indexing = dtlk_readable();
+#ifdef TRACING
+			printk(", indexing %d\n", dtlk_has_indexing);
+#endif
+#ifdef INSCOPE
+			{
+/* This macro records ten samples read from the LPC port, for later display */
+#define LOOK					\
+for (i = 0; i < 10; i++)			\
+  {						\
+    buffer[b++] = inb_p(dtlk_port_lpc);		\
+    __delay(loops_per_jiffy/(1000000/HZ));             \
+  }
+				char buffer[1000];
+				int b = 0, i, j;
+
+				LOOK
+				outb_p(0xff, dtlk_port_lpc);
+				buffer[b++] = 0;
+				LOOK
+				dtlk_write_bytes("\0012I\r", 4);
+				buffer[b++] = 0;
+				__delay(50 * loops_per_jiffy / (1000/HZ));
+				outb_p(0xff, dtlk_port_lpc);
+				buffer[b++] = 0;
+				LOOK
+
+				printk("\n");
+				for (j = 0; j < b; j++)
+					printk(" %02x", buffer[j]);
+				printk("\n");
+			}
+#endif				/* INSCOPE */
+
+#ifdef OUTSCOPE
+			{
+/* This macro records ten samples read from the TTS port, for later display */
+#define LOOK					\
+for (i = 0; i < 10; i++)			\
+  {						\
+    buffer[b++] = inb_p(dtlk_port_tts);		\
+    __delay(loops_per_jiffy/(1000000/HZ));  /* 1 us */ \
+  }
+				char buffer[1000];
+				int b = 0, i, j;
+
+				mdelay(10);	/* 10 ms */
+				LOOK
+				outb_p(0x03, dtlk_port_tts);
+				buffer[b++] = 0;
+				LOOK
+				LOOK
+
+				printk("\n");
+				for (j = 0; j < b; j++)
+					printk(" %02x", buffer[j]);
+				printk("\n");
+			}
+#endif				/* OUTSCOPE */
+
+			dtlk_write_bytes("Double Talk found", 18);
+
+			return 0;
+		}
+		release_region(dtlk_portlist[i], DTLK_IO_EXTENT);
+	}
+
+	printk(KERN_INFO "DoubleTalk PC - not found\n");
+	return -ENODEV;
+}
+
+/*
+   static void dtlk_handle_error(char op, char rc, unsigned int minor)
+   {
+   printk(KERN_INFO"\nDoubleTalk PC - MINOR: %d, OPCODE: %d, ERROR: %d\n", 
+   minor, op, rc);
+   return;
+   }
+ */
+
+/* interrogate the DoubleTalk PC and return its settings */
+static struct dtlk_settings *dtlk_interrogate(void)
+{
+	unsigned char *t;
+	static char buf[sizeof(struct dtlk_settings) + 1];
+	int total, i;
+	static struct dtlk_settings status;
+	TRACE_TEXT("(dtlk_interrogate");
+	dtlk_write_bytes("\030\001?", 3);
+	for (total = 0, i = 0; i < 50; i++) {
+		buf[total] = dtlk_read_tts();
+		if (total > 2 && buf[total] == 0x7f)
+			break;
+		if (total < sizeof(struct dtlk_settings))
+			total++;
+	}
+	/*
+	   if (i==50) printk("interrogate() read overrun\n");
+	   for (i=0; i<sizeof(buf); i++)
+	   printk(" %02x", buf[i]);
+	   printk("\n");
+	 */
+	t = buf;
+	status.serial_number = t[0] + t[1] * 256; /* serial number is
+						     little endian */
+	t += 2;
+
+	i = 0;
+	while (*t != '\r') {
+		status.rom_version[i] = *t;
+		if (i < sizeof(status.rom_version) - 1)
+			i++;
+		t++;
+	}
+	status.rom_version[i] = 0;
+	t++;
+
+	status.mode = *t++;
+	status.punc_level = *t++;
+	status.formant_freq = *t++;
+	status.pitch = *t++;
+	status.speed = *t++;
+	status.volume = *t++;
+	status.tone = *t++;
+	status.expression = *t++;
+	status.ext_dict_loaded = *t++;
+	status.ext_dict_status = *t++;
+	status.free_ram = *t++;
+	status.articulation = *t++;
+	status.reverb = *t++;
+	status.eob = *t++;
+	status.has_indexing = dtlk_has_indexing;
+	TRACE_RET;
+	return &status;
+}
+
+static char dtlk_read_tts(void)
+{
+	int portval, retries = 0;
+	char ch;
+	TRACE_TEXT("(dtlk_read_tts");
+
+	/* verify DT is ready, read char, wait for ACK */
+	do {
+		portval = inb_p(dtlk_port_tts);
+	} while ((portval & TTS_READABLE) == 0 &&
+		 retries++ < DTLK_MAX_RETRIES);
+	if (retries > DTLK_MAX_RETRIES)
+		printk(KERN_ERR "dtlk_read_tts() timeout\n");
+
+	ch = inb_p(dtlk_port_tts);	/* input from TTS port */
+	ch &= 0x7f;
+	outb_p(ch, dtlk_port_tts);
+
+	retries = 0;
+	do {
+		portval = inb_p(dtlk_port_tts);
+	} while ((portval & TTS_READABLE) != 0 &&
+		 retries++ < DTLK_MAX_RETRIES);
+	if (retries > DTLK_MAX_RETRIES)
+		printk(KERN_ERR "dtlk_read_tts() timeout\n");
+
+	TRACE_RET;
+	return ch;
+}
+
+static char dtlk_read_lpc(void)
+{
+	int retries = 0;
+	char ch;
+	TRACE_TEXT("(dtlk_read_lpc");
+
+	/* no need to test -- this is only called when the port is readable */
+
+	ch = inb_p(dtlk_port_lpc);	/* input from LPC port */
+
+	outb_p(0xff, dtlk_port_lpc);
+
+	/* acknowledging a read takes 3-4
+	   usec.  Here, we wait up to 20 usec
+	   for the acknowledgement */
+	retries = (loops_per_jiffy * 20) / (1000000/HZ);
+	while (inb_p(dtlk_port_lpc) != 0x7f && --retries > 0);
+	if (retries == 0)
+		printk(KERN_ERR "dtlk_read_lpc() timeout\n");
+
+	TRACE_RET;
+	return ch;
+}
+
+/* write n bytes to tts port */
+static char dtlk_write_bytes(const char *buf, int n)
+{
+	char val = 0;
+	/*  printk("dtlk_write_bytes(\"%-*s\", %d)\n", n, buf, n); */
+	TRACE_TEXT("(dtlk_write_bytes");
+	while (n-- > 0)
+		val = dtlk_write_tts(*buf++);
+	TRACE_RET;
+	return val;
+}
+
+static char dtlk_write_tts(char ch)
+{
+	int retries = 0;
+#ifdef TRACINGMORE
+	printk("  dtlk_write_tts(");
+	if (' ' <= ch && ch <= '~')
+		printk("'%c'", ch);
+	else
+		printk("0x%02x", ch);
+#endif
+	if (ch != DTLK_CLEAR)	/* no flow control for CLEAR command */
+		while ((inb_p(dtlk_port_tts) & TTS_WRITABLE) == 0 &&
+		       retries++ < DTLK_MAX_RETRIES)	/* DT ready? */
+			;
+	if (retries > DTLK_MAX_RETRIES)
+		printk(KERN_ERR "dtlk_write_tts() timeout\n");
+
+	outb_p(ch, dtlk_port_tts);	/* output to TTS port */
+	/* the RDY bit goes zero 2-3 usec after writing, and goes
+	   1 again 180-190 usec later.  Here, we wait up to 10
+	   usec for the RDY bit to go zero. */
+	for (retries = 0; retries < loops_per_jiffy / (100000/HZ); retries++)
+		if ((inb_p(dtlk_port_tts) & TTS_WRITABLE) == 0)
+			break;
+
+#ifdef TRACINGMORE
+	printk(")\n");
+#endif
+	return 0;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
new file mode 100644
index 0000000..d9aab64
--- /dev/null
+++ b/drivers/char/efirtc.c
@@ -0,0 +1,388 @@
+/*
+ * EFI Time Services Driver for Linux
+ *
+ * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * Based on skeleton from the drivers/char/rtc.c driver by P. Gortmaker
+ *
+ * This code provides an architected & portable interface to the real time
+ * clock by using EFI instead of direct bit fiddling. The functionalities are 
+ * quite different from the rtc.c driver. The only way to talk to the device 
+ * is by using ioctl(). There is a /proc interface which provides the raw 
+ * information.
+ *
+ * Please note that we have kept the API as close as possible to the
+ * legacy RTC. The standard /sbin/hwclock program should work normally 
+ * when used to get/set the time.
+ *
+ * NOTES:
+ *	- Locking is required for safe execution of EFI calls with regards
+ *	  to interrupts and SMP.
+ *
+ * TODO (December 1999):
+ * 	- provide the API to set/get the WakeUp Alarm (different from the
+ *	  rtc.c alarm).
+ *	- SMP testing
+ * 	- Add module support
+ */
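+
+/* A minimal user-space sketch, compiled out: reading the clock through
+ * the legacy-RTC-compatible ioctl handled below.  The /dev/efirtc node
+ * name is an assumption.
+ */
+#if 0
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <linux/rtc.h>
+
+int main(void)
+{
+	struct rtc_time tm;
+	int fd = open("/dev/efirtc", O_RDONLY);
+
+	if (fd < 0 || ioctl(fd, RTC_RD_TIME, &tm) < 0)
+		return 1;
+	/* tm_year is years since 1900, tm_mon is 0-based */
+	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
+	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+	       tm.tm_hour, tm.tm_min, tm.tm_sec);
+	close(fd);
+	return 0;
+}
+#endif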
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/efi.h>
+#include <linux/uaccess.h>
+
+
+#define EFI_RTC_VERSION		"0.4"
+
+#define EFI_ISDST (EFI_TIME_ADJUST_DAYLIGHT|EFI_TIME_IN_DAYLIGHT)
+/*
+ * EFI Epoch is 1/1/1998
+ */
+#define EFI_RTC_EPOCH		1998
+
+static DEFINE_SPINLOCK(efi_rtc_lock);
+
+static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
+							unsigned long arg);
+
+#define is_leap(year) \
+          ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
+
+static const unsigned short int __mon_yday[2][13] =
+{
+	/* Normal years.  */
+	{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
+	/* Leap years.  */  
+	{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
+};
+
+/*
+ * returns day of the year [0-365]
+ */
+static inline int
+compute_yday(efi_time_t *eft)
+{
+	/* efi_time_t.month is in [1-12], so we need -1 */
+	return __mon_yday[is_leap(eft->year)][eft->month - 1] + eft->day - 1;
+}
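+/* e.g. March 1st of a leap year: __mon_yday[1][2] + 1 - 1 = 60, the
+ * 31 + 29 days of January and February that precede it. */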
+/*
+ * returns day of the week [0-6] 0=Sunday
+ *
+ * Don't try to provide a year that's before 1998, please!
+ */
+static int
+compute_wday(efi_time_t *eft)
+{
+	int y;
+	int ndays = 0;
+
+	if ( eft->year < 1998 ) {
+		printk(KERN_ERR "efirtc: EFI year < 1998, invalid date\n");
+		return -1;
+	}
+
+	for(y=EFI_RTC_EPOCH; y < eft->year; y++ ) {
+		ndays += 365 + (is_leap(y) ? 1 : 0);
+	}
+	ndays += compute_yday(eft);
+
+	/*
+	 * 4=1/1/1998 was a Thursday
+	 */
+	return (ndays + 4) % 7;
+}
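+/* e.g. 1/1/1999: ndays = 365 (1998 is not a leap year) + yday 0, and
+ * (365 + 4) % 7 = 5, a Friday -- which matches the calendar. */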
+
+static void
+convert_to_efi_time(struct rtc_time *wtime, efi_time_t *eft)
+{
+
+	eft->year	= wtime->tm_year + 1900;
+	eft->month	= wtime->tm_mon + 1; 
+	eft->day	= wtime->tm_mday;
+	eft->hour	= wtime->tm_hour;
+	eft->minute	= wtime->tm_min;
+	eft->second 	= wtime->tm_sec;
+	eft->nanosecond = 0; 
+	eft->daylight	= wtime->tm_isdst ? EFI_ISDST: 0;
+	eft->timezone	= EFI_UNSPECIFIED_TIMEZONE;
+}
+
+static void
+convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime)
+{
+	memset(wtime, 0, sizeof(*wtime));
+	wtime->tm_sec  = eft->second;
+	wtime->tm_min  = eft->minute;
+	wtime->tm_hour = eft->hour;
+	wtime->tm_mday = eft->day;
+	wtime->tm_mon  = eft->month - 1;
+	wtime->tm_year = eft->year - 1900;
+
+	/* day of the week [0-6], Sunday=0 */
+	wtime->tm_wday = compute_wday(eft);
+
+	/* day in the year [0-365] */
+	wtime->tm_yday = compute_yday(eft);
+
+	switch (eft->daylight & EFI_ISDST) {
+		case EFI_ISDST:
+			wtime->tm_isdst = 1;
+			break;
+		case EFI_TIME_ADJUST_DAYLIGHT:
+			wtime->tm_isdst = 0;
+			break;
+		default:
+			wtime->tm_isdst = -1;
+	}
+}
+
+static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
+							unsigned long arg)
+{
+
+	efi_status_t	status;
+	unsigned long	flags;
+	efi_time_t	eft;
+	efi_time_cap_t	cap;
+	struct rtc_time	wtime;
+	struct rtc_wkalrm __user *ewp;
+	unsigned char	enabled, pending;
+
+	switch (cmd) {
+		case RTC_UIE_ON:
+		case RTC_UIE_OFF:
+		case RTC_PIE_ON:
+		case RTC_PIE_OFF:
+		case RTC_AIE_ON:
+		case RTC_AIE_OFF:
+		case RTC_ALM_SET:
+		case RTC_ALM_READ:
+		case RTC_IRQP_READ:
+		case RTC_IRQP_SET:
+		case RTC_EPOCH_READ:
+		case RTC_EPOCH_SET:
+			return -EINVAL;
+
+		case RTC_RD_TIME:
+			spin_lock_irqsave(&efi_rtc_lock, flags);
+
+			status = efi.get_time(&eft, &cap);
+
+			spin_unlock_irqrestore(&efi_rtc_lock,flags);
+
+			if (status != EFI_SUCCESS) {
+				/* should never happen */
+				printk(KERN_ERR "efitime: can't read time\n");
+				return -EINVAL;
+			}
+
+			convert_from_efi_time(&eft, &wtime);
+
+ 			return copy_to_user((void __user *)arg, &wtime,
+					    sizeof (struct rtc_time)) ? - EFAULT : 0;
+
+		case RTC_SET_TIME:
+
+			if (!capable(CAP_SYS_TIME)) return -EACCES;
+
+			if (copy_from_user(&wtime, (struct rtc_time __user *)arg,
+					   sizeof(struct rtc_time)) )
+				return -EFAULT;
+
+			convert_to_efi_time(&wtime, &eft);
+
+			spin_lock_irqsave(&efi_rtc_lock, flags);
+
+			status = efi.set_time(&eft);
+
+			spin_unlock_irqrestore(&efi_rtc_lock,flags);
+
+			return status == EFI_SUCCESS ? 0 : -EINVAL;
+
+		case RTC_WKALM_SET:
+
+			if (!capable(CAP_SYS_TIME)) return -EACCES;
+
+			ewp = (struct rtc_wkalrm __user *)arg;
+
+			if (  get_user(enabled, &ewp->enabled)
+			   || copy_from_user(&wtime, &ewp->time, sizeof(struct rtc_time)) )
+				return -EFAULT;
+
+			convert_to_efi_time(&wtime, &eft);
+
+			spin_lock_irqsave(&efi_rtc_lock, flags);
+			/*
+			 * XXX Fixme:
+			 * As of EFI 0.92 with the firmware I have on my
+			 * machine this call does not seem to work quite
+			 * right
+			 */
+			status = efi.set_wakeup_time((efi_bool_t)enabled, &eft);
+
+			spin_unlock_irqrestore(&efi_rtc_lock,flags);
+
+			return status == EFI_SUCCESS ? 0 : -EINVAL;
+
+		case RTC_WKALM_RD:
+
+			spin_lock_irqsave(&efi_rtc_lock, flags);
+
+			status = efi.get_wakeup_time((efi_bool_t *)&enabled, (efi_bool_t *)&pending, &eft);
+
+			spin_unlock_irqrestore(&efi_rtc_lock,flags);
+
+			if (status != EFI_SUCCESS) return -EINVAL;
+
+			ewp = (struct rtc_wkalrm __user *)arg;
+
+			if (  put_user(enabled, &ewp->enabled)
+			   || put_user(pending, &ewp->pending)) return -EFAULT;
+
+			convert_from_efi_time(&eft, &wtime);
+
+			return copy_to_user(&ewp->time, &wtime,
+					    sizeof(struct rtc_time)) ? -EFAULT : 0;
+	}
+	return -ENOTTY;
+}
+
+/*
+ *	We enforce only one user at a time here with the open/close.
+ *	Also clear the previous interrupt data on an open, and clean
+ *	up things on a close.
+ */
+
+static int efi_rtc_open(struct inode *inode, struct file *file)
+{
+	/*
+	 * nothing special to do here
+	 * We do accept multiple open files at the same time as we
+	 * synchronize on the per call operation.
+	 */
+	return 0;
+}
+
+static int efi_rtc_close(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/*
+ *	The various file operations we support.
+ */
+
+static const struct file_operations efi_rtc_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl	= efi_rtc_ioctl,
+	.open		= efi_rtc_open,
+	.release	= efi_rtc_close,
+	.llseek		= no_llseek,
+};
+
+static struct miscdevice efi_rtc_dev= {
+	EFI_RTC_MINOR,
+	"efirtc",
+	&efi_rtc_fops
+};
+
+/*
+ *	We export RAW EFI information to /proc/driver/efirtc
+ */
+static int efi_rtc_proc_show(struct seq_file *m, void *v)
+{
+	efi_time_t 	eft, alm;
+	efi_time_cap_t	cap;
+	efi_bool_t	enabled, pending;	
+	unsigned long	flags;
+
+	memset(&eft, 0, sizeof(eft));
+	memset(&alm, 0, sizeof(alm));
+	memset(&cap, 0, sizeof(cap));
+
+	spin_lock_irqsave(&efi_rtc_lock, flags);
+
+	efi.get_time(&eft, &cap);
+	efi.get_wakeup_time(&enabled, &pending, &alm);
+
+	spin_unlock_irqrestore(&efi_rtc_lock,flags);
+
+	seq_printf(m,
+		   "Time           : %u:%u:%u.%09u\n"
+		   "Date           : %u-%u-%u\n"
+		   "Daylight       : %u\n",
+		   eft.hour, eft.minute, eft.second, eft.nanosecond, 
+		   eft.year, eft.month, eft.day,
+		   eft.daylight);
+
+	if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
+		seq_puts(m, "Timezone       : unspecified\n");
+	else
+		/* XXX fixme: convert to string? */
+		seq_printf(m, "Timezone       : %u\n", eft.timezone);
+
+	seq_printf(m,
+		   "Alarm Time     : %u:%u:%u.%09u\n"
+		   "Alarm Date     : %u-%u-%u\n"
+		   "Alarm Daylight : %u\n"
+		   "Enabled        : %s\n"
+		   "Pending        : %s\n",
+		   alm.hour, alm.minute, alm.second, alm.nanosecond, 
+		   alm.year, alm.month, alm.day, 
+		   alm.daylight,
+		   enabled == 1 ? "yes" : "no",
+		   pending == 1 ? "yes" : "no");
+
+	if (alm.timezone == EFI_UNSPECIFIED_TIMEZONE)
+		seq_puts(m, "Timezone       : unspecified\n");
+	else
+		/* XXX fixme: convert to string? */
+		seq_printf(m, "Timezone       : %u\n", alm.timezone);
+
+	/*
+	 * now prints the capabilities
+	 */
+	seq_printf(m,
+		   "Resolution     : %u\n"
+		   "Accuracy       : %u\n"
+		   "SetstoZero     : %u\n",
+		   cap.resolution, cap.accuracy, cap.sets_to_zero);
+
+	return 0;
+}
+static int __init 
+efi_rtc_init(void)
+{
+	int ret;
+	struct proc_dir_entry *dir;
+
+	printk(KERN_INFO "EFI Time Services Driver v%s\n", EFI_RTC_VERSION);
+
+	ret = misc_register(&efi_rtc_dev);
+	if (ret) {
+		printk(KERN_ERR "efirtc: can't misc_register on minor=%d\n",
+				EFI_RTC_MINOR);
+		return ret;
+	}
+
+	dir = proc_create_single("driver/efirtc", 0, NULL, efi_rtc_proc_show);
+	if (dir == NULL) {
+		printk(KERN_ERR "efirtc: can't create /proc/driver/efirtc.\n");
+		misc_deregister(&efi_rtc_dev);
+		return -1;
+	}
+	return 0;
+}
+device_initcall(efi_rtc_init);
+
+/*
+MODULE_LICENSE("GPL");
+*/
diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c
new file mode 100644
index 0000000..14e728f
--- /dev/null
+++ b/drivers/char/generic_nvram.c
@@ -0,0 +1,159 @@
+/*
+ * Generic /dev/nvram driver for architectures providing some
+ * "generic" hooks, that is :
+ *
+ * nvram_read_byte, nvram_write_byte, nvram_sync, nvram_get_size
+ *
+ * Note that an additional hook is supported for PowerMac only
+ * for getting the nvram "partition" informations
+ *
+ */
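+
+/* A minimal user-space sketch, compiled out: dumping the first bytes
+ * through the byte-at-a-time read path below.  The /dev/nvram node
+ * name is an assumption.
+ */
+#if 0
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int main(void)
+{
+	unsigned char b[16];
+	int i, fd = open("/dev/nvram", O_RDONLY);
+
+	if (fd < 0 || read(fd, b, sizeof(b)) != sizeof(b))
+		return 1;
+	for (i = 0; i < (int)sizeof(b); i++)
+		printf("%02x ", b[i]);
+	printf("\n");
+	close(fd);
+	return 0;
+}
+#endif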
+
+#define NVRAM_VERSION "1.1"
+
+#include <linux/module.h>
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/uaccess.h>
+#include <asm/nvram.h>
+#ifdef CONFIG_PPC_PMAC
+#include <asm/machdep.h>
+#endif
+
+#define NVRAM_SIZE	8192
+
+static DEFINE_MUTEX(nvram_mutex);
+static ssize_t nvram_len;
+
+static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
+{
+	return generic_file_llseek_size(file, offset, origin,
+					MAX_LFS_FILESIZE, nvram_len);
+}
+
+static ssize_t read_nvram(struct file *file, char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	unsigned int i;
+	char __user *p = buf;
+
+	if (!access_ok(VERIFY_WRITE, buf, count))
+		return -EFAULT;
+	if (*ppos >= nvram_len)
+		return 0;
+	for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count)
+		if (__put_user(nvram_read_byte(i), p))
+			return -EFAULT;
+	*ppos = i;
+	return p - buf;
+}
+
+static ssize_t write_nvram(struct file *file, const char __user *buf,
+			   size_t count, loff_t *ppos)
+{
+	unsigned int i;
+	const char __user *p = buf;
+	char c;
+
+	if (!access_ok(VERIFY_READ, buf, count))
+		return -EFAULT;
+	if (*ppos >= nvram_len)
+		return 0;
+	for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count) {
+		if (__get_user(c, p))
+			return -EFAULT;
+		nvram_write_byte(c, i);
+	}
+	*ppos = i;
+	return p - buf;
+}
+
+static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	switch(cmd) {
+#ifdef CONFIG_PPC_PMAC
+	case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
+		printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
+	case IOC_NVRAM_GET_OFFSET: {
+		int part, offset;
+
+		if (!machine_is(powermac))
+			return -EINVAL;
+		if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0)
+			return -EFAULT;
+		if (part < pmac_nvram_OF || part > pmac_nvram_NR)
+			return -EINVAL;
+		offset = pmac_get_partition(part);
+		if (copy_to_user((void __user*)arg, &offset, sizeof(offset)) != 0)
+			return -EFAULT;
+		break;
+	}
+#endif /* CONFIG_PPC_PMAC */
+	case IOC_NVRAM_SYNC:
+		nvram_sync();
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	mutex_lock(&nvram_mutex);
+	ret = nvram_ioctl(file, cmd, arg);
+	mutex_unlock(&nvram_mutex);
+
+	return ret;
+}
+
+const struct file_operations nvram_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= nvram_llseek,
+	.read		= read_nvram,
+	.write		= write_nvram,
+	.unlocked_ioctl	= nvram_unlocked_ioctl,
+};
+
+static struct miscdevice nvram_dev = {
+	NVRAM_MINOR,
+	"nvram",
+	&nvram_fops
+};
+
+int __init nvram_init(void)
+{
+	int ret = 0;
+
+	printk(KERN_INFO "Generic non-volatile memory driver v%s\n",
+		NVRAM_VERSION);
+	ret = misc_register(&nvram_dev);
+	if (ret != 0)
+		goto out;
+
+	nvram_len = nvram_get_size();
+	if (nvram_len < 0)
+		nvram_len = NVRAM_SIZE;
+
+out:
+	return ret;
+}
+
+void __exit nvram_cleanup(void)
+{
+        misc_deregister( &nvram_dev );
+}
+
+module_init(nvram_init);
+module_exit(nvram_cleanup);
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
new file mode 100644
index 0000000..7700280
--- /dev/null
+++ b/drivers/char/hangcheck-timer.c
@@ -0,0 +1,188 @@
+/*
+ * hangcheck-timer.c
+ *
+ * Driver for a little io fencing timer.
+ *
+ * Copyright (C) 2002, 2003 Oracle.  All rights reserved.
+ *
+ * Author: Joel Becker <joel.becker@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License version 2 as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+/*
+ * The hangcheck-timer driver uses ktime_get_ns() to catch delays that
+ * jiffies does not notice.  A timer is set.  When the timer fires, it
+ * checks whether it was delayed and if that delay exceeds a given
+ * margin of error.  The hangcheck_tick module parameter takes the timer
+ * duration in seconds.  The hangcheck_margin parameter defines the
+ * margin of error, in seconds.  The defaults are 180 seconds for the
+ * timer and 60 seconds for the margin of error, matching the
+ * DEFAULT_IOFENCE_* values below.  IOW, a timer is set
+ * for 180 seconds.  When the timer fires, the callback checks the
+ * actual duration that the timer waited.  If the duration exceeds the
+ * allotted time and margin (here 180 + 60, or 240 seconds), the machine
+ * is restarted.  A healthy machine will have the duration match the
+ * expected timeout very closely.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/sysrq.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+
+#define VERSION_STR "0.9.1"
+
+#define DEFAULT_IOFENCE_MARGIN 60	/* Default fudge factor, in seconds */
+#define DEFAULT_IOFENCE_TICK 180	/* Default timer timeout, in seconds */
+
+static int hangcheck_tick = DEFAULT_IOFENCE_TICK;
+static int hangcheck_margin = DEFAULT_IOFENCE_MARGIN;
+static int hangcheck_reboot;  /* Defaults to not reboot */
+static int hangcheck_dump_tasks;  /* Defaults to not dumping SysRQ T */
+
+/* options - modular */
+module_param(hangcheck_tick, int, 0);
+MODULE_PARM_DESC(hangcheck_tick, "Timer delay.");
+module_param(hangcheck_margin, int, 0);
+MODULE_PARM_DESC(hangcheck_margin, "If the hangcheck timer has been delayed more than hangcheck_margin seconds, the driver will fire.");
+module_param(hangcheck_reboot, int, 0);
+MODULE_PARM_DESC(hangcheck_reboot, "If nonzero, the machine will reboot when the timer margin is exceeded.");
+module_param(hangcheck_dump_tasks, int, 0);
+MODULE_PARM_DESC(hangcheck_dump_tasks, "If nonzero, the machine will dump the system task state when the timer margin is exceeded.");
+
+MODULE_AUTHOR("Oracle");
+MODULE_DESCRIPTION("Hangcheck-timer detects when the system has gone out to lunch past a certain margin.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION_STR);
+
+/* options - nonmodular */
+#ifndef MODULE
+
+static int __init hangcheck_parse_tick(char *str)
+{
+	int par;
+	if (get_option(&str,&par))
+		hangcheck_tick = par;
+	return 1;
+}
+
+static int __init hangcheck_parse_margin(char *str)
+{
+	int par;
+	if (get_option(&str,&par))
+		hangcheck_margin = par;
+	return 1;
+}
+
+static int __init hangcheck_parse_reboot(char *str)
+{
+	int par;
+	if (get_option(&str,&par))
+		hangcheck_reboot = par;
+	return 1;
+}
+
+static int __init hangcheck_parse_dump_tasks(char *str)
+{
+	int par;
+	if (get_option(&str,&par))
+		hangcheck_dump_tasks = par;
+	return 1;
+}
+
+__setup("hcheck_tick", hangcheck_parse_tick);
+__setup("hcheck_margin", hangcheck_parse_margin);
+__setup("hcheck_reboot", hangcheck_parse_reboot);
+__setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
+#endif /* not MODULE */
+
+#define TIMER_FREQ 1000000000ULL
+
+/* Last time scheduled */
+static unsigned long long hangcheck_tsc, hangcheck_tsc_margin;
+
+static void hangcheck_fire(struct timer_list *);
+
+static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire);
+
+static void hangcheck_fire(struct timer_list *unused)
+{
+	unsigned long long cur_tsc, tsc_diff;
+
+	cur_tsc = ktime_get_ns();
+
+	if (cur_tsc > hangcheck_tsc)
+		tsc_diff = cur_tsc - hangcheck_tsc;
+	else
+		tsc_diff = (cur_tsc + (~0ULL - hangcheck_tsc)); /* or something */
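+	/* (as unsigned 64-bit values, cur_tsc - hangcheck_tsc would
+	 * already wrap correctly modulo 2^64; the branch above is one
+	 * nanosecond short in the wrapped case, which is harmless at
+	 * this margin) */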
+
+	if (tsc_diff > hangcheck_tsc_margin) {
+		if (hangcheck_dump_tasks) {
+			printk(KERN_CRIT "Hangcheck: Task state:\n");
+#ifdef CONFIG_MAGIC_SYSRQ
+			handle_sysrq('t');
+#endif  /* CONFIG_MAGIC_SYSRQ */
+		}
+		if (hangcheck_reboot) {
+			printk(KERN_CRIT "Hangcheck: hangcheck is restarting the machine.\n");
+			emergency_restart();
+		} else {
+			printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
+		}
+	}
+#if 0
+	/*
+	 * Enable to investigate delays in detail
+	 */
+	printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n",
+			tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
+#endif
+	mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
+	hangcheck_tsc = ktime_get_ns();
+}
+
+
+static int __init hangcheck_init(void)
+{
+	printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
+	       VERSION_STR, hangcheck_tick, hangcheck_margin);
+	hangcheck_tsc_margin =
+		(unsigned long long)hangcheck_margin + hangcheck_tick;
+	hangcheck_tsc_margin *= TIMER_FREQ;
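+	/* with the defaults: (60 + 180) s * 10^9 ns/s = 240 seconds */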
+
+	hangcheck_tsc = ktime_get_ns();
+	mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
+
+	return 0;
+}
+
+
+static void __exit hangcheck_exit(void)
+{
+	del_timer_sync(&hangcheck_ticktock);
+        printk("Hangcheck: Stopped hangcheck timer.\n");
+}
+
+module_init(hangcheck_init);
+module_exit(hangcheck_exit);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
new file mode 100644
index 0000000..4a22b4b
--- /dev/null
+++ b/drivers/char/hpet.c
@@ -0,0 +1,1084 @@
+/*
+ * Intel & MS High Precision Event Timer Implementation.
+ *
+ * Copyright (C) 2003 Intel Corporation
+ *	Venki Pallipadi
+ * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
+ *	Bob Picco <robert.picco@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/major.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/sysctl.h>
+#include <linux/wait.h>
+#include <linux/sched/signal.h>
+#include <linux/bcd.h>
+#include <linux/seq_file.h>
+#include <linux/bitops.h>
+#include <linux/compat.h>
+#include <linux/clocksource.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include <linux/hpet.h>
+#include <asm/current.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+
+/*
+ * The High Precision Event Timer driver.
+ * This driver is closely modelled after the rtc.c driver.
+ * See HPET spec revision 1.
+ */
+#define	HPET_USER_FREQ	(64)
+#define	HPET_DRIFT	(500)
+
+#define HPET_RANGE_SIZE		1024	/* from HPET spec */
+
+
+/* WARNING -- don't get confused.  These macros are never used
+ * to write the (single) counter, and rarely to read it.
+ * They're badly named; to fix, someday.
+ */
+#if BITS_PER_LONG == 64
+#define	write_counter(V, MC)	writeq(V, MC)
+#define	read_counter(MC)	readq(MC)
+#else
+#define	write_counter(V, MC)	writel(V, MC)
+#define	read_counter(MC)	readl(MC)
+#endif
+
+static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
+static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
+
+/* This clocksource driver currently only works on ia64 */
+#ifdef CONFIG_IA64
+static void __iomem *hpet_mctr;
+
+static u64 read_hpet(struct clocksource *cs)
+{
+	return (u64)read_counter((void __iomem *)hpet_mctr);
+}
+
+static struct clocksource clocksource_hpet = {
+	.name		= "hpet",
+	.rating		= 250,
+	.read		= read_hpet,
+	.mask		= CLOCKSOURCE_MASK(64),
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+static struct clocksource *hpet_clocksource;
+#endif
+
+/* A lock for concurrent access by app and isr hpet activity. */
+static DEFINE_SPINLOCK(hpet_lock);
+
+#define	HPET_DEV_NAME	(7)
+
+struct hpet_dev {
+	struct hpets *hd_hpets;
+	struct hpet __iomem *hd_hpet;
+	struct hpet_timer __iomem *hd_timer;
+	unsigned long hd_ireqfreq;
+	unsigned long hd_irqdata;
+	wait_queue_head_t hd_waitqueue;
+	struct fasync_struct *hd_async_queue;
+	unsigned int hd_flags;
+	unsigned int hd_irq;
+	unsigned int hd_hdwirq;
+	char hd_name[HPET_DEV_NAME];
+};
+
+struct hpets {
+	struct hpets *hp_next;
+	struct hpet __iomem *hp_hpet;
+	unsigned long hp_hpet_phys;
+	struct clocksource *hp_clocksource;
+	unsigned long long hp_tick_freq;
+	unsigned long hp_delta;
+	unsigned int hp_ntimer;
+	unsigned int hp_which;
+	struct hpet_dev hp_dev[1];
+};
+
+static struct hpets *hpets;
+
+#define	HPET_OPEN		0x0001
+#define	HPET_IE			0x0002	/* interrupt enabled */
+#define	HPET_PERIODIC		0x0004
+#define	HPET_SHARED_IRQ		0x0008
+
+
+#ifndef readq
+static inline unsigned long long readq(void __iomem *addr)
+{
+	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(unsigned long long v, void __iomem *addr)
+{
+	writel(v & 0xffffffff, addr);
+	writel(v >> 32, addr + 4);
+}
+#endif
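+
+/* Note: these fallbacks issue two separate 32-bit MMIO accesses, so
+ * they are not atomic; a register whose halves change between the two
+ * accesses can be read torn.
+ */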
+
+static irqreturn_t hpet_interrupt(int irq, void *data)
+{
+	struct hpet_dev *devp;
+	unsigned long isr;
+
+	devp = data;
+	isr = 1 << (devp - devp->hd_hpets->hp_dev);
+
+	if ((devp->hd_flags & HPET_SHARED_IRQ) &&
+	    !(isr & readl(&devp->hd_hpet->hpet_isr)))
+		return IRQ_NONE;
+
+	spin_lock(&hpet_lock);
+	devp->hd_irqdata++;
+
+	/*
+	 * For non-periodic timers, increment the accumulator.
+	 * This has the effect of treating non-periodic like periodic.
+	 */
+	if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
+		unsigned long m, t, mc, base, k;
+		struct hpet __iomem *hpet = devp->hd_hpet;
+		struct hpets *hpetp = devp->hd_hpets;
+
+		t = devp->hd_ireqfreq;
+		m = read_counter(&devp->hd_timer->hpet_compare);
+		mc = read_counter(&hpet->hpet_mc);
+		/* The time for the next interrupt would logically be t + m,
+		 * however, if we are very unlucky and the interrupt is delayed
+		 * for longer than t then we will completely miss the next
+		 * interrupt if we set t + m and an application will hang.
+		 * Therefore we need to make a more complex computation assuming
+		 * that there exists a k for which the following is true:
+		 * k * t + base < mc + delta
+		 * (k + 1) * t + base > mc + delta
+		 * where t is the interval in hpet ticks for the given freq,
+		 * base is the theoretical start value 0 < base < t,
+		 * mc is the main counter value at the time of the interrupt,
+		 * delta is the time it takes to write a value to the
+		 * comparator.
+		 * k may then be computed as (mc - base + delta) / t .
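+		 * For example, with t = 1000, mc = 5432 and delta = 10:
+		 * base = 432, k = (5432 - 432 + 10) / 1000 = 5, and the
+		 * comparator is set to (5 + 1) * 1000 + 432 = 6432, the
+		 * first point on the t grid safely past mc + delta.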
+		 */
+		base = mc % t;
+		k = (mc - base + hpetp->hp_delta) / t;
+		write_counter(t * (k + 1) + base,
+			      &devp->hd_timer->hpet_compare);
+	}
+
+	if (devp->hd_flags & HPET_SHARED_IRQ)
+		writel(isr, &devp->hd_hpet->hpet_isr);
+	spin_unlock(&hpet_lock);
+
+	wake_up_interruptible(&devp->hd_waitqueue);
+
+	kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);
+
+	return IRQ_HANDLED;
+}
+
+static void hpet_timer_set_irq(struct hpet_dev *devp)
+{
+	unsigned long v;
+	int irq, gsi;
+	struct hpet_timer __iomem *timer;
+
+	spin_lock_irq(&hpet_lock);
+	if (devp->hd_hdwirq) {
+		spin_unlock_irq(&hpet_lock);
+		return;
+	}
+
+	timer = devp->hd_timer;
+
+	/* we prefer level triggered mode */
+	v = readl(&timer->hpet_config);
+	if (!(v & Tn_INT_TYPE_CNF_MASK)) {
+		v |= Tn_INT_TYPE_CNF_MASK;
+		writel(v, &timer->hpet_config);
+	}
+	spin_unlock_irq(&hpet_lock);
+
+	v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >>
+				 Tn_INT_ROUTE_CAP_SHIFT;
+
+	/*
+	 * In PIC mode, skip IRQ0-4, IRQ6-9 and IRQ12-15, which are always
+	 * used by legacy devices.  In IO APIC mode, we skip all the legacy IRQs.
+	 */
+	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC)
+		v &= ~0xf3df;
+	else
+		v &= ~0xffff;
+
+	for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
+		if (irq >= nr_irqs) {
+			irq = HPET_MAX_IRQ;
+			break;
+		}
+
+		gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
+					ACPI_ACTIVE_LOW);
+		if (gsi > 0)
+			break;
+
+		/* FIXME: Setup interrupt source table */
+	}
+
+	if (irq < HPET_MAX_IRQ) {
+		spin_lock_irq(&hpet_lock);
+		v = readl(&timer->hpet_config);
+		v |= irq << Tn_INT_ROUTE_CNF_SHIFT;
+		writel(v, &timer->hpet_config);
+		devp->hd_hdwirq = gsi;
+		spin_unlock_irq(&hpet_lock);
+	}
+	return;
+}
+
+static int hpet_open(struct inode *inode, struct file *file)
+{
+	struct hpet_dev *devp;
+	struct hpets *hpetp;
+	int i;
+
+	if (file->f_mode & FMODE_WRITE)
+		return -EINVAL;
+
+	mutex_lock(&hpet_mutex);
+	spin_lock_irq(&hpet_lock);
+
+	for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
+		for (i = 0; i < hpetp->hp_ntimer; i++)
+			if (hpetp->hp_dev[i].hd_flags & HPET_OPEN)
+				continue;
+			else {
+				devp = &hpetp->hp_dev[i];
+				break;
+			}
+
+	if (!devp) {
+		spin_unlock_irq(&hpet_lock);
+		mutex_unlock(&hpet_mutex);
+		return -EBUSY;
+	}
+
+	file->private_data = devp;
+	devp->hd_irqdata = 0;
+	devp->hd_flags |= HPET_OPEN;
+	spin_unlock_irq(&hpet_lock);
+	mutex_unlock(&hpet_mutex);
+
+	hpet_timer_set_irq(devp);
+
+	return 0;
+}
+
+static ssize_t
+hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long data;
+	ssize_t retval;
+	struct hpet_dev *devp;
+
+	devp = file->private_data;
+	if (!devp->hd_ireqfreq)
+		return -EIO;
+
+	if (count < sizeof(unsigned long))
+		return -EINVAL;
+
+	add_wait_queue(&devp->hd_waitqueue, &wait);
+
+	for ( ; ; ) {
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		spin_lock_irq(&hpet_lock);
+		data = devp->hd_irqdata;
+		devp->hd_irqdata = 0;
+		spin_unlock_irq(&hpet_lock);
+
+		if (data)
+			break;
+		else if (file->f_flags & O_NONBLOCK) {
+			retval = -EAGAIN;
+			goto out;
+		} else if (signal_pending(current)) {
+			retval = -ERESTARTSYS;
+			goto out;
+		}
+		schedule();
+	}
+
+	retval = put_user(data, (unsigned long __user *)buf);
+	if (!retval)
+		retval = sizeof(unsigned long);
+out:
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&devp->hd_waitqueue, &wait);
+
+	return retval;
+}
+
+static __poll_t hpet_poll(struct file *file, poll_table * wait)
+{
+	unsigned long v;
+	struct hpet_dev *devp;
+
+	devp = file->private_data;
+
+	if (!devp->hd_ireqfreq)
+		return 0;
+
+	poll_wait(file, &devp->hd_waitqueue, wait);
+
+	spin_lock_irq(&hpet_lock);
+	v = devp->hd_irqdata;
+	spin_unlock_irq(&hpet_lock);
+
+	if (v != 0)
+		return EPOLLIN | EPOLLRDNORM;
+
+	return 0;
+}
+
+#ifdef CONFIG_HPET_MMAP
+#ifdef CONFIG_HPET_MMAP_DEFAULT
+static int hpet_mmap_enabled = 1;
+#else
+static int hpet_mmap_enabled = 0;
+#endif
+
+static __init int hpet_mmap_enable(char *str)
+{
+	get_option(&str, &hpet_mmap_enabled);
+	pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
+	return 1;
+}
+__setup("hpet_mmap", hpet_mmap_enable);
+
+static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct hpet_dev *devp;
+	unsigned long addr;
+
+	if (!hpet_mmap_enabled)
+		return -EACCES;
+
+	devp = file->private_data;
+	addr = devp->hd_hpets->hp_hpet_phys;
+
+	if (addr & (PAGE_SIZE - 1))
+		return -ENOSYS;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	return vm_iomap_memory(vma, addr, PAGE_SIZE);
+}
+#else
+static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	return -ENOSYS;
+}
+#endif
+
+static int hpet_fasync(int fd, struct file *file, int on)
+{
+	struct hpet_dev *devp;
+
+	devp = file->private_data;
+
+	if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0)
+		return 0;
+	else
+		return -EIO;
+}
+
+static int hpet_release(struct inode *inode, struct file *file)
+{
+	struct hpet_dev *devp;
+	struct hpet_timer __iomem *timer;
+	int irq = 0;
+
+	devp = file->private_data;
+	timer = devp->hd_timer;
+
+	spin_lock_irq(&hpet_lock);
+
+	writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
+	       &timer->hpet_config);
+
+	irq = devp->hd_irq;
+	devp->hd_irq = 0;
+
+	devp->hd_ireqfreq = 0;
+
+	if (devp->hd_flags & HPET_PERIODIC
+	    && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
+		unsigned long v;
+
+		v = readq(&timer->hpet_config);
+		v ^= Tn_TYPE_CNF_MASK;
+		writeq(v, &timer->hpet_config);
+	}
+
+	devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC);
+	spin_unlock_irq(&hpet_lock);
+
+	if (irq)
+		free_irq(irq, devp);
+
+	file->private_data = NULL;
+	return 0;
+}
+
+static int hpet_ioctl_ieon(struct hpet_dev *devp)
+{
+	struct hpet_timer __iomem *timer;
+	struct hpet __iomem *hpet;
+	struct hpets *hpetp;
+	int irq;
+	unsigned long g, v, t, m;
+	unsigned long flags, isr;
+
+	timer = devp->hd_timer;
+	hpet = devp->hd_hpet;
+	hpetp = devp->hd_hpets;
+
+	if (!devp->hd_ireqfreq)
+		return -EIO;
+
+	spin_lock_irq(&hpet_lock);
+
+	if (devp->hd_flags & HPET_IE) {
+		spin_unlock_irq(&hpet_lock);
+		return -EBUSY;
+	}
+
+	devp->hd_flags |= HPET_IE;
+
+	if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK)
+		devp->hd_flags |= HPET_SHARED_IRQ;
+	spin_unlock_irq(&hpet_lock);
+
+	irq = devp->hd_hdwirq;
+
+	if (irq) {
+		unsigned long irq_flags;
+
+		if (devp->hd_flags & HPET_SHARED_IRQ) {
+			/*
+			 * To prevent the interrupt handler from seeing an
+			 * unwanted interrupt status bit, program the timer
+			 * so that it will not fire in the near future ...
+			 */
+			writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
+			       &timer->hpet_config);
+			write_counter(read_counter(&hpet->hpet_mc),
+				      &timer->hpet_compare);
+			/* ... and clear any left-over status. */
+			isr = 1 << (devp - devp->hd_hpets->hp_dev);
+			writel(isr, &hpet->hpet_isr);
+		}
+
+		sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
+		irq_flags = devp->hd_flags & HPET_SHARED_IRQ ? IRQF_SHARED : 0;
+		if (request_irq(irq, hpet_interrupt, irq_flags,
+				devp->hd_name, (void *)devp)) {
+			printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
+			irq = 0;
+		}
+	}
+
+	if (irq == 0) {
+		spin_lock_irq(&hpet_lock);
+		devp->hd_flags ^= HPET_IE;
+		spin_unlock_irq(&hpet_lock);
+		return -EIO;
+	}
+
+	devp->hd_irq = irq;
+	t = devp->hd_ireqfreq;
+	v = readq(&timer->hpet_config);
+
+	/* 64-bit comparators are not yet supported through the ioctls,
+	 * so force this into 32-bit mode if it supports both modes
+	 */
+	g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK;
+
+	if (devp->hd_flags & HPET_PERIODIC) {
+		g |= Tn_TYPE_CNF_MASK;
+		v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK;
+		writeq(v, &timer->hpet_config);
+		local_irq_save(flags);
+
+		/*
+		 * NOTE: First we modify the hidden accumulator
+		 * register supported by periodic-capable comparators.
+		 * We never want to modify the (single) counter; that
+		 * would affect all the comparators. The value written
+		 * is the counter value when the first interrupt is due.
+		 */
+		m = read_counter(&hpet->hpet_mc);
+		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
+		/*
+		 * Then we modify the comparator, indicating the period
+		 * for subsequent interrupt.
+		 */
+		write_counter(t, &timer->hpet_compare);
+	} else {
+		local_irq_save(flags);
+		m = read_counter(&hpet->hpet_mc);
+		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
+	}
+
+	if (devp->hd_flags & HPET_SHARED_IRQ) {
+		isr = 1 << (devp - devp->hd_hpets->hp_dev);
+		writel(isr, &hpet->hpet_isr);
+	}
+	writeq(g, &timer->hpet_config);
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+/* converts Hz to number of timer ticks */
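+/* (e.g. with the common 14.318180 MHz counter, a request for 64 Hz
+ * yields (14318180 + 32) / 64 = 223722 ticks between interrupts) */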
+static inline unsigned long hpet_time_div(struct hpets *hpets,
+					  unsigned long dis)
+{
+	unsigned long long m;
+
+	m = hpets->hp_tick_freq + (dis >> 1);
+	do_div(m, dis);
+	return (unsigned long)m;
+}
+
+static int
+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
+		  struct hpet_info *info)
+{
+	struct hpet_timer __iomem *timer;
+	struct hpets *hpetp;
+	int err;
+	unsigned long v;
+
+	switch (cmd) {
+	case HPET_IE_OFF:
+	case HPET_INFO:
+	case HPET_EPI:
+	case HPET_DPI:
+	case HPET_IRQFREQ:
+		timer = devp->hd_timer;
+		hpetp = devp->hd_hpets;
+		break;
+	case HPET_IE_ON:
+		return hpet_ioctl_ieon(devp);
+	default:
+		return -EINVAL;
+	}
+
+	err = 0;
+
+	switch (cmd) {
+	case HPET_IE_OFF:
+		if ((devp->hd_flags & HPET_IE) == 0)
+			break;
+		v = readq(&timer->hpet_config);
+		v &= ~Tn_INT_ENB_CNF_MASK;
+		writeq(v, &timer->hpet_config);
+		if (devp->hd_irq) {
+			free_irq(devp->hd_irq, devp);
+			devp->hd_irq = 0;
+		}
+		devp->hd_flags ^= HPET_IE;
+		break;
+	case HPET_INFO:
+		{
+			memset(info, 0, sizeof(*info));
+			if (devp->hd_ireqfreq)
+				info->hi_ireqfreq =
+					hpet_time_div(hpetp, devp->hd_ireqfreq);
+			info->hi_flags =
+			    readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
+			info->hi_hpet = hpetp->hp_which;
+			info->hi_timer = devp - hpetp->hp_dev;
+			break;
+		}
+	case HPET_EPI:
+		v = readq(&timer->hpet_config);
+		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
+			err = -ENXIO;
+			break;
+		}
+		devp->hd_flags |= HPET_PERIODIC;
+		break;
+	case HPET_DPI:
+		v = readq(&timer->hpet_config);
+		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
+			err = -ENXIO;
+			break;
+		}
+		if (devp->hd_flags & HPET_PERIODIC &&
+		    readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
+			v = readq(&timer->hpet_config);
+			v ^= Tn_TYPE_CNF_MASK;
+			writeq(v, &timer->hpet_config);
+		}
+		devp->hd_flags &= ~HPET_PERIODIC;
+		break;
+	case HPET_IRQFREQ:
+		if ((arg > hpet_max_freq) &&
+		    !capable(CAP_SYS_RESOURCE)) {
+			err = -EACCES;
+			break;
+		}
+
+		if (!arg) {
+			err = -EINVAL;
+			break;
+		}
+
+		devp->hd_ireqfreq = hpet_time_div(hpetp, arg);
+	}
+
+	return err;
+}
+
+static long
+hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct hpet_info info;
+	int err;
+
+	mutex_lock(&hpet_mutex);
+	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
+	mutex_unlock(&hpet_mutex);
+
+	if ((cmd == HPET_INFO) && !err &&
+	    (copy_to_user((void __user *)arg, &info, sizeof(info))))
+		err = -EFAULT;
+
+	return err;
+}
+
+#ifdef CONFIG_COMPAT
+struct compat_hpet_info {
+	compat_ulong_t hi_ireqfreq;	/* Hz */
+	compat_ulong_t hi_flags;	/* information */
+	unsigned short hi_hpet;
+	unsigned short hi_timer;
+};
+
+static long
+hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct hpet_info info;
+	int err;
+
+	mutex_lock(&hpet_mutex);
+	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
+	mutex_unlock(&hpet_mutex);
+
+	if ((cmd == HPET_INFO) && !err) {
+		struct compat_hpet_info __user *u = compat_ptr(arg);
+		if (put_user(info.hi_ireqfreq, &u->hi_ireqfreq) ||
+		    put_user(info.hi_flags, &u->hi_flags) ||
+		    put_user(info.hi_hpet, &u->hi_hpet) ||
+		    put_user(info.hi_timer, &u->hi_timer))
+			err = -EFAULT;
+	}
+
+	return err;
+}
+#endif
+
+static const struct file_operations hpet_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.read = hpet_read,
+	.poll = hpet_poll,
+	.unlocked_ioctl = hpet_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = hpet_compat_ioctl,
+#endif
+	.open = hpet_open,
+	.release = hpet_release,
+	.fasync = hpet_fasync,
+	.mmap = hpet_mmap,
+};
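+
+/* A minimal user-space sketch, compiled out: requesting a 64 Hz
+ * interrupt stream through the ioctls above.  The /dev/hpet node name
+ * is an assumption.
+ */
+#if 0
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <linux/hpet.h>
+
+int main(void)
+{
+	unsigned long data;
+	int fd = open("/dev/hpet", O_RDONLY);	/* hpet_open() rejects writers */
+
+	if (fd < 0)
+		return 1;
+	if (ioctl(fd, HPET_IRQFREQ, 64) < 0 ||	/* Hz; capped by max-user-freq */
+	    ioctl(fd, HPET_IE_ON, 0) < 0)
+		return 1;
+	/* blocks until an interrupt, then yields the count since last read */
+	if (read(fd, &data, sizeof(data)) == sizeof(data))
+		printf("%lu interrupt(s)\n", data);
+	ioctl(fd, HPET_IE_OFF, 0);
+	close(fd);
+	return 0;
+}
+#endif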
+
+static int hpet_is_known(struct hpet_data *hdp)
+{
+	struct hpets *hpetp;
+
+	for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
+		if (hpetp->hp_hpet_phys == hdp->hd_phys_address)
+			return 1;
+
+	return 0;
+}
+
+static struct ctl_table hpet_table[] = {
+	{
+	 .procname = "max-user-freq",
+	 .data = &hpet_max_freq,
+	 .maxlen = sizeof(int),
+	 .mode = 0644,
+	 .proc_handler = proc_dointvec,
+	 },
+	{}
+};
+
+static struct ctl_table hpet_root[] = {
+	{
+	 .procname = "hpet",
+	 .maxlen = 0,
+	 .mode = 0555,
+	 .child = hpet_table,
+	 },
+	{}
+};
+
+static struct ctl_table dev_root[] = {
+	{
+	 .procname = "dev",
+	 .maxlen = 0,
+	 .mode = 0555,
+	 .child = hpet_root,
+	 },
+	{}
+};
+
+static struct ctl_table_header *sysctl_header;
+
+/*
+ * Adjustment for when arming the timer with
+ * initial conditions.  That is, main counter
+ * ticks expired before interrupts are enabled.
+ */
+#define	TICK_CALIBRATE	(1000UL)
+
+static unsigned long __hpet_calibrate(struct hpets *hpetp)
+{
+	struct hpet_timer __iomem *timer = NULL;
+	unsigned long t, m, count, i, flags, start;
+	struct hpet_dev *devp;
+	int j;
+	struct hpet __iomem *hpet;
+
+	for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
+		if ((devp->hd_flags & HPET_OPEN) == 0) {
+			timer = devp->hd_timer;
+			break;
+		}
+
+	if (!timer)
+		return 0;
+
+	hpet = hpetp->hp_hpet;
+	t = read_counter(&timer->hpet_compare);
+
+	i = 0;
+	count = hpet_time_div(hpetp, TICK_CALIBRATE);
+
+	local_irq_save(flags);
+
+	start = read_counter(&hpet->hpet_mc);
+
+	do {
+		m = read_counter(&hpet->hpet_mc);
+		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
+	} while (i++, (m - start) < count);
+
+	local_irq_restore(flags);
+
+	return (m - start) / i;
+}
+
+static unsigned long hpet_calibrate(struct hpets *hpetp)
+{
+	unsigned long ret = ~0UL;
+	unsigned long tmp;
+
+	/*
+	 * Try to calibrate until the return value becomes a stable, small
+	 * value.  If an SMI interrupts the calibration loop, that sample
+	 * will be large; keeping the running minimum avoids its impact.
+	 */
+	for ( ; ; ) {
+		tmp = __hpet_calibrate(hpetp);
+		if (ret <= tmp)
+			break;
+		ret = tmp;
+	}
+
+	return ret;
+}
+
+int hpet_alloc(struct hpet_data *hdp)
+{
+	u64 cap, mcfg;
+	struct hpet_dev *devp;
+	u32 i, ntimer;
+	struct hpets *hpetp;
+	size_t siz;
+	struct hpet __iomem *hpet;
+	static struct hpets *last;
+	unsigned long period;
+	unsigned long long temp;
+	u32 remainder;
+
+	/*
+	 * hpet_alloc can be called by platform dependent code.
+	 * If platform dependent code has allocated the hpet that
+	 * ACPI has also reported, then we catch it here.
+	 */
+	if (hpet_is_known(hdp)) {
+		printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
+			__func__);
+		return 0;
+	}
+
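+	/* struct hpets already embeds one hpet_dev, hence hd_nirqs - 1 extra */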
+	siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) *
+				      sizeof(struct hpet_dev));
+
+	hpetp = kzalloc(siz, GFP_KERNEL);
+
+	if (!hpetp)
+		return -ENOMEM;
+
+	hpetp->hp_which = hpet_nhpet++;
+	hpetp->hp_hpet = hdp->hd_address;
+	hpetp->hp_hpet_phys = hdp->hd_phys_address;
+
+	hpetp->hp_ntimer = hdp->hd_nirqs;
+
+	for (i = 0; i < hdp->hd_nirqs; i++)
+		hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
+
+	hpet = hpetp->hp_hpet;
+
+	cap = readq(&hpet->hpet_cap);
+
+	ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;
+
+	if (hpetp->hp_ntimer != ntimer) {
+		printk(KERN_WARNING "hpet: number of irqs doesn't agree"
+		       " with number of timers\n");
+		kfree(hpetp);
+		return -ENODEV;
+	}
+
+	if (last)
+		last->hp_next = hpetp;
+	else
+		hpets = hpetp;
+
+	last = hpetp;
+
+	period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
+		HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */
+	temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */
+	temp += period >> 1; /* round */
+	do_div(temp, period);
+	hpetp->hp_tick_freq = temp; /* ticks per second */
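+	/*
+	 * Worked example: a nominal 14.31818 MHz HPET reports a period of
+	 * 69841279 fs; (10^15 + 69841279/2) / 69841279 = 14318180, so
+	 * hp_tick_freq recovers the nominal rate to within 1 Hz.
+	 */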
+
+	printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
+		hpetp->hp_which, hdp->hd_phys_address,
+		hpetp->hp_ntimer > 1 ? "s" : "");
+	for (i = 0; i < hpetp->hp_ntimer; i++)
+		printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
+	printk(KERN_CONT "\n");
+
+	temp = hpetp->hp_tick_freq;
+	remainder = do_div(temp, 1000000);
+	printk(KERN_INFO
+		"hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n",
+		hpetp->hp_which, hpetp->hp_ntimer,
+		cap & HPET_COUNTER_SIZE_MASK ? 64 : 32,
+		(unsigned) temp, remainder);
+
+	mcfg = readq(&hpet->hpet_config);
+	if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
+		write_counter(0L, &hpet->hpet_mc);
+		mcfg |= HPET_ENABLE_CNF_MASK;
+		writeq(mcfg, &hpet->hpet_config);
+	}
+
+	for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) {
+		struct hpet_timer __iomem *timer;
+
+		timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
+
+		devp->hd_hpets = hpetp;
+		devp->hd_hpet = hpet;
+		devp->hd_timer = timer;
+
+		/*
+		 * If the timer was reserved by platform code,
+		 * then make timer unavailable for opens.
+		 */
+		if (hdp->hd_state & (1 << i)) {
+			devp->hd_flags = HPET_OPEN;
+			continue;
+		}
+
+		init_waitqueue_head(&devp->hd_waitqueue);
+	}
+
+	hpetp->hp_delta = hpet_calibrate(hpetp);
+
+/* This clocksource driver currently only works on ia64 */
+#ifdef CONFIG_IA64
+	if (!hpet_clocksource) {
+		hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
+		clocksource_hpet.archdata.fsys_mmio = hpet_mctr;
+		clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq);
+		hpetp->hp_clocksource = &clocksource_hpet;
+		hpet_clocksource = &clocksource_hpet;
+	}
+#endif
+
+	return 0;
+}
+
+static acpi_status hpet_resources(struct acpi_resource *res, void *data)
+{
+	struct hpet_data *hdp;
+	acpi_status status;
+	struct acpi_resource_address64 addr;
+
+	hdp = data;
+
+	status = acpi_resource_to_address64(res, &addr);
+
+	if (ACPI_SUCCESS(status)) {
+		hdp->hd_phys_address = addr.address.minimum;
+		hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length);
+
+		if (hpet_is_known(hdp)) {
+			iounmap(hdp->hd_address);
+			return AE_ALREADY_EXISTS;
+		}
+	} else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
+		struct acpi_resource_fixed_memory32 *fixmem32;
+
+		fixmem32 = &res->data.fixed_memory32;
+
+		hdp->hd_phys_address = fixmem32->address;
+		hdp->hd_address = ioremap(fixmem32->address,
+						HPET_RANGE_SIZE);
+
+		if (hpet_is_known(hdp)) {
+			iounmap(hdp->hd_address);
+			return AE_ALREADY_EXISTS;
+		}
+	} else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
+		struct acpi_resource_extended_irq *irqp;
+		int i, irq;
+
+		irqp = &res->data.extended_irq;
+
+		for (i = 0; i < irqp->interrupt_count; i++) {
+			if (hdp->hd_nirqs >= HPET_MAX_TIMERS)
+				break;
+
+			irq = acpi_register_gsi(NULL, irqp->interrupts[i],
+				      irqp->triggering, irqp->polarity);
+			if (irq < 0)
+				return AE_ERROR;
+
+			hdp->hd_irq[hdp->hd_nirqs] = irq;
+			hdp->hd_nirqs++;
+		}
+	}
+
+	return AE_OK;
+}
+
+static int hpet_acpi_add(struct acpi_device *device)
+{
+	acpi_status result;
+	struct hpet_data data;
+
+	memset(&data, 0, sizeof(data));
+
+	result =
+	    acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+				hpet_resources, &data);
+
+	if (ACPI_FAILURE(result))
+		return -ENODEV;
+
+	if (!data.hd_address || !data.hd_nirqs) {
+		if (data.hd_address)
+			iounmap(data.hd_address);
+		printk(KERN_WARNING "%s: no address or irqs in _CRS\n", __func__);
+		return -ENODEV;
+	}
+
+	return hpet_alloc(&data);
+}
+
+static const struct acpi_device_id hpet_device_ids[] = {
+	{"PNP0103", 0},
+	{"", 0},
+};
+
+static struct acpi_driver hpet_acpi_driver = {
+	.name = "hpet",
+	.ids = hpet_device_ids,
+	.ops = {
+		.add = hpet_acpi_add,
+		},
+};
+
+static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };
+
+static int __init hpet_init(void)
+{
+	int result;
+
+	result = misc_register(&hpet_misc);
+	if (result < 0)
+		return -ENODEV;
+
+	sysctl_header = register_sysctl_table(dev_root);
+
+	result = acpi_bus_register_driver(&hpet_acpi_driver);
+	if (result < 0) {
+		if (sysctl_header)
+			unregister_sysctl_table(sysctl_header);
+		misc_deregister(&hpet_misc);
+		return result;
+	}
+
+	return 0;
+}
+device_initcall(hpet_init);
+
+/*
+MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>");
+MODULE_LICENSE("GPL");
+*/
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
new file mode 100644
index 0000000..dac895d
--- /dev/null
+++ b/drivers/char/hw_random/Kconfig
@@ -0,0 +1,449 @@
+#
+# Hardware Random Number Generator (RNG) configuration
+#
+
+menuconfig HW_RANDOM
+	tristate "Hardware Random Number Generator Core support"
+	default m
+	---help---
+	  Hardware Random Number Generator Core infrastructure.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called rng-core.  This provides a device
+	  that's usually called /dev/hwrng, and which exposes one
+	  of possibly several hardware random number generators.
+
+	  These hardware random number generators do feed into the
+	  kernel's random number generator entropy pool.
+
+	  If unsure, say Y.
+
+if HW_RANDOM
+
+config HW_RANDOM_TIMERIOMEM
+	tristate "Timer IOMEM HW Random Number Generator support"
+	depends on HAS_IOMEM
+	---help---
+	  This driver provides kernel-side support for a generic Random
+	  Number Generator that is read through a 'dumb' iomem address
+	  and must not be read faster than some fixed rate, for example
+	  once a second; the default FPGA bitstream on the TS-7800 has
+	  such functionality.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called timeriomem-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_INTEL
+	tristate "Intel HW Random Number Generator support"
+	depends on (X86 || IA64) && PCI
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Intel i8xx-based motherboards.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called intel-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_AMD
+	tristate "AMD HW Random Number Generator support"
+	depends on (X86 || PPC_MAPLE) && PCI
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on AMD 76x-based motherboards.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called amd-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_ATMEL
+	tristate "Atmel Random Number Generator support"
+	depends on ARCH_AT91 && HAVE_CLK && OF
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Atmel AT91 devices.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called atmel-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_BCM2835
+	tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
+	depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
+		   ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on the Broadcom BCM2835 and BCM63xx SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called bcm2835-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_IPROC_RNG200
+	tristate "Broadcom iProc/STB RNG200 support"
+	depends on ARCH_BCM_IPROC || ARCH_BRCMSTB
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the RNG200
+	  hardware found on the Broadcom iProc and STB SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called iproc-rng200.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_GEODE
+	tristate "AMD Geode HW Random Number Generator support"
+	depends on X86_32 && PCI
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on the AMD Geode LX.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called geode-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_N2RNG
+	tristate "Niagara2 Random Number Generator support"
+	depends on SPARC64
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Niagara2 cpus.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called n2-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_VIA
+	tristate "VIA HW Random Number Generator support"
+	depends on X86
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on VIA based motherboards.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called via-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_IXP4XX
+	tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support"
+	depends on ARCH_IXP4XX
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Pseudo-Random
+	  Number Generator hardware found on the Intel IXP45x/46x NPU.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ixp4xx-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_OMAP
+	tristate "OMAP Random Number Generator support"
+	depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on OMAP16xx, OMAP2/3/4/5, AM33xx/AM43xx
+	  multimedia processors, and Marvell Armada 7k/8k SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called omap-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_OMAP3_ROM
+	tristate "OMAP3 ROM Random Number Generator support"
+	depends on ARCH_OMAP3
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on OMAP34xx processors.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called omap3-rom-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_OCTEON
+	tristate "Octeon Random Number Generator support"
+	depends on CAVIUM_OCTEON_SOC
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Octeon processors.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called octeon-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_PASEMI
+	tristate "PA Semi HW Random Number Generator support"
+	depends on PPC_PASEMI
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on PA Semi PWRficient SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called pasemi-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_VIRTIO
+	tristate "VirtIO Random Number Generator support"
+	depends on VIRTIO
+	---help---
+	  This driver provides kernel-side support for the virtual Random Number
+	  Generator hardware.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called virtio-rng.  If unsure, say N.
+
+config HW_RANDOM_TX4939
+	tristate "TX4939 Random Number Generator support"
+	depends on SOC_TX4939
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on TX4939 SoC.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called tx4939-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_MXC_RNGA
+	tristate "Freescale i.MX RNGA Random Number Generator"
+	depends on SOC_IMX31
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Freescale i.MX processors.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mxc-rnga.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_IMX_RNGC
+	tristate "Freescale i.MX RNGC Random Number Generator"
+	depends on ARCH_MXC
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator Version C hardware found on some Freescale i.MX
+	  processors. Version B is also supported by this driver.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called imx-rngc.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_NOMADIK
+	tristate "ST-Ericsson Nomadik Random Number Generator support"
+	depends on ARCH_NOMADIK
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on ST-Ericsson SoCs (8815 and 8500).
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nomadik-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_PSERIES
+	tristate "pSeries HW Random Number Generator support"
+	depends on PPC64 && IBMVIO
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on POWER7+ machines and above.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called pseries-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_POWERNV
+	tristate "PowerNV Random Number Generator support"
+	depends on PPC_POWERNV
+	default HW_RANDOM
+	---help---
+	  This is the driver for Random Number Generator hardware found
+	  in POWER7+ and above machines for PowerNV platform.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called powernv-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_HISI
+	tristate "Hisilicon Random Number Generator support"
+	depends on HW_RANDOM && ARCH_HISI
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Hisilicon Hip04 and Hip05 SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called hisi-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_ST
+	tristate "ST Microelectronics HW Random Number Generator support"
+	depends on HW_RANDOM && ARCH_STI
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on STi series of SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called st-rng.
+
+config HW_RANDOM_XGENE
+	tristate "APM X-Gene True Random Number Generator (TRNG) support"
+	depends on HW_RANDOM && ARCH_XGENE
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on APM X-Gene SoC.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called xgene_rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_STM32
+	tristate "STMicroelectronics STM32 random number generator"
+	depends on HW_RANDOM && (ARCH_STM32 || COMPILE_TEST)
+	depends on HAS_IOMEM
+	default HW_RANDOM
+	help
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on STM32 microcontrollers.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called stm32-rng.
+
+	  If unsure, say N.
+
+config HW_RANDOM_PIC32
+	tristate "Microchip PIC32 Random Number Generator support"
+	depends on HW_RANDOM && MACH_PIC32
+	default y
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on a PIC32.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called pic32-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_MESON
+	tristate "Amlogic Meson Random Number Generator support"
+	depends on HW_RANDOM
+	depends on ARCH_MESON || COMPILE_TEST
+	default y
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Amlogic Meson SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called meson-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_CAVIUM
+       tristate "Cavium ThunderX Random Number Generator support"
+       depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
+       default HW_RANDOM
+       ---help---
+         This driver provides kernel-side support for the Random Number
+         Generator hardware found on Cavium SoCs.
+
+         To compile this driver as a module, choose M here: the
+         module will be called cavium_rng.
+
+         If unsure, say Y.
+
+config HW_RANDOM_MTK
+	tristate "Mediatek Random Number Generator support"
+	depends on HW_RANDOM
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	default y
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Mediatek SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mtk-rng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_S390
+	tristate "S390 True Random Number Generator support"
+	depends on S390
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the True
+	  Random Number Generator available as CPACF extension
+	  on modern s390 hardware platforms.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called s390-trng.
+
+	  If unsure, say Y.
+
+config HW_RANDOM_EXYNOS
+	tristate "Samsung Exynos True Random Number Generator support"
+	depends on ARCH_EXYNOS || COMPILE_TEST
+	default HW_RANDOM
+	---help---
+	  This driver provides support for the True Random Number
+	  Generator available in Exynos SoCs.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called exynos-trng.
+
+	  If unsure, say Y.
+endif # HW_RANDOM
+
+config UML_RANDOM
+	depends on UML
+	tristate "Hardware random number generator"
+	help
+	  This option enables UML's "hardware" random number generator.  It
+	  attaches itself to the host's /dev/random, supplying as much entropy
+	  as the host has, rather than the small amount the UML gets from its
+	  own drivers.  It registers itself as a standard hardware random number
+	  generator, major 10, minor 183, and the canonical device name is
+	  /dev/hwrng.
+	  The way to make use of this is to install the rng-tools package
+	  (check your distro, or download from
+	  http://sourceforge.net/projects/gkernel/).  rngd periodically reads
+	  /dev/hwrng and injects the entropy into /dev/random.
+
+config HW_RANDOM_KEYSTONE
+	depends on ARCH_KEYSTONE
+	default HW_RANDOM
+	tristate "TI Keystone NETCP SA Hardware random number generator"
+	help
+	  This option enables Keystone's hardware random number generator.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
new file mode 100644
index 0000000..e35ec3c
--- /dev/null
+++ b/drivers/char/hw_random/Makefile
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for HW Random Number Generator (RNG) device drivers.
+#
+
+obj-$(CONFIG_HW_RANDOM) += rng-core.o
+rng-core-y := core.o
+obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
+obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
+obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
+obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
+obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
+obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
+n2-rng-y := n2-drv.o n2-asm.o
+obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
+obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-trng.o
+obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
+obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
+obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
+obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
+obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
+obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
+obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
+obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o
+obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
+obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
+obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
+obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
+obj-$(CONFIG_HW_RANDOM_HISI)	+= hisi-rng.o
+obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
+obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
+obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
+obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
+obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
+obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
+obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
+obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
+obj-$(CONFIG_HW_RANDOM_MTK)	+= mtk-rng.o
+obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
+obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
new file mode 100644
index 0000000..9959c76
--- /dev/null
+++ b/drivers/char/hw_random/amd-rng.c
@@ -0,0 +1,211 @@
+/*
+ * RNG driver for AMD RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#define DRV_NAME "AMD768-HWRNG"
+
+#define RNGDATA		0x00
+#define RNGDONE		0x04
+#define PMBASE_OFFSET	0xF0
+#define PMBASE_SIZE	8
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE.  We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static const struct pci_device_id pci_tbl[] = {
+	{ PCI_VDEVICE(AMD, 0x7443), 0, },
+	{ PCI_VDEVICE(AMD, 0x746b), 0, },
+	{ 0, },	/* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+struct amd768_priv {
+	void __iomem *iobase;
+	struct pci_dev *pcidev;
+	u32 pmbase;
+};
+
+static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+	u32 *data = buf;
+	struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
+	size_t read = 0;
+	/* We will wait at maximum one time per read */
+	/* We wait at most once per 32-bit word read */
+
+	/*
+	 * RNG data is available when RNGDONE is set to 1.
+	 * New random numbers are generated approximately 128 microseconds
+	 * after RNGDATA is read.
+	 */
+	while (read < max) {
+		if (ioread32(priv->iobase + RNGDONE) == 0) {
+			if (wait) {
+				/* Delay given by datasheet */
+				usleep_range(128, 196);
+				if (timeout-- == 0)
+					return read;
+			} else {
+				return 0;
+			}
+		} else {
+			*data = ioread32(priv->iobase + RNGDATA);
+			data++;
+			read += 4;
+		}
+	}
+
+	return read;
+}
+
+static int amd_rng_init(struct hwrng *rng)
+{
+	struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
+	u8 rnen;
+
+	pci_read_config_byte(priv->pcidev, 0x40, &rnen);
+	rnen |= BIT(7);	/* RNG on */
+	pci_write_config_byte(priv->pcidev, 0x40, rnen);
+
+	pci_read_config_byte(priv->pcidev, 0x41, &rnen);
+	rnen |= BIT(7);	/* PMIO enable */
+	pci_write_config_byte(priv->pcidev, 0x41, rnen);
+
+	return 0;
+}
+
+static void amd_rng_cleanup(struct hwrng *rng)
+{
+	struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
+	u8 rnen;
+
+	pci_read_config_byte(priv->pcidev, 0x40, &rnen);
+	rnen &= ~BIT(7);	/* RNG off */
+	pci_write_config_byte(priv->pcidev, 0x40, rnen);
+}
+
+static struct hwrng amd_rng = {
+	.name		= "amd",
+	.init		= amd_rng_init,
+	.cleanup	= amd_rng_cleanup,
+	.read		= amd_rng_read,
+};
+
+static int __init mod_init(void)
+{
+	int err = -ENODEV;
+	struct pci_dev *pdev = NULL;
+	const struct pci_device_id *ent;
+	u32 pmbase;
+	struct amd768_priv *priv;
+
+	for_each_pci_dev(pdev) {
+		ent = pci_match_id(pci_tbl, pdev);
+		if (ent)
+			goto found;
+	}
+	/* Device not found. */
+	return -ENODEV;
+
+found:
+	err = pci_read_config_dword(pdev, 0x58, &pmbase);
+	if (err)
+		return err;
+
+	pmbase &= 0x0000FF00;
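+	/* the PM I/O base lives in bits 15:8, so it is 256-byte aligned */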
+	if (pmbase == 0)
+		return -EIO;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
+		dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
+			pmbase + PMBASE_OFFSET);
+		err = -EBUSY;
+		goto out;
+	}
+
+	priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+	if (!priv->iobase) {
+		pr_err(DRV_NAME " Cannot map ioport\n");
+		err = -EINVAL;
+		goto err_iomap;
+	}
+
+	amd_rng.priv = (unsigned long)priv;
+	priv->pmbase = pmbase;
+	priv->pcidev = pdev;
+
+	pr_info(DRV_NAME " detected\n");
+	err = hwrng_register(&amd_rng);
+	if (err) {
+		pr_err(DRV_NAME " registering failed (%d)\n", err);
+		goto err_hwrng;
+	}
+	return 0;
+
+err_hwrng:
+	ioport_unmap(priv->iobase);
+err_iomap:
+	release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+out:
+	kfree(priv);
+	return err;
+}
+
+static void __exit mod_exit(void)
+{
+	struct amd768_priv *priv;
+
+	priv = (struct amd768_priv *)amd_rng.priv;
+
+	hwrng_unregister(&amd_rng);
+
+	ioport_unmap(priv->iobase);
+
+	release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+
+	kfree(priv);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_AUTHOR("The Linux Kernel team");
+MODULE_DESCRIPTION("H/W RNG driver for AMD chipsets");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
new file mode 100644
index 0000000..4334262
--- /dev/null
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2011 Peter Korsgaard <jacmet@sunsite.dk>
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+
+#define TRNG_CR		0x00
+#define TRNG_ISR	0x1c
+#define TRNG_ODATA	0x50
+
+#define TRNG_KEY	0x524e4700 /* RNG */
+
+struct atmel_trng {
+	struct clk *clk;
+	void __iomem *base;
+	struct hwrng rng;
+};
+
+static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
+			   bool wait)
+{
+	struct atmel_trng *trng = container_of(rng, struct atmel_trng, rng);
+	u32 *data = buf;
+
+	/* data ready? */
+	if (readl(trng->base + TRNG_ISR) & 1) {
+		*data = readl(trng->base + TRNG_ODATA);
+		/*
+		 * Ensure the data-ready bit is only set again AFTER the
+		 * next data word is ready, in case it got set between
+		 * checking ISR and reading ODATA, so we don't risk
+		 * re-reading the same word.
+		 */
+		readl(trng->base + TRNG_ISR);
+		return 4;
+	} else
+		return 0;
+}
+
+static void atmel_trng_enable(struct atmel_trng *trng)
+{
+	writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+}
+
+static void atmel_trng_disable(struct atmel_trng *trng)
+{
+	writel(TRNG_KEY, trng->base + TRNG_CR);
+}
+
+static int atmel_trng_probe(struct platform_device *pdev)
+{
+	struct atmel_trng *trng;
+	struct resource *res;
+	int ret;
+
+	trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+	if (!trng)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	trng->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(trng->base))
+		return PTR_ERR(trng->base);
+
+	trng->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(trng->clk))
+		return PTR_ERR(trng->clk);
+
+	ret = clk_prepare_enable(trng->clk);
+	if (ret)
+		return ret;
+
+	atmel_trng_enable(trng);
+	trng->rng.name = pdev->name;
+	trng->rng.read = atmel_trng_read;
+
+	ret = hwrng_register(&trng->rng);
+	if (ret)
+		goto err_register;
+
+	platform_set_drvdata(pdev, trng);
+
+	return 0;
+
+err_register:
+	clk_disable_unprepare(trng->clk);
+	return ret;
+}
+
+static int atmel_trng_remove(struct platform_device *pdev)
+{
+	struct atmel_trng *trng = platform_get_drvdata(pdev);
+
+	hwrng_unregister(&trng->rng);
+
+	atmel_trng_disable(trng);
+	clk_disable_unprepare(trng->clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int atmel_trng_suspend(struct device *dev)
+{
+	struct atmel_trng *trng = dev_get_drvdata(dev);
+
+	atmel_trng_disable(trng);
+	clk_disable_unprepare(trng->clk);
+
+	return 0;
+}
+
+static int atmel_trng_resume(struct device *dev)
+{
+	struct atmel_trng *trng = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(trng->clk);
+	if (ret)
+		return ret;
+
+	atmel_trng_enable(trng);
+
+	return 0;
+}
+
+static const struct dev_pm_ops atmel_trng_pm_ops = {
+	.suspend	= atmel_trng_suspend,
+	.resume		= atmel_trng_resume,
+};
+#endif /* CONFIG_PM */
+
+static const struct of_device_id atmel_trng_dt_ids[] = {
+	{ .compatible = "atmel,at91sam9g45-trng" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids);
+
+static struct platform_driver atmel_trng_driver = {
+	.probe		= atmel_trng_probe,
+	.remove		= atmel_trng_remove,
+	.driver		= {
+		.name	= "atmel-trng",
+#ifdef CONFIG_PM
+		.pm	= &atmel_trng_pm_ops,
+#endif /* CONFIG_PM */
+		.of_match_table = atmel_trng_dt_ids,
+	},
+};
+
+module_platform_driver(atmel_trng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
+MODULE_DESCRIPTION("Atmel true random number generator driver");
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
new file mode 100644
index 0000000..6767d96
--- /dev/null
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ * Copyright (c) 2013 Lubomir Rintel
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License ("GPL")
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/clk.h>
+
+#define RNG_CTRL	0x0
+#define RNG_STATUS	0x4
+#define RNG_DATA	0x8
+#define RNG_INT_MASK	0x10
+
+/* enable rng */
+#define RNG_RBGEN	0x1
+
+/* the initial numbers generated are "less random" so will be discarded */
+#define RNG_WARMUP_COUNT 0x40000
+
+#define RNG_INT_OFF	0x1
+
+struct bcm2835_rng_priv {
+	struct hwrng rng;
+	void __iomem *base;
+	bool mask_interrupts;
+	struct clk *clk;
+};
+
+static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng)
+{
+	return container_of(rng, struct bcm2835_rng_priv, rng);
+}
+
+static inline u32 rng_readl(struct bcm2835_rng_priv *priv, u32 offset)
+{
+	/* MIPS chips strapped for BE will automagically configure the
+	 * peripheral registers for CPU-native byte order.
+	 */
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+		return __raw_readl(priv->base + offset);
+	else
+		return readl(priv->base + offset);
+}
+
+static inline void rng_writel(struct bcm2835_rng_priv *priv, u32 val,
+			      u32 offset)
+{
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+		__raw_writel(val, priv->base + offset);
+	else
+		writel(val, priv->base + offset);
+}
+
+static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
+			       bool wait)
+{
+	struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+	u32 max_words = max / sizeof(u32);
+	u32 num_words, count;
+
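+	/* bits [31:24] of the status register report the FIFO word count */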
+	while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
+		if (!wait)
+			return 0;
+		cpu_relax();
+	}
+
+	num_words = rng_readl(priv, RNG_STATUS) >> 24;
+	if (num_words > max_words)
+		num_words = max_words;
+
+	for (count = 0; count < num_words; count++)
+		((u32 *)buf)[count] = rng_readl(priv, RNG_DATA);
+
+	return num_words * sizeof(u32);
+}
+
+static int bcm2835_rng_init(struct hwrng *rng)
+{
+	struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+	int ret = 0;
+	u32 val;
+
+	if (!IS_ERR(priv->clk)) {
+		ret = clk_prepare_enable(priv->clk);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->mask_interrupts) {
+		/* mask the interrupt */
+		val = rng_readl(priv, RNG_INT_MASK);
+		val |= RNG_INT_OFF;
+		rng_writel(priv, val, RNG_INT_MASK);
+	}
+
+	/* set warm-up count & enable */
+	rng_writel(priv, RNG_WARMUP_COUNT, RNG_STATUS);
+	rng_writel(priv, RNG_RBGEN, RNG_CTRL);
+
+	return ret;
+}
+
+static void bcm2835_rng_cleanup(struct hwrng *rng)
+{
+	struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+
+	/* disable rng hardware */
+	rng_writel(priv, 0, RNG_CTRL);
+
+	if (!IS_ERR(priv->clk))
+		clk_disable_unprepare(priv->clk);
+}
+
+struct bcm2835_rng_of_data {
+	bool mask_interrupts;
+};
+
+static const struct bcm2835_rng_of_data nsp_rng_of_data = {
+	.mask_interrupts = true,
+};
+
+static const struct of_device_id bcm2835_rng_of_match[] = {
+	{ .compatible = "brcm,bcm2835-rng"},
+	{ .compatible = "brcm,bcm-nsp-rng", .data = &nsp_rng_of_data },
+	{ .compatible = "brcm,bcm5301x-rng", .data = &nsp_rng_of_data },
+	{ .compatible = "brcm,bcm6368-rng"},
+	{},
+};
+
+static int bcm2835_rng_probe(struct platform_device *pdev)
+{
+	const struct bcm2835_rng_of_data *of_data;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	const struct of_device_id *rng_id;
+	struct bcm2835_rng_priv *priv;
+	struct resource *r;
+	int err;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, priv);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	/* map peripheral */
+	priv->base = devm_ioremap_resource(dev, r);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	/* Clock is optional on most platforms */
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+
+	priv->rng.name = pdev->name;
+	priv->rng.init = bcm2835_rng_init;
+	priv->rng.read = bcm2835_rng_read;
+	priv->rng.cleanup = bcm2835_rng_cleanup;
+
+	rng_id = of_match_node(bcm2835_rng_of_match, np);
+	if (!rng_id)
+		return -EINVAL;
+
+	/* Pick up platform-specific match data, if any */
+	of_data = rng_id->data;
+	if (of_data)
+		priv->mask_interrupts = of_data->mask_interrupts;
+
+	/* register driver */
+	err = devm_hwrng_register(dev, &priv->rng);
+	if (err)
+		dev_err(dev, "hwrng registration failed\n");
+	else
+		dev_info(dev, "hwrng registered\n");
+
+	return err;
+}
+
+MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
+
+static struct platform_device_id bcm2835_rng_devtype[] = {
+	{ .name = "bcm2835-rng" },
+	{ .name = "bcm63xx-rng" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, bcm2835_rng_devtype);
+
+static struct platform_driver bcm2835_rng_driver = {
+	.driver = {
+		.name = "bcm2835-rng",
+		.of_match_table = bcm2835_rng_of_match,
+	},
+	.probe		= bcm2835_rng_probe,
+	.id_table	= bcm2835_rng_devtype,
+};
+module_platform_driver(bcm2835_rng_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("BCM2835 Random Number Generator (RNG) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
new file mode 100644
index 0000000..2d1352b
--- /dev/null
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -0,0 +1,103 @@
+/*
+ * Hardware Random Number Generator support for Cavium, Inc.
+ * Thunder processor family.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cavium, Inc.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+struct cavium_rng {
+	struct hwrng ops;
+	void __iomem *result;
+};
+
+/* Read data from the RNG unit */
+static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait)
+{
+	struct cavium_rng *p = container_of(rng, struct cavium_rng, ops);
+	unsigned int size = max;
+
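+	/* drain 64-bit words first, then fall back to bytes for any tail */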
+	while (size >= 8) {
+		*((u64 *)dat) = readq(p->result);
+		size -= 8;
+		dat += 8;
+	}
+	while (size > 0) {
+		*((u8 *)dat) = readb(p->result);
+		size--;
+		dat++;
+	}
+	return max;
+}
+
+/* Map Cavium RNG to an HWRNG object */
+static int cavium_rng_probe_vf(struct	pci_dev		*pdev,
+			 const struct	pci_device_id	*id)
+{
+	struct	cavium_rng *rng;
+	int	ret;
+
+	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+	if (!rng)
+		return -ENOMEM;
+
+	/* Map the RNG result */
+	rng->result = pcim_iomap(pdev, 0, 0);
+	if (!rng->result) {
+		dev_err(&pdev->dev, "iomap of RNG result register failed\n");
+		return -ENOMEM;
+	}
+
+	rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+				       "cavium-rng-%s", dev_name(&pdev->dev));
+	if (!rng->ops.name)
+		return -ENOMEM;
+
+	rng->ops.read    = cavium_rng_read;
+	rng->ops.quality = 1000;
+
+	pci_set_drvdata(pdev, rng);
+
+	ret = hwrng_register(&rng->ops);
+	if (ret) {
+		dev_err(&pdev->dev, "Error registering device as HWRNG.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Remove the VF */
+static void  cavium_rng_remove_vf(struct pci_dev *pdev)
+{
+	struct cavium_rng *rng;
+
+	rng = pci_get_drvdata(pdev);
+	hwrng_unregister(&rng->ops);
+}
+
+static const struct pci_device_id cavium_rng_vf_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0},
+	{0,},
+};
+MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table);
+
+static struct pci_driver cavium_rng_vf_driver = {
+	.name		= "cavium_rng_vf",
+	.id_table	= cavium_rng_vf_id_table,
+	.probe		= cavium_rng_probe_vf,
+	.remove		= cavium_rng_remove_vf,
+};
+module_pci_driver(cavium_rng_vf_driver);
+
+MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c
new file mode 100644
index 0000000..63d6e68
--- /dev/null
+++ b/drivers/char/hw_random/cavium-rng.c
@@ -0,0 +1,94 @@
+/*
+ * Hardware Random Number Generator support for Cavium Inc.
+ * Thunder processor family.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cavium, Inc.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#define THUNDERX_RNM_ENT_EN     0x1
+#define THUNDERX_RNM_RNG_EN     0x2
+
+struct cavium_rng_pf {
+	void __iomem *control_status;
+};
+
+/* Enable the RNG hardware and activate the VF */
+static int cavium_rng_probe(struct pci_dev *pdev,
+			const struct pci_device_id *id)
+{
+	struct	cavium_rng_pf *rng;
+	int	iov_err;
+
+	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+	if (!rng)
+		return -ENOMEM;
+
+	/* Map the RNG control */
+	rng->control_status = pcim_iomap(pdev, 0, 0);
+	if (!rng->control_status) {
+		dev_err(&pdev->dev,
+			"iomap of RNG control_status register failed\n");
+		return -ENOMEM;
+	}
+
+	/* Enable the RNG hardware and entropy source */
+	writeq(THUNDERX_RNM_RNG_EN | THUNDERX_RNM_ENT_EN,
+		rng->control_status);
+
+	pci_set_drvdata(pdev, rng);
+
+	/* Enable the Cavium RNG as a VF */
+	iov_err = pci_enable_sriov(pdev, 1);
+	if (iov_err != 0) {
+		/* Disable the RNG hardware and entropy source */
+		writeq(0, rng->control_status);
+		dev_err(&pdev->dev,
+			"Error initializing RNG virtual function (%i).\n",
+			iov_err);
+		return iov_err;
+	}
+
+	return 0;
+}
+
+/* Disable VF and RNG Hardware */
+static void cavium_rng_remove(struct pci_dev *pdev)
+{
+	struct cavium_rng_pf *rng;
+
+	rng = pci_get_drvdata(pdev);
+
+	/* Remove the VF */
+	pci_disable_sriov(pdev);
+
+	/* Disable the RNG hardware and entropy source */
+	writeq(0, rng->control_status);
+}
+
+static const struct pci_device_id cavium_rng_pf_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa018), 0, 0, 0}, /* Thunder RNM */
+	{0,},
+};
+
+MODULE_DEVICE_TABLE(pci, cavium_rng_pf_id_table);
+
+static struct pci_driver cavium_rng_pf_driver = {
+	.name		= "cavium_rng_pf",
+	.id_table	= cavium_rng_pf_id_table,
+	.probe		= cavium_rng_probe,
+	.remove		= cavium_rng_remove,
+};
+
+module_pci_driver(cavium_rng_pf_driver);
+MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
new file mode 100644
index 0000000..aaf9e5a
--- /dev/null
+++ b/drivers/char/hw_random/core.c
@@ -0,0 +1,624 @@
+/*
+ * hw_random/core.c: HWRNG core API
+ *
+ * Copyright 2006 Michael Buesch <m@bues.ch>
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * Please read Documentation/hw_random.txt for details on use.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#define RNG_MODULE_NAME		"hw_random"
+
+static struct hwrng *current_rng;
+/* the current rng has been explicitly chosen by user via sysfs */
+static int cur_rng_set_by_user;
+static struct task_struct *hwrng_fill;
+/* list of registered rngs, sorted descending by quality */
+static LIST_HEAD(rng_list);
+/* Protects rng_list and current_rng */
+static DEFINE_MUTEX(rng_mutex);
+/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
+static DEFINE_MUTEX(reading_mutex);
+static int data_avail;
+static u8 *rng_buffer, *rng_fillbuf;
+static unsigned short current_quality;
+static unsigned short default_quality; /* = 0; default to "off" */
+
+module_param(current_quality, ushort, 0644);
+MODULE_PARM_DESC(current_quality,
+		 "current hwrng entropy estimation per mille");
+module_param(default_quality, ushort, 0644);
+MODULE_PARM_DESC(default_quality,
+		 "default entropy content of hwrng per mille");
+
+static void drop_current_rng(void);
+static int hwrng_init(struct hwrng *rng);
+static void start_khwrngd(void);
+
+static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+			       int wait);
+
+static size_t rng_buffer_size(void)
+{
+	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
+}
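+
+/*
+ * Example: with 64-byte cache lines, rng_buffer and rng_fillbuf below
+ * are 64 bytes each; the 32-byte floor only matters on smaller lines.
+ */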
+
+static void add_early_randomness(struct hwrng *rng)
+{
+	int bytes_read;
+	size_t size = min_t(size_t, 16, rng_buffer_size());
+
+	mutex_lock(&reading_mutex);
+	bytes_read = rng_get_data(rng, rng_buffer, size, 1);
+	mutex_unlock(&reading_mutex);
+	if (bytes_read > 0)
+		add_device_randomness(rng_buffer, bytes_read);
+}
+
+static inline void cleanup_rng(struct kref *kref)
+{
+	struct hwrng *rng = container_of(kref, struct hwrng, ref);
+
+	if (rng->cleanup)
+		rng->cleanup(rng);
+
+	complete(&rng->cleanup_done);
+}
+
+static int set_current_rng(struct hwrng *rng)
+{
+	int err;
+
+	BUG_ON(!mutex_is_locked(&rng_mutex));
+
+	err = hwrng_init(rng);
+	if (err)
+		return err;
+
+	drop_current_rng();
+	current_rng = rng;
+
+	return 0;
+}
+
+static void drop_current_rng(void)
+{
+	BUG_ON(!mutex_is_locked(&rng_mutex));
+	if (!current_rng)
+		return;
+
+	/* decrease last reference for triggering the cleanup */
+	kref_put(&current_rng->ref, cleanup_rng);
+	current_rng = NULL;
+}
+
+/* Returns ERR_PTR(), NULL or refcounted hwrng */
+static struct hwrng *get_current_rng(void)
+{
+	struct hwrng *rng;
+
+	if (mutex_lock_interruptible(&rng_mutex))
+		return ERR_PTR(-ERESTARTSYS);
+
+	rng = current_rng;
+	if (rng)
+		kref_get(&rng->ref);
+
+	mutex_unlock(&rng_mutex);
+	return rng;
+}
+
+static void put_rng(struct hwrng *rng)
+{
+	/*
+	 * Hold rng_mutex here so we serialize in case they set_current_rng
+	 * on rng again immediately.
+	 */
+	mutex_lock(&rng_mutex);
+	if (rng)
+		kref_put(&rng->ref, cleanup_rng);
+	mutex_unlock(&rng_mutex);
+}
+
+static int hwrng_init(struct hwrng *rng)
+{
+	if (kref_get_unless_zero(&rng->ref))
+		goto skip_init;
+
+	if (rng->init) {
+		int ret;
+
+		ret = rng->init(rng);
+		if (ret)
+			return ret;
+	}
+
+	kref_init(&rng->ref);
+	reinit_completion(&rng->cleanup_done);
+
+skip_init:
+	add_early_randomness(rng);
+
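+	/*
+	 * Quality is on a 0..1024 scale (roughly per mille): it is later
+	 * used as the numerator over 1024 when crediting entropy in
+	 * hwrng_fillfn().
+	 */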
+	current_quality = rng->quality ? : default_quality;
+	if (current_quality > 1024)
+		current_quality = 1024;
+
+	if (current_quality == 0 && hwrng_fill)
+		kthread_stop(hwrng_fill);
+	if (current_quality > 0 && !hwrng_fill)
+		start_khwrngd();
+
+	return 0;
+}
+
+static int rng_dev_open(struct inode *inode, struct file *filp)
+{
+	/* enforce read-only access to this chrdev */
+	if ((filp->f_mode & FMODE_READ) == 0)
+		return -EINVAL;
+	if (filp->f_mode & FMODE_WRITE)
+		return -EINVAL;
+	return 0;
+}
+
+static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+			int wait) {
+	int present;
+
+	BUG_ON(!mutex_is_locked(&reading_mutex));
+	if (rng->read)
+		return rng->read(rng, (void *)buffer, size, wait);
+
+	if (rng->data_present)
+		present = rng->data_present(rng, wait);
+	else
+		present = 1;
+
+	if (present)
+		return rng->data_read(rng, (u32 *)buffer);
+
+	return 0;
+}
+
+static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+			    size_t size, loff_t *offp)
+{
+	ssize_t ret = 0;
+	int err = 0;
+	int bytes_read, len;
+	struct hwrng *rng;
+
+	while (size) {
+		rng = get_current_rng();
+		if (IS_ERR(rng)) {
+			err = PTR_ERR(rng);
+			goto out;
+		}
+		if (!rng) {
+			err = -ENODEV;
+			goto out;
+		}
+
+		if (mutex_lock_interruptible(&reading_mutex)) {
+			err = -ERESTARTSYS;
+			goto out_put;
+		}
+		if (!data_avail) {
+			bytes_read = rng_get_data(rng, rng_buffer,
+				rng_buffer_size(),
+				!(filp->f_flags & O_NONBLOCK));
+			if (bytes_read < 0) {
+				err = bytes_read;
+				goto out_unlock_reading;
+			}
+			data_avail = bytes_read;
+		}
+
+		if (!data_avail) {
+			if (filp->f_flags & O_NONBLOCK) {
+				err = -EAGAIN;
+				goto out_unlock_reading;
+			}
+		} else {
+			len = data_avail;
+			if (len > size)
+				len = size;
+
+			data_avail -= len;
+
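+			/*
+			 * Consume from the tail of rng_buffer: data_avail
+			 * was just decremented, so it now indexes the
+			 * first of the unread bytes handed out here.
+			 */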
+			if (copy_to_user(buf + ret, rng_buffer + data_avail,
+								len)) {
+				err = -EFAULT;
+				goto out_unlock_reading;
+			}
+
+			size -= len;
+			ret += len;
+		}
+
+		mutex_unlock(&reading_mutex);
+		put_rng(rng);
+
+		if (need_resched())
+			schedule_timeout_interruptible(1);
+
+		if (signal_pending(current)) {
+			err = -ERESTARTSYS;
+			goto out;
+		}
+	}
+out:
+	return ret ? : err;
+
+out_unlock_reading:
+	mutex_unlock(&reading_mutex);
+out_put:
+	put_rng(rng);
+	goto out;
+}
+
+static const struct file_operations rng_chrdev_ops = {
+	.owner		= THIS_MODULE,
+	.open		= rng_dev_open,
+	.read		= rng_dev_read,
+	.llseek		= noop_llseek,
+};
+
+static const struct attribute_group *rng_dev_groups[];
+
+static struct miscdevice rng_miscdev = {
+	.minor		= HWRNG_MINOR,
+	.name		= RNG_MODULE_NAME,
+	.nodename	= "hwrng",
+	.fops		= &rng_chrdev_ops,
+	.groups		= rng_dev_groups,
+};
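+
+/*
+ * Userspace reads randomness through this misc device, e.g.:
+ *
+ *	int fd = open("/dev/hwrng", O_RDONLY);
+ *	ssize_t n = read(fd, buf, sizeof(buf));
+ *
+ * A non-blocking read fails with EAGAIN when no data is currently
+ * available (see rng_dev_read() above).
+ */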
+
+static int enable_best_rng(void)
+{
+	int ret = -ENODEV;
+
+	BUG_ON(!mutex_is_locked(&rng_mutex));
+
+	/* rng_list is sorted by quality, use the best (=first) one */
+	if (!list_empty(&rng_list)) {
+		struct hwrng *new_rng;
+
+		new_rng = list_entry(rng_list.next, struct hwrng, list);
+		ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
+		if (!ret)
+			cur_rng_set_by_user = 0;
+	} else {
+		drop_current_rng();
+		cur_rng_set_by_user = 0;
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static ssize_t hwrng_attr_current_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	int err = -ENODEV;
+	struct hwrng *rng;
+
+	err = mutex_lock_interruptible(&rng_mutex);
+	if (err)
+		return -ERESTARTSYS;
+
+	if (sysfs_streq(buf, "")) {
+		err = enable_best_rng();
+	} else {
+		list_for_each_entry(rng, &rng_list, list) {
+			if (sysfs_streq(rng->name, buf)) {
+				cur_rng_set_by_user = 1;
+				err = set_current_rng(rng);
+				break;
+			}
+		}
+	}
+
+	mutex_unlock(&rng_mutex);
+
+	return err ? : len;
+}
+
+static ssize_t hwrng_attr_current_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	ssize_t ret;
+	struct hwrng *rng;
+
+	rng = get_current_rng();
+	if (IS_ERR(rng))
+		return PTR_ERR(rng);
+
+	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
+	put_rng(rng);
+
+	return ret;
+}
+
+static ssize_t hwrng_attr_available_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	int err;
+	struct hwrng *rng;
+
+	err = mutex_lock_interruptible(&rng_mutex);
+	if (err)
+		return -ERESTARTSYS;
+	buf[0] = '\0';
+	list_for_each_entry(rng, &rng_list, list) {
+		strlcat(buf, rng->name, PAGE_SIZE);
+		strlcat(buf, " ", PAGE_SIZE);
+	}
+	strlcat(buf, "\n", PAGE_SIZE);
+	mutex_unlock(&rng_mutex);
+
+	return strlen(buf);
+}
+
+static ssize_t hwrng_attr_selected_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
+}
+
+static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
+		   hwrng_attr_current_show,
+		   hwrng_attr_current_store);
+static DEVICE_ATTR(rng_available, S_IRUGO,
+		   hwrng_attr_available_show,
+		   NULL);
+static DEVICE_ATTR(rng_selected, S_IRUGO,
+		   hwrng_attr_selected_show,
+		   NULL);
+
+static struct attribute *rng_dev_attrs[] = {
+	&dev_attr_rng_current.attr,
+	&dev_attr_rng_available.attr,
+	&dev_attr_rng_selected.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(rng_dev);
+
+static void __exit unregister_miscdev(void)
+{
+	misc_deregister(&rng_miscdev);
+}
+
+static int __init register_miscdev(void)
+{
+	return misc_register(&rng_miscdev);
+}
+
+static int hwrng_fillfn(void *unused)
+{
+	long rc;
+
+	while (!kthread_should_stop()) {
+		struct hwrng *rng;
+
+		rng = get_current_rng();
+		if (IS_ERR(rng) || !rng)
+			break;
+		mutex_lock(&reading_mutex);
+		rc = rng_get_data(rng, rng_fillbuf,
+				  rng_buffer_size(), 1);
+		mutex_unlock(&reading_mutex);
+		put_rng(rng);
+		if (rc <= 0) {
+			pr_warn("hwrng: no data available\n");
+			msleep_interruptible(10000);
+			continue;
+		}
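+		/*
+		 * Entropy credited below is rc bytes scaled by quality/1024:
+		 * e.g. rc = 64 and current_quality = 700 credits
+		 * 64 * 700 * 8 / 1024 = 350 bits.
+		 */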
+		/* Outside lock, sure, but y'know: randomness. */
+		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
+					   rc * current_quality * 8 >> 10);
+	}
+	hwrng_fill = NULL;
+	return 0;
+}
+
+static void start_khwrngd(void)
+{
+	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
+	if (IS_ERR(hwrng_fill)) {
+		pr_err("hwrng_fill thread creation failed\n");
+		hwrng_fill = NULL;
+	}
+}
+
+int hwrng_register(struct hwrng *rng)
+{
+	int err = -EINVAL;
+	struct hwrng *old_rng, *tmp;
+	struct list_head *rng_list_ptr;
+
+	if (!rng->name || (!rng->data_read && !rng->read))
+		goto out;
+
+	mutex_lock(&rng_mutex);
+	/* Must not register two RNGs with the same name. */
+	err = -EEXIST;
+	list_for_each_entry(tmp, &rng_list, list) {
+		if (strcmp(tmp->name, rng->name) == 0)
+			goto out_unlock;
+	}
+
+	init_completion(&rng->cleanup_done);
+	complete(&rng->cleanup_done);
+
+	/* rng_list is sorted by decreasing quality */
+	list_for_each(rng_list_ptr, &rng_list) {
+		tmp = list_entry(rng_list_ptr, struct hwrng, list);
+		if (tmp->quality < rng->quality)
+			break;
+	}
+	list_add_tail(&rng->list, rng_list_ptr);
+
+	old_rng = current_rng;
+	err = 0;
+	if (!old_rng ||
+	    (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
+		/*
+		 * Set new rng as current as the new rng source
+		 * provides better entropy quality and was not
+		 * chosen by userspace.
+		 */
+		err = set_current_rng(rng);
+		if (err)
+			goto out_unlock;
+	}
+
+	if (old_rng && !rng->init) {
+		/*
+		 * Use a new device's input to add some randomness to
+		 * the system.  If this rng device isn't going to be
+		 * used right away, its init function hasn't been
+		 * called yet; so only use the randomness from devices
+		 * that don't need an init callback.
+		 */
+		add_early_randomness(rng);
+	}
+
+out_unlock:
+	mutex_unlock(&rng_mutex);
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(hwrng_register);
+
+void hwrng_unregister(struct hwrng *rng)
+{
+	int err;
+
+	mutex_lock(&rng_mutex);
+
+	list_del(&rng->list);
+	if (current_rng == rng) {
+		err = enable_best_rng();
+		if (err) {
+			drop_current_rng();
+			cur_rng_set_by_user = 0;
+		}
+	}
+
+	if (list_empty(&rng_list)) {
+		mutex_unlock(&rng_mutex);
+		if (hwrng_fill)
+			kthread_stop(hwrng_fill);
+	} else
+		mutex_unlock(&rng_mutex);
+
+	wait_for_completion(&rng->cleanup_done);
+}
+EXPORT_SYMBOL_GPL(hwrng_unregister);
+
+static void devm_hwrng_release(struct device *dev, void *res)
+{
+	hwrng_unregister(*(struct hwrng **)res);
+}
+
+static int devm_hwrng_match(struct device *dev, void *res, void *data)
+{
+	struct hwrng **r = res;
+
+	if (WARN_ON(!r || !*r))
+		return 0;
+
+	return *r == data;
+}
+
+int devm_hwrng_register(struct device *dev, struct hwrng *rng)
+{
+	struct hwrng **ptr;
+	int error;
+
+	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	error = hwrng_register(rng);
+	if (error) {
+		devres_free(ptr);
+		return error;
+	}
+
+	*ptr = rng;
+	devres_add(dev, ptr);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_hwrng_register);
+
+void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
+{
+	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
+}
+EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
+
+static int __init hwrng_modinit(void)
+{
+	int ret = -ENOMEM;
+
+	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
+	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
+	if (!rng_buffer)
+		return -ENOMEM;
+
+	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
+	if (!rng_fillbuf) {
+		kfree(rng_buffer);
+		return -ENOMEM;
+	}
+
+	ret = register_miscdev();
+	if (ret) {
+		kfree(rng_fillbuf);
+		kfree(rng_buffer);
+	}
+
+	return ret;
+}
+
+static void __exit hwrng_modexit(void)
+{
+	mutex_lock(&rng_mutex);
+	BUG_ON(current_rng);
+	kfree(rng_buffer);
+	kfree(rng_fillbuf);
+	mutex_unlock(&rng_mutex);
+
+	unregister_miscdev();
+}
+
+module_init(hwrng_modinit);
+module_exit(hwrng_modexit);
+
+MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
new file mode 100644
index 0000000..9423576
--- /dev/null
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RNG driver for Exynos TRNGs
+ *
+ * Author: Łukasz Stelmach <l.stelmach@samsung.com>
+ *
+ * Copyright 2017 (c) Samsung Electronics Software, Inc.
+ *
+ * Based on the Exynos PRNG driver drivers/crypto/exynos-rng by
+ * Krzysztof Kozłowski <krzk@kernel.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define EXYNOS_TRNG_CLKDIV         (0x0)
+
+#define EXYNOS_TRNG_CTRL           (0x20)
+#define EXYNOS_TRNG_CTRL_RNGEN     BIT(31)
+
+#define EXYNOS_TRNG_POST_CTRL      (0x30)
+#define EXYNOS_TRNG_ONLINE_CTRL    (0x40)
+#define EXYNOS_TRNG_ONLINE_STAT    (0x44)
+#define EXYNOS_TRNG_ONLINE_MAXCHI2 (0x48)
+#define EXYNOS_TRNG_FIFO_CTRL      (0x50)
+#define EXYNOS_TRNG_FIFO_0         (0x80)
+#define EXYNOS_TRNG_FIFO_1         (0x84)
+#define EXYNOS_TRNG_FIFO_2         (0x88)
+#define EXYNOS_TRNG_FIFO_3         (0x8c)
+#define EXYNOS_TRNG_FIFO_4         (0x90)
+#define EXYNOS_TRNG_FIFO_5         (0x94)
+#define EXYNOS_TRNG_FIFO_6         (0x98)
+#define EXYNOS_TRNG_FIFO_7         (0x9c)
+#define EXYNOS_TRNG_FIFO_LEN       (8)
+#define EXYNOS_TRNG_CLOCK_RATE     (500000)
+
+
+struct exynos_trng_dev {
+	struct device    *dev;
+	void __iomem     *mem;
+	struct clk       *clk;
+	struct hwrng rng;
+};
+
+static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max,
+			       bool wait)
+{
+	struct exynos_trng_dev *trng;
+	int val;
+
+	max = min_t(size_t, max, (EXYNOS_TRNG_FIFO_LEN * 4));
+
+	trng = (struct exynos_trng_dev *)rng->priv;
+
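+	/*
+	 * Writing the requested number of bits to FIFO_CTRL starts the
+	 * FIFO fill; the register reads back as zero once the FIFO has
+	 * been filled and the data can be copied out.
+	 */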
+	writel_relaxed(max * 8, trng->mem + EXYNOS_TRNG_FIFO_CTRL);
+	val = readl_poll_timeout(trng->mem + EXYNOS_TRNG_FIFO_CTRL, val,
+				 val == 0, 200, 1000000);
+	if (val < 0)
+		return val;
+
+	memcpy_fromio(data, trng->mem + EXYNOS_TRNG_FIFO_0, max);
+
+	return max;
+}
+
+static int exynos_trng_init(struct hwrng *rng)
+{
+	struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv;
+	unsigned long sss_rate;
+	u32 val;
+
+	sss_rate = clk_get_rate(trng->clk);
+
+	/*
+	 * For most TRNG circuits, a clock frequency below 500 kHz is
+	 * safe.
+	 */
+	val = sss_rate / (EXYNOS_TRNG_CLOCK_RATE * 2);
+	if (val > 0x7fff) {
+		dev_err(trng->dev, "clock divider too large: %d\n", val);
+		return -ERANGE;
+	}
+	val = val << 1;
+	writel_relaxed(val, trng->mem + EXYNOS_TRNG_CLKDIV);
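+	/*
+	 * For example (clock rate assumed, not taken from any datasheet):
+	 * with a 400 MHz SSS clock, val = 400000000 / (500000 * 2) = 400,
+	 * comfortably below the 0x7fff limit; the divider is shifted left
+	 * by one before being written to CLKDIV.
+	 */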
+
+	/* Enable the generator. */
+	val = EXYNOS_TRNG_CTRL_RNGEN;
+	writel_relaxed(val, trng->mem + EXYNOS_TRNG_CTRL);
+
+	/*
+	 * Disable post-processing. /dev/hwrng is supposed to deliver
+	 * unprocessed data.
+	 */
+	writel_relaxed(0, trng->mem + EXYNOS_TRNG_POST_CTRL);
+
+	return 0;
+}
+
+static int exynos_trng_probe(struct platform_device *pdev)
+{
+	struct exynos_trng_dev *trng;
+	struct resource *res;
+	int ret = -ENOMEM;
+
+	trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+	if (!trng)
+		return ret;
+
+	trng->rng.name = devm_kstrdup(&pdev->dev, dev_name(&pdev->dev),
+				      GFP_KERNEL);
+	if (!trng->rng.name)
+		return ret;
+
+	trng->rng.init = exynos_trng_init;
+	trng->rng.read = exynos_trng_do_read;
+	trng->rng.priv = (unsigned long) trng;
+
+	platform_set_drvdata(pdev, trng);
+	trng->dev = &pdev->dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	trng->mem = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(trng->mem))
+		return PTR_ERR(trng->mem);
+
+	pm_runtime_enable(&pdev->dev);
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Could not get runtime PM.\n");
+		goto err_pm_get;
+	}
+
+	trng->clk = devm_clk_get(&pdev->dev, "secss");
+	if (IS_ERR(trng->clk)) {
+		ret = PTR_ERR(trng->clk);
+		dev_err(&pdev->dev, "Could not get clock.\n");
+		goto err_clock;
+	}
+
+	ret = clk_prepare_enable(trng->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Could not enable the clk.\n");
+		goto err_clock;
+	}
+
+	ret = hwrng_register(&trng->rng);
+	if (ret) {
+		dev_err(&pdev->dev, "Could not register hwrng device.\n");
+		goto err_register;
+	}
+
+	dev_info(&pdev->dev, "Exynos True Random Number Generator.\n");
+
+	return 0;
+
+err_register:
+	clk_disable_unprepare(trng->clk);
+
+err_clock:
+	pm_runtime_put_sync(&pdev->dev);
+
+err_pm_get:
+	pm_runtime_disable(&pdev->dev);
+
+	return ret;
+}
+
+static int exynos_trng_remove(struct platform_device *pdev)
+{
+	struct exynos_trng_dev *trng =  platform_get_drvdata(pdev);
+
+	hwrng_unregister(&trng->rng);
+	clk_disable_unprepare(trng->clk);
+
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+static int __maybe_unused exynos_trng_suspend(struct device *dev)
+{
+	pm_runtime_put_sync(dev);
+
+	return 0;
+}
+
+static int __maybe_unused exynos_trng_resume(struct device *dev)
+{
+	int ret;
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		dev_err(dev, "Could not get runtime PM.\n");
+		pm_runtime_put_noidle(dev);
+		return ret;
+	}
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend,
+			 exynos_trng_resume);
+
+static const struct of_device_id exynos_trng_dt_match[] = {
+	{
+		.compatible = "samsung,exynos5250-trng",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, exynos_trng_dt_match);
+
+static struct platform_driver exynos_trng_driver = {
+	.driver = {
+		.name = "exynos-trng",
+		.pm = &exynos_trng_pm_ops,
+		.of_match_table = exynos_trng_dt_match,
+	},
+	.probe = exynos_trng_probe,
+	.remove = exynos_trng_remove,
+};
+
+module_platform_driver(exynos_trng_driver);
+MODULE_AUTHOR("Łukasz Stelmach");
+MODULE_DESCRIPTION("H/W TRNG driver for Exynos chips");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
new file mode 100644
index 0000000..e1d421a
--- /dev/null
+++ b/drivers/char/hw_random/geode-rng.c
@@ -0,0 +1,139 @@
+/*
+ * RNG driver for AMD Geode RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+
+#define PFX	KBUILD_MODNAME ": "
+
+#define GEODE_RNG_DATA_REG   0x50
+#define GEODE_RNG_STATUS_REG 0x54
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE.  We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static const struct pci_device_id pci_tbl[] = {
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), 0, },
+	{ 0, },	/* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+
+static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+{
+	void __iomem *mem = (void __iomem *)rng->priv;
+
+	*data = readl(mem + GEODE_RNG_DATA_REG);
+
+	return 4;
+}
+
+static int geode_rng_data_present(struct hwrng *rng, int wait)
+{
+	void __iomem *mem = (void __iomem *)rng->priv;
+	int data, i;
+
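+	/* Poll for at most 20 * 10 us = 200 us before reporting no data. */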
+	for (i = 0; i < 20; i++) {
+		data = !!(readl(mem + GEODE_RNG_STATUS_REG));
+		if (data || !wait)
+			break;
+		udelay(10);
+	}
+	return data;
+}
+
+
+static struct hwrng geode_rng = {
+	.name		= "geode",
+	.data_present	= geode_rng_data_present,
+	.data_read	= geode_rng_data_read,
+};
+
+
+static int __init mod_init(void)
+{
+	int err = -ENODEV;
+	struct pci_dev *pdev = NULL;
+	const struct pci_device_id *ent;
+	void __iomem *mem;
+	unsigned long rng_base;
+
+	for_each_pci_dev(pdev) {
+		ent = pci_match_id(pci_tbl, pdev);
+		if (ent)
+			goto found;
+	}
+	/* Device not found. */
+	goto out;
+
+found:
+	rng_base = pci_resource_start(pdev, 0);
+	if (rng_base == 0)
+		goto out;
+	err = -ENOMEM;
+	mem = ioremap(rng_base, 0x58);
+	if (!mem)
+		goto out;
+	geode_rng.priv = (unsigned long)mem;
+
+	pr_info("AMD Geode RNG detected\n");
+	err = hwrng_register(&geode_rng);
+	if (err) {
+		pr_err(PFX "RNG registering failed (%d)\n",
+		       err);
+		goto err_unmap;
+	}
+out:
+	return err;
+
+err_unmap:
+	iounmap(mem);
+	goto out;
+}
+
+static void __exit mod_exit(void)
+{
+	void __iomem *mem = (void __iomem *)geode_rng.priv;
+
+	hwrng_unregister(&geode_rng);
+	iounmap(mem);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("H/W RNG driver for AMD Geode LX CPUs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/hisi-rng.c b/drivers/char/hw_random/hisi-rng.c
new file mode 100644
index 0000000..40d9657
--- /dev/null
+++ b/drivers/char/hw_random/hisi-rng.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2016 HiSilicon Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+
+#define RNG_SEED	0x0
+#define RNG_CTRL	0x4
+  #define RNG_SEED_SEL	BIT(2)
+  #define RNG_RING_EN	BIT(1)
+  #define RNG_EN	BIT(0)
+#define RNG_RAN_NUM	0x10
+#define RNG_PHY_SEED	0x14
+
+#define to_hisi_rng(p)	container_of(p, struct hisi_rng, rng)
+
+static int seed_sel;
+module_param(seed_sel, int, S_IRUGO);
+MODULE_PARM_DESC(seed_sel, "Auto-reload seed source. 0: use LFSR (default); 1: use ring oscillator.");
+
+struct hisi_rng {
+	void __iomem *base;
+	struct hwrng rng;
+};
+
+static int hisi_rng_init(struct hwrng *rng)
+{
+	struct hisi_rng *hrng = to_hisi_rng(rng);
+	int val = RNG_EN;
+	u32 seed;
+
+	/* get a random number as initial seed */
+	get_random_bytes(&seed, sizeof(seed));
+
+	writel_relaxed(seed, hrng->base + RNG_SEED);
+
+	/*
+	 * The seed is reloaded periodically. There are two choices of
+	 * seed: by default the value comes from the LFSR; alternatively
+	 * a seed generated by the ring oscillator can be used.
+	 */
+	if (seed_sel == 1)
+		val |= RNG_RING_EN | RNG_SEED_SEL;
+
+	writel_relaxed(val, hrng->base + RNG_CTRL);
+	return 0;
+}
+
+static void hisi_rng_cleanup(struct hwrng *rng)
+{
+	struct hisi_rng *hrng = to_hisi_rng(rng);
+
+	writel_relaxed(0, hrng->base + RNG_CTRL);
+}
+
+static int hisi_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+	struct hisi_rng *hrng = to_hisi_rng(rng);
+	u32 *data = buf;
+
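+	/* Each read of RNG_RAN_NUM yields one 32-bit random word. */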
+	*data = readl_relaxed(hrng->base + RNG_RAN_NUM);
+	return 4;
+}
+
+static int hisi_rng_probe(struct platform_device *pdev)
+{
+	struct hisi_rng *rng;
+	struct resource *res;
+	int ret;
+
+	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+	if (!rng)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, rng);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rng->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(rng->base))
+		return PTR_ERR(rng->base);
+
+	rng->rng.name = pdev->name;
+	rng->rng.init = hisi_rng_init;
+	rng->rng.cleanup = hisi_rng_cleanup;
+	rng->rng.read = hisi_rng_read;
+
+	ret = devm_hwrng_register(&pdev->dev, &rng->rng);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register hwrng\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id hisi_rng_dt_ids[] = {
+	{ .compatible = "hisilicon,hip04-rng" },
+	{ .compatible = "hisilicon,hip05-rng" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, hisi_rng_dt_ids);
+
+static struct platform_driver hisi_rng_driver = {
+	.probe		= hisi_rng_probe,
+	.driver		= {
+		.name	= "hisi-rng",
+		.of_match_table = of_match_ptr(hisi_rng_dt_ids),
+	},
+};
+
+module_platform_driver(hisi_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kefeng Wang <wangkefeng.wang@huawei>");
+MODULE_DESCRIPTION("Hisilicon random number generator driver");
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
new file mode 100644
index 0000000..14730be
--- /dev/null
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -0,0 +1,325 @@
+/*
+ * RNG driver for Freescale RNGC
+ *
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2017 Martin Kaiser <martin@kaiser.cx>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/hw_random.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+
+#define RNGC_COMMAND			0x0004
+#define RNGC_CONTROL			0x0008
+#define RNGC_STATUS			0x000C
+#define RNGC_ERROR			0x0010
+#define RNGC_FIFO			0x0014
+
+#define RNGC_CMD_CLR_ERR		0x00000020
+#define RNGC_CMD_CLR_INT		0x00000010
+#define RNGC_CMD_SEED			0x00000002
+#define RNGC_CMD_SELF_TEST		0x00000001
+
+#define RNGC_CTRL_MASK_ERROR		0x00000040
+#define RNGC_CTRL_MASK_DONE		0x00000020
+
+#define RNGC_STATUS_ERROR		0x00010000
+#define RNGC_STATUS_FIFO_LEVEL_MASK	0x00000f00
+#define RNGC_STATUS_FIFO_LEVEL_SHIFT	8
+#define RNGC_STATUS_SEED_DONE		0x00000020
+#define RNGC_STATUS_ST_DONE		0x00000010
+
+#define RNGC_ERROR_STATUS_STAT_ERR	0x00000008
+
+#define RNGC_TIMEOUT  3000 /* timeout in jiffies (3 s at HZ=1000) */
+
+
+static bool self_test = true;
+module_param(self_test, bool, 0);
+
+struct imx_rngc {
+	struct device		*dev;
+	struct clk		*clk;
+	void __iomem		*base;
+	struct hwrng		rng;
+	struct completion	rng_op_done;
+	/*
+	 * err_reg is written only by the irq handler and read only
+	 * while interrupts are masked, so no spinlock is needed.
+	 */
+	u32			err_reg;
+};
+
+
+static inline void imx_rngc_irq_mask_clear(struct imx_rngc *rngc)
+{
+	u32 ctrl, cmd;
+
+	/* mask interrupts */
+	ctrl = readl(rngc->base + RNGC_CONTROL);
+	ctrl |= RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR;
+	writel(ctrl, rngc->base + RNGC_CONTROL);
+
+	/*
+	 * CLR_INT clears the interrupt only if there's no error;
+	 * CLR_ERR clears both the interrupt and the error register
+	 * if there is an error.
+	 */
+	cmd = readl(rngc->base + RNGC_COMMAND);
+	cmd |= RNGC_CMD_CLR_INT | RNGC_CMD_CLR_ERR;
+	writel(cmd, rngc->base + RNGC_COMMAND);
+}
+
+static inline void imx_rngc_irq_unmask(struct imx_rngc *rngc)
+{
+	u32 ctrl;
+
+	ctrl = readl(rngc->base + RNGC_CONTROL);
+	ctrl &= ~(RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR);
+	writel(ctrl, rngc->base + RNGC_CONTROL);
+}
+
+static int imx_rngc_self_test(struct imx_rngc *rngc)
+{
+	u32 cmd;
+	int ret;
+
+	imx_rngc_irq_unmask(rngc);
+
+	/* run self test */
+	cmd = readl(rngc->base + RNGC_COMMAND);
+	writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND);
+
+	ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT);
+	if (!ret) {
+		imx_rngc_irq_mask_clear(rngc);
+		return -ETIMEDOUT;
+	}
+
+	if (rngc->err_reg != 0)
+		return -EIO;
+
+	return 0;
+}
+
+static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+	unsigned int status;
+	unsigned int level;
+	int retval = 0;
+
+	while (max >= sizeof(u32)) {
+		status = readl(rngc->base + RNGC_STATUS);
+
+		/* is there some error while reading this random number? */
+		if (status & RNGC_STATUS_ERROR)
+			break;
+
+		/* how many random numbers are in FIFO? [0-16] */
+		level = (status & RNGC_STATUS_FIFO_LEVEL_MASK) >>
+			RNGC_STATUS_FIFO_LEVEL_SHIFT;
+
+		if (level) {
+			/* retrieve a random number from FIFO */
+			*(u32 *)data = readl(rngc->base + RNGC_FIFO);
+
+			retval += sizeof(u32);
+			data += sizeof(u32);
+			max -= sizeof(u32);
+		}
+	}
+
+	return retval ? retval : -EIO;
+}
+
+static irqreturn_t imx_rngc_irq(int irq, void *priv)
+{
+	struct imx_rngc *rngc = (struct imx_rngc *)priv;
+	u32 status;
+
+	/*
+	 * Clearing the interrupt also clears the error register, so
+	 * read the error and status registers before clearing.
+	 */
+	status = readl(rngc->base + RNGC_STATUS);
+	rngc->err_reg = readl(rngc->base + RNGC_ERROR);
+
+	imx_rngc_irq_mask_clear(rngc);
+
+	if (status & (RNGC_STATUS_SEED_DONE | RNGC_STATUS_ST_DONE))
+		complete(&rngc->rng_op_done);
+
+	return IRQ_HANDLED;
+}
+
+static int imx_rngc_init(struct hwrng *rng)
+{
+	struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+	u32 cmd;
+	int ret;
+
+	/* clear error */
+	cmd = readl(rngc->base + RNGC_COMMAND);
+	writel(cmd | RNGC_CMD_CLR_ERR, rngc->base + RNGC_COMMAND);
+
+	/* create seed, repeat while there is some statistical error */
+	do {
+		imx_rngc_irq_unmask(rngc);
+
+		/* seed creation */
+		cmd = readl(rngc->base + RNGC_COMMAND);
+		writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND);
+
+		ret = wait_for_completion_timeout(&rngc->rng_op_done,
+				RNGC_TIMEOUT);
+
+		if (!ret) {
+			imx_rngc_irq_mask_clear(rngc);
+			return -ETIMEDOUT;
+		}
+
+	} while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR);
+
+	return rngc->err_reg ? -EIO : 0;
+}
+
+static int imx_rngc_probe(struct platform_device *pdev)
+{
+	struct imx_rngc *rngc;
+	struct resource *res;
+	int ret;
+	int irq;
+
+	rngc = devm_kzalloc(&pdev->dev, sizeof(*rngc), GFP_KERNEL);
+	if (!rngc)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rngc->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(rngc->base))
+		return PTR_ERR(rngc->base);
+
+	rngc->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(rngc->clk)) {
+		dev_err(&pdev->dev, "Can not get rng_clk\n");
+		return PTR_ERR(rngc->clk);
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(&pdev->dev, "Couldn't get irq %d\n", irq);
+		return irq;
+	}
+
+	ret = clk_prepare_enable(rngc->clk);
+	if (ret)
+		return ret;
+
+	ret = devm_request_irq(&pdev->dev,
+			irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get interrupt working.\n");
+		goto err;
+	}
+
+	init_completion(&rngc->rng_op_done);
+
+	rngc->rng.name = pdev->name;
+	rngc->rng.init = imx_rngc_init;
+	rngc->rng.read = imx_rngc_read;
+
+	rngc->dev = &pdev->dev;
+	platform_set_drvdata(pdev, rngc);
+
+	imx_rngc_irq_mask_clear(rngc);
+
+	if (self_test) {
+		ret = imx_rngc_self_test(rngc);
+		if (ret) {
+			dev_err(rngc->dev, "FSL RNGC self test failed.\n");
+			goto err;
+		}
+	}
+
+	ret = hwrng_register(&rngc->rng);
+	if (ret) {
+		dev_err(&pdev->dev, "FSL RNGC registering failed (%d)\n", ret);
+		goto err;
+	}
+
+	dev_info(&pdev->dev, "Freescale RNGC registered.\n");
+	return 0;
+
+err:
+	clk_disable_unprepare(rngc->clk);
+
+	return ret;
+}
+
+static int __exit imx_rngc_remove(struct platform_device *pdev)
+{
+	struct imx_rngc *rngc = platform_get_drvdata(pdev);
+
+	hwrng_unregister(&rngc->rng);
+
+	clk_disable_unprepare(rngc->clk);
+
+	return 0;
+}
+
+static int __maybe_unused imx_rngc_suspend(struct device *dev)
+{
+	struct imx_rngc *rngc = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(rngc->clk);
+
+	return 0;
+}
+
+static int __maybe_unused imx_rngc_resume(struct device *dev)
+{
+	struct imx_rngc *rngc = dev_get_drvdata(dev);
+
+	return clk_prepare_enable(rngc->clk);
+}
+
+static SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
+
+static const struct of_device_id imx_rngc_dt_ids[] = {
+	{ .compatible = "fsl,imx25-rngb", .data = NULL, },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
+
+static struct platform_driver imx_rngc_driver = {
+	.driver = {
+		.name = "imx_rngc",
+		.pm = &imx_rngc_pm_ops,
+		.of_match_table = imx_rngc_dt_ids,
+	},
+	.remove = __exit_p(imx_rngc_remove),
+};
+
+module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("H/W RNGC driver for i.MX");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
new file mode 100644
index 0000000..290c880
--- /dev/null
+++ b/drivers/char/hw_random/intel-rng.c
@@ -0,0 +1,418 @@
+/*
+ * RNG driver for Intel RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/stop_machine.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+
+#define PFX	KBUILD_MODNAME ": "
+
+/*
+ * RNG registers
+ */
+#define INTEL_RNG_HW_STATUS			0
+#define         INTEL_RNG_PRESENT		0x40
+#define         INTEL_RNG_ENABLED		0x01
+#define INTEL_RNG_STATUS			1
+#define         INTEL_RNG_DATA_PRESENT		0x01
+#define INTEL_RNG_DATA				2
+
+/*
+ * Magic address at which Intel PCI bridges locate the RNG
+ */
+#define INTEL_RNG_ADDR				0xFFBC015F
+#define INTEL_RNG_ADDR_LEN			3
+
+/*
+ * LPC bridge PCI config space registers
+ */
+#define FWH_DEC_EN1_REG_OLD			0xe3
+#define FWH_DEC_EN1_REG_NEW			0xd9 /* high byte of 16-bit register */
+#define FWH_F8_EN_MASK				0x80
+
+#define BIOS_CNTL_REG_OLD			0x4e
+#define BIOS_CNTL_REG_NEW			0xdc
+#define BIOS_CNTL_WRITE_ENABLE_MASK		0x01
+#define BIOS_CNTL_LOCK_ENABLE_MASK		0x02
+
+/*
+ * Magic address at which Intel Firmware Hubs get accessed
+ */
+#define INTEL_FWH_ADDR				0xffff0000
+#define INTEL_FWH_ADDR_LEN			2
+
+/*
+ * Intel Firmware Hub command codes (write to any address inside the device)
+ */
+#define INTEL_FWH_RESET_CMD			0xff /* aka READ_ARRAY */
+#define INTEL_FWH_READ_ID_CMD			0x90
+
+/*
+ * Intel Firmware Hub Read ID command result addresses
+ */
+#define INTEL_FWH_MANUFACTURER_CODE_ADDRESS	0x000000
+#define INTEL_FWH_DEVICE_CODE_ADDRESS		0x000001
+
+/*
+ * Intel Firmware Hub Read ID command result values
+ */
+#define INTEL_FWH_MANUFACTURER_CODE		0x89
+#define INTEL_FWH_DEVICE_CODE_8M		0xac
+#define INTEL_FWH_DEVICE_CODE_4M		0xad
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE.  We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static const struct pci_device_id pci_tbl[] = {
+/* AA
+	{ PCI_DEVICE(0x8086, 0x2418) }, */
+	{ PCI_DEVICE(0x8086, 0x2410) }, /* AA */
+/* AB
+	{ PCI_DEVICE(0x8086, 0x2428) }, */
+	{ PCI_DEVICE(0x8086, 0x2420) }, /* AB */
+/* ??
+	{ PCI_DEVICE(0x8086, 0x2430) }, */
+/* BAM, CAM, DBM, FBM, GxM
+	{ PCI_DEVICE(0x8086, 0x2448) }, */
+	{ PCI_DEVICE(0x8086, 0x244c) }, /* BAM */
+	{ PCI_DEVICE(0x8086, 0x248c) }, /* CAM */
+	{ PCI_DEVICE(0x8086, 0x24cc) }, /* DBM */
+	{ PCI_DEVICE(0x8086, 0x2641) }, /* FBM */
+	{ PCI_DEVICE(0x8086, 0x27b9) }, /* GxM */
+	{ PCI_DEVICE(0x8086, 0x27bd) }, /* GxM DH */
+/* BA, CA, DB, Ex, 6300, Fx, 631x/632x, Gx
+	{ PCI_DEVICE(0x8086, 0x244e) }, */
+	{ PCI_DEVICE(0x8086, 0x2440) }, /* BA */
+	{ PCI_DEVICE(0x8086, 0x2480) }, /* CA */
+	{ PCI_DEVICE(0x8086, 0x24c0) }, /* DB */
+	{ PCI_DEVICE(0x8086, 0x24d0) }, /* Ex */
+	{ PCI_DEVICE(0x8086, 0x25a1) }, /* 6300 */
+	{ PCI_DEVICE(0x8086, 0x2640) }, /* Fx */
+	{ PCI_DEVICE(0x8086, 0x2670) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x2671) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x2672) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x2673) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x2674) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x2675) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x2676) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x2677) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x2678) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x2679) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x267a) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x267b) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x267c) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x267d) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x267e) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x267f) }, /* 631x/632x */
+	{ PCI_DEVICE(0x8086, 0x27b8) }, /* Gx */
+/* E
+	{ PCI_DEVICE(0x8086, 0x245e) }, */
+	{ PCI_DEVICE(0x8086, 0x2450) }, /* E  */
+	{ 0, },	/* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+static __initdata int no_fwh_detect;
+module_param(no_fwh_detect, int, 0);
+MODULE_PARM_DESC(no_fwh_detect, "Skip FWH detection:\n"
+                                " positive value - skip if FWH space locked read-only\n"
+                                " negative value - skip always");
+
+static inline u8 hwstatus_get(void __iomem *mem)
+{
+	return readb(mem + INTEL_RNG_HW_STATUS);
+}
+
+static inline u8 hwstatus_set(void __iomem *mem,
+			      u8 hw_status)
+{
+	writeb(hw_status, mem + INTEL_RNG_HW_STATUS);
+	return hwstatus_get(mem);
+}
+
+static int intel_rng_data_present(struct hwrng *rng, int wait)
+{
+	void __iomem *mem = (void __iomem *)rng->priv;
+	int data, i;
+
+	for (i = 0; i < 20; i++) {
+		data = !!(readb(mem + INTEL_RNG_STATUS) &
+			  INTEL_RNG_DATA_PRESENT);
+		if (data || !wait)
+			break;
+		udelay(10);
+	}
+	return data;
+}
+
+static int intel_rng_data_read(struct hwrng *rng, u32 *data)
+{
+	void __iomem *mem = (void __iomem *)rng->priv;
+
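+	/* The data register is only 8 bits wide, hence one byte per read. */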
+	*data = readb(mem + INTEL_RNG_DATA);
+
+	return 1;
+}
+
+static int intel_rng_init(struct hwrng *rng)
+{
+	void __iomem *mem = (void __iomem *)rng->priv;
+	u8 hw_status;
+	int err = -EIO;
+
+	hw_status = hwstatus_get(mem);
+	/* turn RNG h/w on, if it's off */
+	if ((hw_status & INTEL_RNG_ENABLED) == 0)
+		hw_status = hwstatus_set(mem, hw_status | INTEL_RNG_ENABLED);
+	if ((hw_status & INTEL_RNG_ENABLED) == 0) {
+		pr_err(PFX "cannot enable RNG, aborting\n");
+		goto out;
+	}
+	err = 0;
+out:
+	return err;
+}
+
+static void intel_rng_cleanup(struct hwrng *rng)
+{
+	void __iomem *mem = (void __iomem *)rng->priv;
+	u8 hw_status;
+
+	hw_status = hwstatus_get(mem);
+	if (hw_status & INTEL_RNG_ENABLED)
+		hwstatus_set(mem, hw_status & ~INTEL_RNG_ENABLED);
+	else
+		pr_warn(PFX "unusual: RNG already disabled\n");
+}
+
+
+static struct hwrng intel_rng = {
+	.name		= "intel",
+	.init		= intel_rng_init,
+	.cleanup	= intel_rng_cleanup,
+	.data_present	= intel_rng_data_present,
+	.data_read	= intel_rng_data_read,
+};
+
+struct intel_rng_hw {
+	struct pci_dev *dev;
+	void __iomem *mem;
+	u8 bios_cntl_off;
+	u8 bios_cntl_val;
+	u8 fwh_dec_en1_off;
+	u8 fwh_dec_en1_val;
+};
+
+static int __init intel_rng_hw_init(void *_intel_rng_hw)
+{
+	struct intel_rng_hw *intel_rng_hw = _intel_rng_hw;
+	u8 mfc, dvc;
+
+	/* interrupts disabled in stop_machine call */
+
+	if (!(intel_rng_hw->fwh_dec_en1_val & FWH_F8_EN_MASK))
+		pci_write_config_byte(intel_rng_hw->dev,
+		                      intel_rng_hw->fwh_dec_en1_off,
+		                      intel_rng_hw->fwh_dec_en1_val |
+				      FWH_F8_EN_MASK);
+	if (!(intel_rng_hw->bios_cntl_val & BIOS_CNTL_WRITE_ENABLE_MASK))
+		pci_write_config_byte(intel_rng_hw->dev,
+		                      intel_rng_hw->bios_cntl_off,
+		                      intel_rng_hw->bios_cntl_val |
+				      BIOS_CNTL_WRITE_ENABLE_MASK);
+
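+	/*
+	 * Issue the Read ID command to the firmware hub, latch the
+	 * manufacturer and device codes, then return the part to
+	 * normal read-array mode.
+	 */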
+	writeb(INTEL_FWH_RESET_CMD, intel_rng_hw->mem);
+	writeb(INTEL_FWH_READ_ID_CMD, intel_rng_hw->mem);
+	mfc = readb(intel_rng_hw->mem + INTEL_FWH_MANUFACTURER_CODE_ADDRESS);
+	dvc = readb(intel_rng_hw->mem + INTEL_FWH_DEVICE_CODE_ADDRESS);
+	writeb(INTEL_FWH_RESET_CMD, intel_rng_hw->mem);
+
+	if (!(intel_rng_hw->bios_cntl_val &
+	      (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK)))
+		pci_write_config_byte(intel_rng_hw->dev,
+				      intel_rng_hw->bios_cntl_off,
+				      intel_rng_hw->bios_cntl_val);
+	if (!(intel_rng_hw->fwh_dec_en1_val & FWH_F8_EN_MASK))
+		pci_write_config_byte(intel_rng_hw->dev,
+				      intel_rng_hw->fwh_dec_en1_off,
+				      intel_rng_hw->fwh_dec_en1_val);
+
+	if (mfc != INTEL_FWH_MANUFACTURER_CODE ||
+	    (dvc != INTEL_FWH_DEVICE_CODE_8M &&
+	     dvc != INTEL_FWH_DEVICE_CODE_4M)) {
+		pr_notice(PFX "FWH not detected\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int __init intel_init_hw_struct(struct intel_rng_hw *intel_rng_hw,
+					struct pci_dev *dev)
+{
+	intel_rng_hw->bios_cntl_val = 0xff;
+	intel_rng_hw->fwh_dec_en1_val = 0xff;
+	intel_rng_hw->dev = dev;
+
+	/* Check for Intel 82802 */
+	if (dev->device < 0x2640) {
+		intel_rng_hw->fwh_dec_en1_off = FWH_DEC_EN1_REG_OLD;
+		intel_rng_hw->bios_cntl_off = BIOS_CNTL_REG_OLD;
+	} else {
+		intel_rng_hw->fwh_dec_en1_off = FWH_DEC_EN1_REG_NEW;
+		intel_rng_hw->bios_cntl_off = BIOS_CNTL_REG_NEW;
+	}
+
+	pci_read_config_byte(dev, intel_rng_hw->fwh_dec_en1_off,
+			     &intel_rng_hw->fwh_dec_en1_val);
+	pci_read_config_byte(dev, intel_rng_hw->bios_cntl_off,
+			     &intel_rng_hw->bios_cntl_val);
+
+	if ((intel_rng_hw->bios_cntl_val &
+	     (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK))
+	    == BIOS_CNTL_LOCK_ENABLE_MASK) {
+		static __initdata /*const*/ char warning[] =
+PFX "Firmware space is locked read-only. If you can't or\n"
+PFX "don't want to disable this in firmware setup, and if\n"
+PFX "you are certain that your system has a functional\n"
+PFX "RNG, try using the 'no_fwh_detect' option.\n";
+
+		if (no_fwh_detect)
+			return -ENODEV;
+		pr_warn("%s", warning);
+		return -EBUSY;
+	}
+
+	intel_rng_hw->mem = ioremap_nocache(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN);
+	if (intel_rng_hw->mem == NULL)
+		return -EBUSY;
+
+	return 0;
+}
+
+
+static int __init mod_init(void)
+{
+	int err = -ENODEV;
+	int i;
+	struct pci_dev *dev = NULL;
+	void __iomem *mem = mem;
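+	/* self-init quiets a false "may be used uninitialized" warning */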
+	u8 hw_status;
+	struct intel_rng_hw *intel_rng_hw;
+
+	for (i = 0; !dev && pci_tbl[i].vendor; ++i)
+		dev = pci_get_device(pci_tbl[i].vendor, pci_tbl[i].device,
+				     NULL);
+
+	if (!dev)
+		goto out; /* Device not found. */
+
+	if (no_fwh_detect < 0) {
+		pci_dev_put(dev);
+		goto fwh_done;
+	}
+
+	intel_rng_hw = kmalloc(sizeof(*intel_rng_hw), GFP_KERNEL);
+	if (!intel_rng_hw) {
+		pci_dev_put(dev);
+		goto out;
+	}
+
+	err = intel_init_hw_struct(intel_rng_hw, dev);
+	if (err) {
+		pci_dev_put(dev);
+		kfree(intel_rng_hw);
+		if (err == -ENODEV)
+			goto fwh_done;
+		goto out;
+	}
+
+	/*
+	 * Since the BIOS code/data is going to disappear from its normal
+	 * location with the Read ID command, all activity on the system
+	 * must be stopped until the state is back to normal.
+	 *
+	 * Use stop_machine because IPIs can be blocked by disabling
+	 * interrupts.
+	 */
+	err = stop_machine(intel_rng_hw_init, intel_rng_hw, NULL);
+	pci_dev_put(dev);
+	iounmap(intel_rng_hw->mem);
+	kfree(intel_rng_hw);
+	if (err)
+		goto out;
+
+fwh_done:
+	err = -ENOMEM;
+	mem = ioremap(INTEL_RNG_ADDR, INTEL_RNG_ADDR_LEN);
+	if (!mem)
+		goto out;
+	intel_rng.priv = (unsigned long)mem;
+
+	/* Check for Random Number Generator */
+	err = -ENODEV;
+	hw_status = hwstatus_get(mem);
+	if ((hw_status & INTEL_RNG_PRESENT) == 0) {
+		iounmap(mem);
+		goto out;
+	}
+
+	pr_info("Intel 82802 RNG detected\n");
+	err = hwrng_register(&intel_rng);
+	if (err) {
+		pr_err(PFX "RNG registering failed (%d)\n",
+		       err);
+		iounmap(mem);
+	}
+out:
+	return err;
+
+}
+
+static void __exit mod_exit(void)
+{
+	void __iomem *mem = (void __iomem *)intel_rng.priv;
+
+	hwrng_unregister(&intel_rng);
+	iounmap(mem);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("H/W RNG driver for Intel chipsets");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
new file mode 100644
index 0000000..8b5a20b
--- /dev/null
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -0,0 +1,240 @@
+/*
+* Copyright (C) 2015 Broadcom Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation version 2.
+*
+* This program is distributed "as is" WITHOUT ANY WARRANTY of any
+* kind, whether express or implied; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+/*
+ * DESCRIPTION: The Broadcom iProc RNG200 Driver
+ */
+
+#include <linux/hw_random.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+/* Registers */
+#define RNG_CTRL_OFFSET					0x00
+#define RNG_CTRL_RNG_RBGEN_MASK				0x00001FFF
+#define RNG_CTRL_RNG_RBGEN_ENABLE			0x00000001
+#define RNG_CTRL_RNG_RBGEN_DISABLE			0x00000000
+
+#define RNG_SOFT_RESET_OFFSET				0x04
+#define RNG_SOFT_RESET					0x00000001
+
+#define RBG_SOFT_RESET_OFFSET				0x08
+#define RBG_SOFT_RESET					0x00000001
+
+#define RNG_INT_STATUS_OFFSET				0x18
+#define RNG_INT_STATUS_MASTER_FAIL_LOCKOUT_IRQ_MASK	0x80000000
+#define RNG_INT_STATUS_STARTUP_TRANSITIONS_MET_IRQ_MASK	0x00020000
+#define RNG_INT_STATUS_NIST_FAIL_IRQ_MASK		0x00000020
+#define RNG_INT_STATUS_TOTAL_BITS_COUNT_IRQ_MASK	0x00000001
+
+#define RNG_FIFO_DATA_OFFSET				0x20
+
+#define RNG_FIFO_COUNT_OFFSET				0x24
+#define RNG_FIFO_COUNT_RNG_FIFO_COUNT_MASK		0x000000FF
+
+struct iproc_rng200_dev {
+	struct hwrng rng;
+	void __iomem *base;
+};
+
+#define to_rng_priv(rng)	container_of(rng, struct iproc_rng200_dev, rng)
+
+static void iproc_rng200_restart(void __iomem *rng_base)
+{
+	uint32_t val;
+
+	/* Disable RBG */
+	val = ioread32(rng_base + RNG_CTRL_OFFSET);
+	val &= ~RNG_CTRL_RNG_RBGEN_MASK;
+	val |= RNG_CTRL_RNG_RBGEN_DISABLE;
+	iowrite32(val, rng_base + RNG_CTRL_OFFSET);
+
+	/* Clear all interrupt status */
+	iowrite32(0xFFFFFFFFUL, rng_base + RNG_INT_STATUS_OFFSET);
+
+	/* Reset RNG and RBG */
+	val = ioread32(rng_base + RBG_SOFT_RESET_OFFSET);
+	val |= RBG_SOFT_RESET;
+	iowrite32(val, rng_base + RBG_SOFT_RESET_OFFSET);
+
+	val = ioread32(rng_base + RNG_SOFT_RESET_OFFSET);
+	val |= RNG_SOFT_RESET;
+	iowrite32(val, rng_base + RNG_SOFT_RESET_OFFSET);
+
+	val = ioread32(rng_base + RNG_SOFT_RESET_OFFSET);
+	val &= ~RNG_SOFT_RESET;
+	iowrite32(val, rng_base + RNG_SOFT_RESET_OFFSET);
+
+	val = ioread32(rng_base + RBG_SOFT_RESET_OFFSET);
+	val &= ~RBG_SOFT_RESET;
+	iowrite32(val, rng_base + RBG_SOFT_RESET_OFFSET);
+
+	/* Enable RBG */
+	val = ioread32(rng_base + RNG_CTRL_OFFSET);
+	val &= ~RNG_CTRL_RNG_RBGEN_MASK;
+	val |= RNG_CTRL_RNG_RBGEN_ENABLE;
+	iowrite32(val, rng_base + RNG_CTRL_OFFSET);
+}
+
+static int iproc_rng200_read(struct hwrng *rng, void *buf, size_t max,
+			     bool wait)
+{
+	struct iproc_rng200_dev *priv = to_rng_priv(rng);
+	uint32_t num_remaining = max;
+	uint32_t status;
+
+	#define MAX_RESETS_PER_READ	1
+	uint32_t num_resets = 0;
+
+	#define MAX_IDLE_TIME	(1 * HZ)
+	unsigned long idle_endtime = jiffies + MAX_IDLE_TIME;
+
+	while ((num_remaining > 0) && time_before(jiffies, idle_endtime)) {
+
+		/* Is RNG sane? If not, reset it. */
+		status = ioread32(priv->base + RNG_INT_STATUS_OFFSET);
+		if ((status & (RNG_INT_STATUS_MASTER_FAIL_LOCKOUT_IRQ_MASK |
+			RNG_INT_STATUS_NIST_FAIL_IRQ_MASK)) != 0) {
+
+			if (num_resets >= MAX_RESETS_PER_READ)
+				return max - num_remaining;
+
+			iproc_rng200_restart(priv->base);
+			num_resets++;
+		}
+
+		/* Are there any random numbers available? */
+		if ((ioread32(priv->base + RNG_FIFO_COUNT_OFFSET) &
+				RNG_FIFO_COUNT_RNG_FIFO_COUNT_MASK) > 0) {
+
+			if (num_remaining >= sizeof(uint32_t)) {
+				/* Buffer has room to store entire word */
+				*(uint32_t *)buf = ioread32(priv->base +
+							RNG_FIFO_DATA_OFFSET);
+				buf += sizeof(uint32_t);
+				num_remaining -= sizeof(uint32_t);
+			} else {
+				/* Buffer can only store partial word */
+				uint32_t rnd_number = ioread32(priv->base +
+							RNG_FIFO_DATA_OFFSET);
+				memcpy(buf, &rnd_number, num_remaining);
+				buf += num_remaining;
+				num_remaining = 0;
+			}
+
+			/* Reset the IDLE timeout */
+			idle_endtime = jiffies + MAX_IDLE_TIME;
+		} else {
+			if (!wait)
+				/* Cannot wait, return immediately */
+				return max - num_remaining;
+
+			/* Can wait, give others chance to run */
+			usleep_range(min(num_remaining * 10, 500U), 500);
+		}
+	}
+
+	return max - num_remaining;
+}
+
+static int iproc_rng200_init(struct hwrng *rng)
+{
+	struct iproc_rng200_dev *priv = to_rng_priv(rng);
+	uint32_t val;
+
+	/* Setup RNG. */
+	val = ioread32(priv->base + RNG_CTRL_OFFSET);
+	val &= ~RNG_CTRL_RNG_RBGEN_MASK;
+	val |= RNG_CTRL_RNG_RBGEN_ENABLE;
+	iowrite32(val, priv->base + RNG_CTRL_OFFSET);
+
+	return 0;
+}
+
+static void iproc_rng200_cleanup(struct hwrng *rng)
+{
+	struct iproc_rng200_dev *priv = to_rng_priv(rng);
+	uint32_t val;
+
+	/* Disable RNG hardware */
+	val = ioread32(priv->base + RNG_CTRL_OFFSET);
+	val &= ~RNG_CTRL_RNG_RBGEN_MASK;
+	val |= RNG_CTRL_RNG_RBGEN_DISABLE;
+	iowrite32(val, priv->base + RNG_CTRL_OFFSET);
+}
+
+static int iproc_rng200_probe(struct platform_device *pdev)
+{
+	struct iproc_rng200_dev *priv;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	/* Map peripheral */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "failed to get rng resources\n");
+		return -EINVAL;
+	}
+
+	priv->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->base)) {
+		dev_err(dev, "failed to remap rng regs\n");
+		return PTR_ERR(priv->base);
+	}
+
+	priv->rng.name = "iproc-rng200",
+	priv->rng.read = iproc_rng200_read,
+	priv->rng.init = iproc_rng200_init,
+	priv->rng.cleanup = iproc_rng200_cleanup,
+
+	/* Register driver */
+	ret = devm_hwrng_register(dev, &priv->rng);
+	if (ret) {
+		dev_err(dev, "hwrng registration failed\n");
+		return ret;
+	}
+
+	dev_info(dev, "hwrng registered\n");
+
+	return 0;
+}
+
+static const struct of_device_id iproc_rng200_of_match[] = {
+	{ .compatible = "brcm,bcm7278-rng200", },
+	{ .compatible = "brcm,iproc-rng200", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, iproc_rng200_of_match);
+
+static struct platform_driver iproc_rng200_driver = {
+	.driver = {
+		.name		= "iproc-rng200",
+		.of_match_table = iproc_rng200_of_match,
+	},
+	.probe		= iproc_rng200_probe,
+};
+module_platform_driver(iproc_rng200_driver);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("iProc RNG200 Random Number Generator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c
new file mode 100644
index 0000000..beec162
--- /dev/null
+++ b/drivers/char/hw_random/ixp4xx-rng.c
@@ -0,0 +1,75 @@
+/*
+ * drivers/char/hw_random/ixp4xx-rng.c
+ *
+ * RNG driver for Intel IXP4xx family of NPUs
+ *
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * Fixes by Michael Buesch
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/hw_random.h>
+
+#include <asm/io.h>
+#include <mach/hardware.h>
+
+
+static int ixp4xx_rng_data_read(struct hwrng *rng, u32 *buffer)
+{
+	void __iomem *rng_base = (void __iomem *)rng->priv;
+
+	*buffer = __raw_readl(rng_base);
+
+	return 4;
+}
+
+static struct hwrng ixp4xx_rng_ops = {
+	.name		= "ixp4xx",
+	.data_read	= ixp4xx_rng_data_read,
+};
+
+static int __init ixp4xx_rng_init(void)
+{
+	void __iomem *rng_base;
+	int err;
+
+	if (!cpu_is_ixp46x()) /* includes IXP455 */
+		return -ENOSYS;
+
+	rng_base = ioremap(0x70002100, 4);
+	if (!rng_base)
+		return -ENOMEM;
+	ixp4xx_rng_ops.priv = (unsigned long)rng_base;
+	err = hwrng_register(&ixp4xx_rng_ops);
+	if (err)
+		iounmap(rng_base);
+
+	return err;
+}
+
+static void __exit ixp4xx_rng_exit(void)
+{
+	void __iomem *rng_base = (void __iomem *)ixp4xx_rng_ops.priv;
+
+	hwrng_unregister(&ixp4xx_rng_ops);
+	iounmap(rng_base);
+}
+
+module_init(ixp4xx_rng_init);
+module_exit(ixp4xx_rng_exit);
+
+MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
+MODULE_DESCRIPTION("H/W Pseudo-Random Number Generator (RNG) driver for IXP45x/46x");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
new file mode 100644
index 0000000..62c6696
--- /dev/null
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -0,0 +1,257 @@
+/*
+ * Random Number Generator driver for the Keystone SOC
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors:	Sandeep Nair
+ *		Vitaly Andrianov
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+
+#define SA_CMD_STATUS_OFS			0x8
+
+/* TRNG enable control in SA System module */
+#define SA_CMD_STATUS_REG_TRNG_ENABLE		BIT(3)
+
+/* TRNG start control in TRNG module */
+#define TRNG_CNTL_REG_TRNG_ENABLE		BIT(10)
+
+/* Data ready indicator in STATUS register */
+#define TRNG_STATUS_REG_READY			BIT(0)
+
+/* Data ready clear control in INTACK register */
+#define TRNG_INTACK_REG_READY			BIT(0)
+
+/*
+ * Number of samples taken to gather entropy during startup.
+ * If the value is 0, 2^24 samples are taken; otherwise the
+ * sample count equals the value times 2^8.
+ */
+#define TRNG_DEF_STARTUP_CYCLES			0
+#define TRNG_CNTL_REG_STARTUP_CYCLES_SHIFT	16
+
+/*
+ * Minimum number of samples taken to regenerate entropy.
+ * If the value is 0, 2^24 samples are taken; otherwise the
+ * sample count equals the value times 2^6.
+ */
+#define TRNG_DEF_MIN_REFILL_CYCLES		1
+#define TRNG_CFG_REG_MIN_REFILL_CYCLES_SHIFT	0
+
+/*
+ * Maximum number of samples taken to regenerate entropy.
+ * If the value is 0, 2^24 samples are taken; otherwise the
+ * sample count equals the value times 2^8.
+ */
+#define TRNG_DEF_MAX_REFILL_CYCLES		0
+#define TRNG_CFG_REG_MAX_REFILL_CYCLES_SHIFT	16
+
+/* Number of CLK input cycles between samples */
+#define TRNG_DEF_CLK_DIV_CYCLES			0
+#define TRNG_CFG_REG_SAMPLE_DIV_SHIFT		8
+
+/* Maximum retries to get rng data */
+#define SA_MAX_RNG_DATA_RETRIES			5
+/* Delay between retries (in usecs) */
+#define SA_RNG_DATA_RETRY_DELAY			5
+
+struct trng_regs {
+	u32	output_l;
+	u32	output_h;
+	u32	status;
+	u32	intmask;
+	u32	intack;
+	u32	control;
+	u32	config;
+};
+
+struct ks_sa_rng {
+	struct device	*dev;
+	struct hwrng	rng;
+	struct clk	*clk;
+	struct regmap	*regmap_cfg;
+	struct trng_regs *reg_rng;
+};
+
+static int ks_sa_rng_init(struct hwrng *rng)
+{
+	u32 value;
+	struct device *dev = (struct device *)rng->priv;
+	struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+
+	/* Enable RNG module */
+	regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
+			  SA_CMD_STATUS_REG_TRNG_ENABLE,
+			  SA_CMD_STATUS_REG_TRNG_ENABLE);
+
+	/* Configure RNG module */
+	writel(0, &ks_sa_rng->reg_rng->control);
+	value = TRNG_DEF_STARTUP_CYCLES << TRNG_CNTL_REG_STARTUP_CYCLES_SHIFT;
+	writel(value, &ks_sa_rng->reg_rng->control);
+
+	value =	(TRNG_DEF_MIN_REFILL_CYCLES <<
+		 TRNG_CFG_REG_MIN_REFILL_CYCLES_SHIFT) |
+		(TRNG_DEF_MAX_REFILL_CYCLES <<
+		 TRNG_CFG_REG_MAX_REFILL_CYCLES_SHIFT) |
+		(TRNG_DEF_CLK_DIV_CYCLES <<
+		 TRNG_CFG_REG_SAMPLE_DIV_SHIFT);
+
+	writel(value, &ks_sa_rng->reg_rng->config);
+
+	/* Disable all interrupts from TRNG */
+	writel(0, &ks_sa_rng->reg_rng->intmask);
+
+	/* Enable RNG */
+	value = readl(&ks_sa_rng->reg_rng->control);
+	value |= TRNG_CNTL_REG_TRNG_ENABLE;
+	writel(value, &ks_sa_rng->reg_rng->control);
+
+	return 0;
+}
+
+static void ks_sa_rng_cleanup(struct hwrng *rng)
+{
+	struct device *dev = (struct device *)rng->priv;
+	struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+
+	/* Disable RNG */
+	writel(0, &ks_sa_rng->reg_rng->control);
+	regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
+			  SA_CMD_STATUS_REG_TRNG_ENABLE, 0);
+}
+
+static int ks_sa_rng_data_read(struct hwrng *rng, u32 *data)
+{
+	struct device *dev = (struct device *)rng->priv;
+	struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+
+	/* Read random data */
+	data[0] = readl(&ks_sa_rng->reg_rng->output_l);
+	data[1] = readl(&ks_sa_rng->reg_rng->output_h);
+
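+	/* Clear the data-ready indicator so the TRNG gathers a new sample. */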
+	writel(TRNG_INTACK_REG_READY, &ks_sa_rng->reg_rng->intack);
+
+	return sizeof(u32) * 2;
+}
+
+static int ks_sa_rng_data_present(struct hwrng *rng, int wait)
+{
+	struct device *dev = (struct device *)rng->priv;
+	struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+
+	u32	ready;
+	int	j;
+
+	for (j = 0; j < SA_MAX_RNG_DATA_RETRIES; j++) {
+		ready = readl(&ks_sa_rng->reg_rng->status);
+		ready &= TRNG_STATUS_REG_READY;
+
+		if (ready || !wait)
+			break;
+
+		udelay(SA_RNG_DATA_RETRY_DELAY);
+	}
+
+	return ready;
+}
+
+static int ks_sa_rng_probe(struct platform_device *pdev)
+{
+	struct ks_sa_rng	*ks_sa_rng;
+	struct device		*dev = &pdev->dev;
+	int			ret;
+	struct resource		*mem;
+
+	ks_sa_rng = devm_kzalloc(dev, sizeof(*ks_sa_rng), GFP_KERNEL);
+	if (!ks_sa_rng)
+		return -ENOMEM;
+
+	ks_sa_rng->dev = dev;
+	ks_sa_rng->rng = (struct hwrng) {
+		.name = "ks_sa_hwrng",
+		.init = ks_sa_rng_init,
+		.data_read = ks_sa_rng_data_read,
+		.data_present = ks_sa_rng_data_present,
+		.cleanup = ks_sa_rng_cleanup,
+	};
+	ks_sa_rng->rng.priv = (unsigned long)dev;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ks_sa_rng->reg_rng = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(ks_sa_rng->reg_rng))
+		return PTR_ERR(ks_sa_rng->reg_rng);
+
+	ks_sa_rng->regmap_cfg =
+		syscon_regmap_lookup_by_phandle(dev->of_node,
+						"ti,syscon-sa-cfg");
+
+	if (IS_ERR(ks_sa_rng->regmap_cfg)) {
+		dev_err(dev, "syscon_node_to_regmap failed\n");
+		return -EINVAL;
+	}
+
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		dev_err(dev, "Failed to enable SA power-domain\n");
+		pm_runtime_disable(dev);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, ks_sa_rng);
+
+	return devm_hwrng_register(&pdev->dev, &ks_sa_rng->rng);
+}
+
+static int ks_sa_rng_remove(struct platform_device *pdev)
+{
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+static const struct of_device_id ks_sa_rng_dt_match[] = {
+	{
+		.compatible = "ti,keystone-rng",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ks_sa_rng_dt_match);
+
+static struct platform_driver ks_sa_rng_driver = {
+	.driver		= {
+		.name	= "ks-sa-rng",
+		.of_match_table = ks_sa_rng_dt_match,
+	},
+	.probe		= ks_sa_rng_probe,
+	.remove		= ks_sa_rng_remove,
+};
+
+module_platform_driver(ks_sa_rng_driver);
+
+MODULE_DESCRIPTION("Keystone NETCP SA H/W Random Number Generator driver");
+MODULE_AUTHOR("Vitaly Andrianov <vitalya@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
new file mode 100644
index 0000000..2e23be8
--- /dev/null
+++ b/drivers/char/hw_random/meson-rng.c
@@ -0,0 +1,150 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Amlogic, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Amlogic, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+
+#define RNG_DATA 0x00
+
+struct meson_rng_data {
+	void __iomem *base;
+	struct platform_device *pdev;
+	struct hwrng rng;
+	struct clk *core_clk;
+};
+
+static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+	struct meson_rng_data *data =
+			container_of(rng, struct meson_rng_data, rng);
+
+	*(u32 *)buf = readl_relaxed(data->base + RNG_DATA);
+
+	return sizeof(u32);
+}
+
+static void meson_rng_clk_disable(void *data)
+{
+	clk_disable_unprepare(data);
+}
+
+static int meson_rng_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct meson_rng_data *data;
+	struct resource *res;
+	int ret;
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->pdev = pdev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	data->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(data->base))
+		return PTR_ERR(data->base);
+
+	data->core_clk = devm_clk_get(dev, "core");
+	if (IS_ERR(data->core_clk))
+		data->core_clk = NULL;
+
+	if (data->core_clk) {
+		ret = clk_prepare_enable(data->core_clk);
+		if (ret)
+			return ret;
+		ret = devm_add_action_or_reset(dev, meson_rng_clk_disable,
+					       data->core_clk);
+		if (ret)
+			return ret;
+	}
+
+	data->rng.name = pdev->name;
+	data->rng.read = meson_rng_read;
+
+	platform_set_drvdata(pdev, data);
+
+	return devm_hwrng_register(dev, &data->rng);
+}
+
+static const struct of_device_id meson_rng_of_match[] = {
+	{ .compatible = "amlogic,meson-rng", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, meson_rng_of_match);
+
+static struct platform_driver meson_rng_driver = {
+	.probe	= meson_rng_probe,
+	.driver	= {
+		.name = "meson-rng",
+		.of_match_table = meson_rng_of_match,
+	},
+};
+
+module_platform_driver(meson_rng_driver);
+
+MODULE_DESCRIPTION("Meson H/W Random Number Generator driver");
+MODULE_AUTHOR("Lawrence Mok <lawrence.mok@amlogic.com>");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
new file mode 100644
index 0000000..7f99cd5
--- /dev/null
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -0,0 +1,211 @@
+/*
+ * Driver for Mediatek Hardware Random Number Generator
+ *
+ * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define MTK_RNG_DEV KBUILD_MODNAME
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+/* Runtime PM autosuspend timeout, in milliseconds */
+#define RNG_AUTOSUSPEND_TIMEOUT		100
+
+#define USEC_POLL			2
+#define TIMEOUT_POLL			20
+
+#define RNG_CTRL			0x00
+#define RNG_EN				BIT(0)
+#define RNG_READY			BIT(31)
+
+#define RNG_DATA			0x08
+
+#define to_mtk_rng(p)	container_of(p, struct mtk_rng, rng)
+
+struct mtk_rng {
+	void __iomem *base;
+	struct clk *clk;
+	struct hwrng rng;
+};
+
+static int mtk_rng_init(struct hwrng *rng)
+{
+	struct mtk_rng *priv = to_mtk_rng(rng);
+	u32 val;
+	int err;
+
+	err = clk_prepare_enable(priv->clk);
+	if (err)
+		return err;
+
+	val = readl(priv->base + RNG_CTRL);
+	val |= RNG_EN;
+	writel(val, priv->base + RNG_CTRL);
+
+	return 0;
+}
+
+static void mtk_rng_cleanup(struct hwrng *rng)
+{
+	struct mtk_rng *priv = to_mtk_rng(rng);
+	u32 val;
+
+	val = readl(priv->base + RNG_CTRL);
+	val &= ~RNG_EN;
+	writel(val, priv->base + RNG_CTRL);
+
+	clk_disable_unprepare(priv->clk);
+}
+
+static bool mtk_rng_wait_ready(struct hwrng *rng, bool wait)
+{
+	struct mtk_rng *priv = to_mtk_rng(rng);
+	int ready;
+
+	ready = readl(priv->base + RNG_CTRL) & RNG_READY;
+	if (!ready && wait)
+		readl_poll_timeout_atomic(priv->base + RNG_CTRL, ready,
+					  ready & RNG_READY, USEC_POLL,
+					  TIMEOUT_POLL);
+	return !!ready;
+}
+
+static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+	struct mtk_rng *priv = to_mtk_rng(rng);
+	int retval = 0;
+
+	pm_runtime_get_sync((struct device *)priv->rng.priv);
+
+	while (max >= sizeof(u32)) {
+		if (!mtk_rng_wait_ready(rng, wait))
+			break;
+
+		*(u32 *)buf = readl(priv->base + RNG_DATA);
+		retval += sizeof(u32);
+		buf += sizeof(u32);
+		max -= sizeof(u32);
+	}
+
+	pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
+	pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
+
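+	/* A short read is an error only if the caller was willing to wait. */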
+	return retval || !wait ? retval : -EIO;
+}
+
+static int mtk_rng_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret;
+	struct mtk_rng *priv;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no iomem resource\n");
+		return -ENXIO;
+	}
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->rng.name = pdev->name;
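+	/* With runtime PM, the block is brought up and down by the
+	 * runtime_resume/suspend callbacks below, so the hwrng
+	 * init/cleanup hooks are only needed when CONFIG_PM is off. */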
+#ifndef CONFIG_PM
+	priv->rng.init = mtk_rng_init;
+	priv->rng.cleanup = mtk_rng_cleanup;
+#endif
+	priv->rng.read = mtk_rng_read;
+	priv->rng.priv = (unsigned long)&pdev->dev;
+	priv->rng.quality = 900;
+
+	priv->clk = devm_clk_get(&pdev->dev, "rng");
+	if (IS_ERR(priv->clk)) {
+		ret = PTR_ERR(priv->clk);
+		dev_err(&pdev->dev, "no clock for device: %d\n", ret);
+		return ret;
+	}
+
+	priv->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	ret = devm_hwrng_register(&pdev->dev, &priv->rng);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register rng device: %d\n",
+			ret);
+		return ret;
+	}
+
+	dev_set_drvdata(&pdev->dev, priv);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	dev_info(&pdev->dev, "registered RNG driver\n");
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int mtk_rng_runtime_suspend(struct device *dev)
+{
+	struct mtk_rng *priv = dev_get_drvdata(dev);
+
+	mtk_rng_cleanup(&priv->rng);
+
+	return 0;
+}
+
+static int mtk_rng_runtime_resume(struct device *dev)
+{
+	struct mtk_rng *priv = dev_get_drvdata(dev);
+
+	return mtk_rng_init(&priv->rng);
+}
+
+static UNIVERSAL_DEV_PM_OPS(mtk_rng_pm_ops, mtk_rng_runtime_suspend,
+			    mtk_rng_runtime_resume, NULL);
+#define MTK_RNG_PM_OPS (&mtk_rng_pm_ops)
+#else	/* CONFIG_PM */
+#define MTK_RNG_PM_OPS NULL
+#endif	/* CONFIG_PM */
+
+static const struct of_device_id mtk_rng_match[] = {
+	{ .compatible = "mediatek,mt7623-rng" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mtk_rng_match);
+
+static struct platform_driver mtk_rng_driver = {
+	.probe          = mtk_rng_probe,
+	.driver = {
+		.name = MTK_RNG_DEV,
+		.pm = MTK_RNG_PM_OPS,
+		.of_match_table = mtk_rng_match,
+	},
+};
+
+module_platform_driver(mtk_rng_driver);
+
+MODULE_DESCRIPTION("Mediatek Random Number Generator Driver");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
new file mode 100644
index 0000000..f83bee5
--- /dev/null
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -0,0 +1,216 @@
+/*
+ * RNG driver for Freescale RNGA
+ *
+ * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Author: Alan Carvalho de Assis <acassis@gmail.com>
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * This driver is based on other RNG drivers.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/* RNGA Registers */
+#define RNGA_CONTROL			0x00
+#define RNGA_STATUS			0x04
+#define RNGA_ENTROPY			0x08
+#define RNGA_OUTPUT_FIFO		0x0c
+#define RNGA_MODE			0x10
+#define RNGA_VERIFICATION_CONTROL	0x14
+#define RNGA_OSC_CONTROL_COUNTER	0x18
+#define RNGA_OSC1_COUNTER		0x1c
+#define RNGA_OSC2_COUNTER		0x20
+#define RNGA_OSC_COUNTER_STATUS		0x24
+
+/* RNGA Registers Range */
+#define RNG_ADDR_RANGE			0x28
+
+/* RNGA Control Register */
+#define RNGA_CONTROL_SLEEP		0x00000010
+#define RNGA_CONTROL_CLEAR_INT		0x00000008
+#define RNGA_CONTROL_MASK_INTS		0x00000004
+#define RNGA_CONTROL_HIGH_ASSURANCE	0x00000002
+#define RNGA_CONTROL_GO			0x00000001
+
+#define RNGA_STATUS_LEVEL_MASK		0x0000ff00
+
+/* RNGA Status Register */
+#define RNGA_STATUS_OSC_DEAD		0x80000000
+#define RNGA_STATUS_SLEEP		0x00000010
+#define RNGA_STATUS_ERROR_INT		0x00000008
+#define RNGA_STATUS_FIFO_UNDERFLOW	0x00000004
+#define RNGA_STATUS_LAST_READ_STATUS	0x00000002
+#define RNGA_STATUS_SECURITY_VIOLATION	0x00000001
+
+struct mxc_rng {
+	struct device *dev;
+	struct hwrng rng;
+	void __iomem *mem;
+	struct clk *clk;
+};
+
+static int mxc_rnga_data_present(struct hwrng *rng, int wait)
+{
+	int i;
+	struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng);
+
+	for (i = 0; i < 20; i++) {
+		/* how many random numbers are in FIFO? [0-16] */
+		int level = (__raw_readl(mxc_rng->mem + RNGA_STATUS) &
+				RNGA_STATUS_LEVEL_MASK) >> 8;
+		if (level || !wait)
+			return !!level;
+		udelay(10);
+	}
+	return 0;
+}
+
+static int mxc_rnga_data_read(struct hwrng *rng, u32 *data)
+{
+	int err;
+	u32 ctrl;
+	struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng);
+
+	/* retrieve a random number from FIFO */
+	*data = __raw_readl(mxc_rng->mem + RNGA_OUTPUT_FIFO);
+
+	/* some error while reading this random number? */
+	err = __raw_readl(mxc_rng->mem + RNGA_STATUS) & RNGA_STATUS_ERROR_INT;
+
+	/* on error: clear the error interrupt, but don't return a random number */
+	if (err) {
+		dev_dbg(mxc_rng->dev, "Error while reading random number!\n");
+		ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL);
+		__raw_writel(ctrl | RNGA_CONTROL_CLEAR_INT,
+					mxc_rng->mem + RNGA_CONTROL);
+		return 0;
+	} else
+		return 4;
+}
+
+static int mxc_rnga_init(struct hwrng *rng)
+{
+	u32 ctrl, osc;
+	struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng);
+
+	/* wake up */
+	ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL);
+	__raw_writel(ctrl & ~RNGA_CONTROL_SLEEP, mxc_rng->mem + RNGA_CONTROL);
+
+	/* verify if oscillator is working */
+	osc = __raw_readl(mxc_rng->mem + RNGA_STATUS);
+	if (osc & RNGA_STATUS_OSC_DEAD) {
+		dev_err(mxc_rng->dev, "RNGA Oscillator is dead!\n");
+		return -ENODEV;
+	}
+
+	/* go running */
+	ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL);
+	__raw_writel(ctrl | RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL);
+
+	return 0;
+}
+
+static void mxc_rnga_cleanup(struct hwrng *rng)
+{
+	u32 ctrl;
+	struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng);
+
+	ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL);
+
+	/* stop rnga */
+	__raw_writel(ctrl & ~RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL);
+}
+
+static int __init mxc_rnga_probe(struct platform_device *pdev)
+{
+	int err;
+	struct resource *res;
+	struct mxc_rng *mxc_rng;
+
+	mxc_rng = devm_kzalloc(&pdev->dev, sizeof(*mxc_rng), GFP_KERNEL);
+	if (!mxc_rng)
+		return -ENOMEM;
+
+	mxc_rng->dev = &pdev->dev;
+	mxc_rng->rng.name = "mxc-rnga";
+	mxc_rng->rng.init = mxc_rnga_init;
+	mxc_rng->rng.cleanup = mxc_rnga_cleanup;
+	mxc_rng->rng.data_present = mxc_rnga_data_present;
+	mxc_rng->rng.data_read = mxc_rnga_data_read;
+
+	mxc_rng->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(mxc_rng->clk)) {
+		dev_err(&pdev->dev, "Could not get rng_clk!\n");
+		return PTR_ERR(mxc_rng->clk);
+	}
+
+	err = clk_prepare_enable(mxc_rng->clk);
+	if (err)
+		return err;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(mxc_rng->mem)) {
+		err = PTR_ERR(mxc_rng->mem);
+		goto err_ioremap;
+	}
+
+	/* mxc_rnga_remove() looks this up with platform_get_drvdata() */
+	platform_set_drvdata(pdev, mxc_rng);
+
+	err = hwrng_register(&mxc_rng->rng);
+	if (err) {
+		dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err);
+		goto err_ioremap;
+	}
+
+	return 0;
+
+err_ioremap:
+	clk_disable_unprepare(mxc_rng->clk);
+	return err;
+}
+
+static int __exit mxc_rnga_remove(struct platform_device *pdev)
+{
+	struct mxc_rng *mxc_rng = platform_get_drvdata(pdev);
+
+	hwrng_unregister(&mxc_rng->rng);
+
+	clk_disable_unprepare(mxc_rng->clk);
+
+	return 0;
+}
+
+static const struct of_device_id mxc_rnga_of_match[] = {
+	{ .compatible = "fsl,imx21-rnga", },
+	{ .compatible = "fsl,imx31-rnga", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mxc_rnga_of_match);
+
+static struct platform_driver mxc_rnga_driver = {
+	.driver = {
+		.name = "mxc_rnga",
+		.of_match_table = mxc_rnga_of_match,
+	},
+	.remove = __exit_p(mxc_rnga_remove),
+};
+
+module_platform_driver_probe(mxc_rnga_driver, mxc_rnga_probe);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("H/W RNGA driver for i.MX");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/n2-asm.S b/drivers/char/hw_random/n2-asm.S
new file mode 100644
index 0000000..c205df4
--- /dev/null
+++ b/drivers/char/hw_random/n2-asm.S
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* n2-asm.S: Niagara2 RNG hypervisor call assembler.
+ *
+ * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
+ */
+#include <linux/linkage.h>
+#include <asm/hypervisor.h>
+#include "n2rng.h"
+
+	.text
+
+ENTRY(sun4v_rng_get_diag_ctl)
+	mov	HV_FAST_RNG_GET_DIAG_CTL, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+ENDPROC(sun4v_rng_get_diag_ctl)
+
+ENTRY(sun4v_rng_ctl_read_v1)
+	mov	%o1, %o3
+	mov	%o2, %o4
+	mov	HV_FAST_RNG_CTL_READ, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%o3]
+	retl
+	 stx	%o2, [%o4]
+ENDPROC(sun4v_rng_ctl_read_v1)
+
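+/* The v2 read returns four values, so a register window is opened to
+ * keep the %i2-%i5 result pointers live across the hypervisor trap. */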
+ENTRY(sun4v_rng_ctl_read_v2)
+	save	%sp, -192, %sp
+	mov	%i0, %o0
+	mov	%i1, %o1
+	mov	HV_FAST_RNG_CTL_READ, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%i2]
+	stx	%o2, [%i3]
+	stx	%o3, [%i4]
+	stx	%o4, [%i5]
+	ret
+	restore	%g0, %o0, %o0
+ENDPROC(sun4v_rng_ctl_read_v2)
+
+ENTRY(sun4v_rng_ctl_write_v1)
+	mov	%o3, %o4
+	mov	HV_FAST_RNG_CTL_WRITE, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 stx	%o1, [%o4]
+ENDPROC(sun4v_rng_ctl_write_v1)
+
+ENTRY(sun4v_rng_ctl_write_v2)
+	mov	HV_FAST_RNG_CTL_WRITE, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+ENDPROC(sun4v_rng_ctl_write_v2)
+
+ENTRY(sun4v_rng_data_read_diag_v1)
+	mov	%o2, %o4
+	mov	HV_FAST_RNG_DATA_READ_DIAG, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 stx	%o1, [%o4]
+ENDPROC(sun4v_rng_data_read_diag_v1)
+
+ENTRY(sun4v_rng_data_read_diag_v2)
+	mov	%o3, %o4
+	mov	HV_FAST_RNG_DATA_READ_DIAG, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 stx	%o1, [%o4]
+ENDPROC(sun4v_rng_data_read_diag_v2)
+
+ENTRY(sun4v_rng_data_read)
+	mov	%o1, %o4
+	mov	HV_FAST_RNG_DATA_READ, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 stx	%o1, [%o4]
+ENDPROC(sun4v_rng_data_read)
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
new file mode 100644
index 0000000..f841151
--- /dev/null
+++ b/drivers/char/hw_random/n2-drv.c
@@ -0,0 +1,871 @@
+/* n2-drv.c: Niagara-2 RNG driver.
+ *
+ * Copyright (C) 2008, 2011 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/preempt.h>
+#include <linux/hw_random.h>
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <asm/hypervisor.h>
+
+#include "n2rng.h"
+
+#define DRV_MODULE_NAME		"n2rng"
+#define PFX DRV_MODULE_NAME	": "
+#define DRV_MODULE_VERSION	"0.3"
+#define DRV_MODULE_RELDATE	"Jan 7, 2017"
+
+static char version[] =
+	DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Niagara2 RNG driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+/* The Niagara2 RNG provides a 64-bit read-only random number
+ * register, plus a control register.  Access to the RNG is
+ * virtualized through the hypervisor so that both guests and control
+ * nodes can access the device.
+ *
+ * The entropy source consists of three raw entropy sources, each
+ * constructed from a voltage controlled oscillator whose phase is
+ * jittered by thermal noise sources.
+ *
+ * The oscillator in each of the three raw entropy sources runs at a
+ * different frequency.  Normally, all three generator outputs are
+ * gathered, xored together, and fed into a CRC circuit, the output of
+ * which is the 64-bit read-only register.
+ *
+ * Some time is necessary for all the necessary entropy to build up
+ * such that a full 64-bits of entropy are available in the register.
+ * In normal operating mode (RNG_CTL_LFSR is set), the chip implements
+ * an interlock which blocks register reads until sufficient entropy
+ * is available.
+ *
+ * A control register is provided for adjusting various aspects of RNG
+ * operation, and to enable diagnostic modes.  Each of the three raw
+ * entropy sources has an enable bit (RNG_CTL_ES{1,2,3}).  Also
+ * provided are fields for controlling the minimum time in cycles
+ * between read accesses to the register (RNG_CTL_WAIT, this controls
+ * the interlock described in the previous paragraph).
+ *
+ * The standard setting is to have the mode bit (RNG_CTL_LFSR) set,
+ * all three entropy sources enabled, and the interlock time set
+ * appropriately.
+ *
+ * The CRC polynomial used by the chip is:
+ *
+ * P(X) = x64 + x61 + x57 + x56 + x52 + x51 + x50 + x48 + x47 + x46 +
+ *        x43 + x42 + x41 + x39 + x38 + x37 + x35 + x32 + x28 + x25 +
+ *        x22 + x21 + x17 + x15 + x13 + x12 + x11 + x7 + x5 + x + 1
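+ *
+ * (With the leading x^64 term implicit, this is the SELFTEST_POLY
+ * constant 0x231DCEE91262B8A3 defined in n2rng.h.)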
+ *
+ * The RNG_CTL_VCO value of each noise cell must be programmed
+ * separately.  This is why 4 control register values must be provided
+ * to the hypervisor.  During a write, the hypervisor writes them all,
+ * one at a time, to the actual RNG_CTL register.  The first three
+ * values are used to setup the desired RNG_CTL_VCO for each entropy
+ * source, for example:
+ *
+ *	control 0: (1 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES1
+ *	control 1: (2 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES2
+ *	control 2: (3 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES3
+ *
+ * The fourth value then sets the final chip state and enables the
+ * desired entropy sources.
+ */
+
+static int n2rng_hv_err_trans(unsigned long hv_err)
+{
+	switch (hv_err) {
+	case HV_EOK:
+		return 0;
+	case HV_EWOULDBLOCK:
+		return -EAGAIN;
+	case HV_ENOACCESS:
+		return -EPERM;
+	case HV_EIO:
+		return -EIO;
+	case HV_EBUSY:
+		return -EBUSY;
+	case HV_EBADALIGN:
+	case HV_ENORADDR:
+		return -EFAULT;
+	default:
+		return -EINVAL;
+	}
+}
+
+static unsigned long n2rng_generic_read_control_v2(unsigned long ra,
+						   unsigned long unit)
+{
+	unsigned long hv_err, state, ticks, watchdog_delta, watchdog_status;
+	int block = 0, busy = 0;
+
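+	/* Retry on HV_EBUSY/HV_EWOULDBLOCK up to the driver limits;
+	 * any other status, including HV_EOK, ends the loop. */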
+	while (1) {
+		hv_err = sun4v_rng_ctl_read_v2(ra, unit, &state,
+					       &ticks,
+					       &watchdog_delta,
+					       &watchdog_status);
+		if (hv_err == HV_EOK)
+			break;
+
+		if (hv_err == HV_EBUSY) {
+			if (++busy >= N2RNG_BUSY_LIMIT)
+				break;
+
+			udelay(1);
+		} else if (hv_err == HV_EWOULDBLOCK) {
+			if (++block >= N2RNG_BLOCK_LIMIT)
+				break;
+
+			__delay(ticks);
+		} else
+			break;
+	}
+
+	return hv_err;
+}
+
+/* In multi-socket situations, the hypervisor might need to
+ * queue up the RNG control register write if it's for a unit
+ * that is on a cpu socket other than the one we are executing on.
+ *
+ * We poll here, waiting for a successful read of that control
+ * register, to make sure the write has actually been performed.
+ */
+static unsigned long n2rng_control_settle_v2(struct n2rng *np, int unit)
+{
+	unsigned long ra = __pa(&np->scratch_control[0]);
+
+	return n2rng_generic_read_control_v2(ra, unit);
+}
+
+static unsigned long n2rng_write_ctl_one(struct n2rng *np, int unit,
+					 unsigned long state,
+					 unsigned long control_ra,
+					 unsigned long watchdog_timeout,
+					 unsigned long *ticks)
+{
+	unsigned long hv_err;
+
+	if (np->hvapi_major == 1) {
+		hv_err = sun4v_rng_ctl_write_v1(control_ra, state,
+						watchdog_timeout, ticks);
+	} else {
+		hv_err = sun4v_rng_ctl_write_v2(control_ra, state,
+						watchdog_timeout, unit);
+		if (hv_err == HV_EOK)
+			hv_err = n2rng_control_settle_v2(np, unit);
+		*ticks = N2RNG_ACCUM_CYCLES_DEFAULT;
+	}
+
+	return hv_err;
+}
+
+static int n2rng_generic_read_data(unsigned long data_ra)
+{
+	unsigned long ticks, hv_err;
+	int block = 0, hcheck = 0;
+
+	while (1) {
+		hv_err = sun4v_rng_data_read(data_ra, &ticks);
+		if (hv_err == HV_EOK)
+			return 0;
+
+		if (hv_err == HV_EWOULDBLOCK) {
+			if (++block >= N2RNG_BLOCK_LIMIT)
+				return -EWOULDBLOCK;
+			__delay(ticks);
+		} else if (hv_err == HV_ENOACCESS) {
+			return -EPERM;
+		} else if (hv_err == HV_EIO) {
+			if (++hcheck >= N2RNG_HCHECK_LIMIT)
+				return -EIO;
+			udelay(10000);
+		} else
+			return -ENODEV;
+	}
+}
+
+static unsigned long n2rng_read_diag_data_one(struct n2rng *np,
+					      unsigned long unit,
+					      unsigned long data_ra,
+					      unsigned long data_len,
+					      unsigned long *ticks)
+{
+	unsigned long hv_err;
+
+	if (np->hvapi_major == 1) {
+		hv_err = sun4v_rng_data_read_diag_v1(data_ra, data_len, ticks);
+	} else {
+		hv_err = sun4v_rng_data_read_diag_v2(data_ra, data_len,
+						     unit, ticks);
+		if (!*ticks)
+			*ticks = N2RNG_ACCUM_CYCLES_DEFAULT;
+	}
+	return hv_err;
+}
+
+static int n2rng_generic_read_diag_data(struct n2rng *np,
+					unsigned long unit,
+					unsigned long data_ra,
+					unsigned long data_len)
+{
+	unsigned long ticks, hv_err;
+	int block = 0;
+
+	while (1) {
+		hv_err = n2rng_read_diag_data_one(np, unit,
+						  data_ra, data_len,
+						  &ticks);
+		if (hv_err == HV_EOK)
+			return 0;
+
+		if (hv_err == HV_EWOULDBLOCK) {
+			if (++block >= N2RNG_BLOCK_LIMIT)
+				return -EWOULDBLOCK;
+			__delay(ticks);
+		} else if (hv_err == HV_ENOACCESS) {
+			return -EPERM;
+		} else if (hv_err == HV_EIO) {
+			return -EIO;
+		} else
+			return -ENODEV;
+	}
+}
+
+
+static int n2rng_generic_write_control(struct n2rng *np,
+				       unsigned long control_ra,
+				       unsigned long unit,
+				       unsigned long state)
+{
+	unsigned long hv_err, ticks;
+	int block = 0, busy = 0;
+
+	while (1) {
+		hv_err = n2rng_write_ctl_one(np, unit, state, control_ra,
+					     np->wd_timeo, &ticks);
+		if (hv_err == HV_EOK)
+			return 0;
+
+		if (hv_err == HV_EWOULDBLOCK) {
+			if (++block >= N2RNG_BLOCK_LIMIT)
+				return -EWOULDBLOCK;
+			__delay(ticks);
+		} else if (hv_err == HV_EBUSY) {
+			if (++busy >= N2RNG_BUSY_LIMIT)
+				return -EBUSY;
+			udelay(1);
+		} else
+			return -ENODEV;
+	}
+}
+
+/* Just try to see if we can successfully access the control register
+ * of the RNG on the domain on which we are currently executing.
+ */
+static int n2rng_try_read_ctl(struct n2rng *np)
+{
+	unsigned long hv_err;
+	unsigned long x;
+
+	if (np->hvapi_major == 1) {
+		hv_err = sun4v_rng_get_diag_ctl();
+	} else {
+		/* We purposefully give invalid arguments; HV_ENOACCESS
+		 * is higher priority than the errors we'd get from
+		 * these other cases, and that's the error we are
+		 * truly interested in.
+		 */
+		hv_err = sun4v_rng_ctl_read_v2(0UL, ~0UL, &x, &x, &x, &x);
+		switch (hv_err) {
+		case HV_EWOULDBLOCK:
+		case HV_ENOACCESS:
+			break;
+		default:
+			hv_err = HV_EOK;
+			break;
+		}
+	}
+
+	return n2rng_hv_err_trans(hv_err);
+}
+
+static u64 n2rng_control_default(struct n2rng *np, int ctl)
+{
+	u64 val = 0;
+
+	if (np->data->chip_version == 1) {
+		val = ((2 << RNG_v1_CTL_ASEL_SHIFT) |
+			(N2RNG_ACCUM_CYCLES_DEFAULT << RNG_v1_CTL_WAIT_SHIFT) |
+			 RNG_CTL_LFSR);
+
+		switch (ctl) {
+		case 0:
+			val |= (1 << RNG_v1_CTL_VCO_SHIFT) | RNG_CTL_ES1;
+			break;
+		case 1:
+			val |= (2 << RNG_v1_CTL_VCO_SHIFT) | RNG_CTL_ES2;
+			break;
+		case 2:
+			val |= (3 << RNG_v1_CTL_VCO_SHIFT) | RNG_CTL_ES3;
+			break;
+		case 3:
+			val |= RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3;
+			break;
+		default:
+			break;
+		}
+
+	} else {
+		val = ((2 << RNG_v2_CTL_ASEL_SHIFT) |
+			(N2RNG_ACCUM_CYCLES_DEFAULT << RNG_v2_CTL_WAIT_SHIFT) |
+			 RNG_CTL_LFSR);
+
+		switch (ctl) {
+		case 0:
+			val |= (1 << RNG_v2_CTL_VCO_SHIFT) | RNG_CTL_ES1;
+			break;
+		case 1:
+			val |= (2 << RNG_v2_CTL_VCO_SHIFT) | RNG_CTL_ES2;
+			break;
+		case 2:
+			val |= (3 << RNG_v2_CTL_VCO_SHIFT) | RNG_CTL_ES3;
+			break;
+		case 3:
+			val |= RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return val;
+}
+
+static void n2rng_control_swstate_init(struct n2rng *np)
+{
+	int i;
+
+	np->flags |= N2RNG_FLAG_CONTROL;
+
+	np->health_check_sec = N2RNG_HEALTH_CHECK_SEC_DEFAULT;
+	np->accum_cycles = N2RNG_ACCUM_CYCLES_DEFAULT;
+	np->wd_timeo = N2RNG_WD_TIMEO_DEFAULT;
+
+	for (i = 0; i < np->num_units; i++) {
+		struct n2rng_unit *up = &np->units[i];
+
+		up->control[0] = n2rng_control_default(np, 0);
+		up->control[1] = n2rng_control_default(np, 1);
+		up->control[2] = n2rng_control_default(np, 2);
+		up->control[3] = n2rng_control_default(np, 3);
+	}
+
+	np->hv_state = HV_RNG_STATE_UNCONFIGURED;
+}
+
+static int n2rng_grab_diag_control(struct n2rng *np)
+{
+	int i, busy_count, err = -ENODEV;
+
+	busy_count = 0;
+	for (i = 0; i < 100; i++) {
+		err = n2rng_try_read_ctl(np);
+		if (err != -EAGAIN)
+			break;
+
+		if (++busy_count > 100) {
+			dev_err(&np->op->dev,
+				"Grab diag control timeout.\n");
+			return -ENODEV;
+		}
+
+		udelay(1);
+	}
+
+	return err;
+}
+
+static int n2rng_init_control(struct n2rng *np)
+{
+	int err = n2rng_grab_diag_control(np);
+
+	/* Not in the control domain; that's OK, we are only a consumer
+	 * of the RNG data and don't set up or program the device.
+	 */
+	if (err == -EPERM)
+		return 0;
+	if (err)
+		return err;
+
+	n2rng_control_swstate_init(np);
+
+	return 0;
+}
+
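+/* The hypervisor hands back 64 bits per read while the hwrng
+ * data_read hook returns 32, so the upper half of each read is
+ * cached in np->buffer and handed out on the next call. */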
+static int n2rng_data_read(struct hwrng *rng, u32 *data)
+{
+	struct n2rng *np = (struct n2rng *) rng->priv;
+	unsigned long ra = __pa(&np->test_data);
+	int len;
+
+	if (!(np->flags & N2RNG_FLAG_READY)) {
+		len = 0;
+	} else if (np->flags & N2RNG_FLAG_BUFFER_VALID) {
+		np->flags &= ~N2RNG_FLAG_BUFFER_VALID;
+		*data = np->buffer;
+		len = 4;
+	} else {
+		int err = n2rng_generic_read_data(ra);
+		if (!err) {
+			np->flags |= N2RNG_FLAG_BUFFER_VALID;
+			np->buffer = np->test_data >> 32;
+			*data = np->test_data & 0xffffffff;
+			len = 4;
+		} else {
+			dev_err(&np->op->dev, "RNG error, retesting\n");
+			np->flags &= ~N2RNG_FLAG_READY;
+			if (!(np->flags & N2RNG_FLAG_SHUTDOWN))
+				schedule_delayed_work(&np->work, 0);
+			len = 0;
+		}
+	}
+
+	return len;
+}
+
+/* On a guest node, just make sure we can read random data properly.
+ * If a control node reboots or reloads its n2rng driver, this won't
+ * work during that time.  So we have to keep probing until the device
+ * becomes usable.
+ */
+static int n2rng_guest_check(struct n2rng *np)
+{
+	unsigned long ra = __pa(&np->test_data);
+
+	return n2rng_generic_read_data(ra);
+}
+
+static int n2rng_entropy_diag_read(struct n2rng *np, unsigned long unit,
+				   u64 *pre_control, u64 pre_state,
+				   u64 *buffer, unsigned long buf_len,
+				   u64 *post_control, u64 post_state)
+{
+	unsigned long post_ctl_ra = __pa(post_control);
+	unsigned long pre_ctl_ra = __pa(pre_control);
+	unsigned long buffer_ra = __pa(buffer);
+	int err;
+
+	err = n2rng_generic_write_control(np, pre_ctl_ra, unit, pre_state);
+	if (err)
+		return err;
+
+	err = n2rng_generic_read_diag_data(np, unit,
+					   buffer_ra, buf_len);
+
+	(void) n2rng_generic_write_control(np, post_ctl_ra, unit,
+					   post_state);
+
+	return err;
+}
+
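+/* One Galois-LFSR step per iteration: multiply val by x modulo the
+ * selftest polynomial, "count" times.  This walks the expected value
+ * forward through the chip's CRC sequence. */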
+static u64 advance_polynomial(u64 poly, u64 val, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		int highbit_set = ((s64)val < 0);
+
+		val <<= 1;
+		if (highbit_set)
+			val ^= poly;
+	}
+
+	return val;
+}
+
+static int n2rng_test_buffer_find(struct n2rng *np, u64 val)
+{
+	int i, count = 0;
+
+	/* Purposefully skip over the first word.  */
+	for (i = 1; i < SELFTEST_BUFFER_WORDS; i++) {
+		if (np->test_buffer[i] == val)
+			count++;
+	}
+	return count;
+}
+
+static void n2rng_dump_test_buffer(struct n2rng *np)
+{
+	int i;
+
+	for (i = 0; i < SELFTEST_BUFFER_WORDS; i++)
+		dev_err(&np->op->dev, "Test buffer slot %d [0x%016llx]\n",
+			i, np->test_buffer[i]);
+}
+
+static int n2rng_check_selftest_buffer(struct n2rng *np, unsigned long unit)
+{
+	u64 val;
+	int err, matches, limit;
+
+	switch (np->data->id) {
+	case N2_n2_rng:
+	case N2_vf_rng:
+	case N2_kt_rng:
+	case N2_m4_rng:  /* yes, m4 uses the old value */
+		val = RNG_v1_SELFTEST_VAL;
+		break;
+	default:
+		val = RNG_v2_SELFTEST_VAL;
+		break;
+	}
+
+	matches = 0;
+	for (limit = 0; limit < SELFTEST_LOOPS_MAX; limit++) {
+		matches += n2rng_test_buffer_find(np, val);
+		if (matches >= SELFTEST_MATCH_GOAL)
+			break;
+		val = advance_polynomial(SELFTEST_POLY, val, 1);
+	}
+
+	err = 0;
+	if (limit >= SELFTEST_LOOPS_MAX) {
+		err = -ENODEV;
+		dev_err(&np->op->dev, "Selftest failed on unit %lu\n", unit);
+		n2rng_dump_test_buffer(np);
+	} else
+		dev_info(&np->op->dev, "Selftest passed on unit %lu\n", unit);
+
+	return err;
+}
+
+static int n2rng_control_selftest(struct n2rng *np, unsigned long unit)
+{
+	int err;
+	u64 base, base3;
+
+	switch (np->data->id) {
+	case N2_n2_rng:
+	case N2_vf_rng:
+	case N2_kt_rng:
+		base = RNG_v1_CTL_ASEL_NOOUT << RNG_v1_CTL_ASEL_SHIFT;
+		base3 = base | RNG_CTL_LFSR |
+			((RNG_v1_SELFTEST_TICKS - 2) << RNG_v1_CTL_WAIT_SHIFT);
+		break;
+	case N2_m4_rng:
+		base = RNG_v2_CTL_ASEL_NOOUT << RNG_v2_CTL_ASEL_SHIFT;
+		base3 = base | RNG_CTL_LFSR |
+			((RNG_v1_SELFTEST_TICKS - 2) << RNG_v2_CTL_WAIT_SHIFT);
+		break;
+	default:
+		base = RNG_v2_CTL_ASEL_NOOUT << RNG_v2_CTL_ASEL_SHIFT;
+		base3 = base | RNG_CTL_LFSR |
+			(RNG_v2_SELFTEST_TICKS << RNG_v2_CTL_WAIT_SHIFT);
+		break;
+	}
+
+	np->test_control[0] = base;
+	np->test_control[1] = base;
+	np->test_control[2] = base;
+	np->test_control[3] = base3;
+
+	err = n2rng_entropy_diag_read(np, unit, np->test_control,
+				      HV_RNG_STATE_HEALTHCHECK,
+				      np->test_buffer,
+				      sizeof(np->test_buffer),
+				      &np->units[unit].control[0],
+				      np->hv_state);
+	if (err)
+		return err;
+
+	return n2rng_check_selftest_buffer(np, unit);
+}
+
+static int n2rng_control_check(struct n2rng *np)
+{
+	int i;
+
+	for (i = 0; i < np->num_units; i++) {
+		int err = n2rng_control_selftest(np, i);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+/* The sanity checks passed, install the final configuration into the
+ * chip, it's ready to use.
+ */
+static int n2rng_control_configure_units(struct n2rng *np)
+{
+	int unit, err;
+
+	err = 0;
+	for (unit = 0; unit < np->num_units; unit++) {
+		struct n2rng_unit *up = &np->units[unit];
+		unsigned long ctl_ra = __pa(&up->control[0]);
+		int esrc;
+		u64 base, shift;
+
+		if (np->data->chip_version == 1) {
+			base = ((np->accum_cycles << RNG_v1_CTL_WAIT_SHIFT) |
+			      (RNG_v1_CTL_ASEL_NOOUT << RNG_v1_CTL_ASEL_SHIFT) |
+			      RNG_CTL_LFSR);
+			shift = RNG_v1_CTL_VCO_SHIFT;
+		} else {
+			base = ((np->accum_cycles << RNG_v2_CTL_WAIT_SHIFT) |
+			      (RNG_v2_CTL_ASEL_NOOUT << RNG_v2_CTL_ASEL_SHIFT) |
+			      RNG_CTL_LFSR);
+			shift = RNG_v2_CTL_VCO_SHIFT;
+		}
+
+		/* XXX This isn't the best.  We should fetch a bunch
+		 * XXX of words using each entropy source combined
+		 * XXX with each VCO setting, and see which combinations
+		 * XXX give the best random data.
+		 */
+		for (esrc = 0; esrc < 3; esrc++)
+			up->control[esrc] = base |
+				(esrc << shift) |
+				(RNG_CTL_ES1 << esrc);
+
+		up->control[3] = base |
+			(RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3);
+
+		err = n2rng_generic_write_control(np, ctl_ra, unit,
+						  HV_RNG_STATE_CONFIGURED);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+static void n2rng_work(struct work_struct *work)
+{
+	struct n2rng *np = container_of(work, struct n2rng, work.work);
+	int err = 0;
+	static int retries = 4;
+
+	if (!(np->flags & N2RNG_FLAG_CONTROL)) {
+		err = n2rng_guest_check(np);
+	} else {
+		preempt_disable();
+		err = n2rng_control_check(np);
+		preempt_enable();
+
+		if (!err)
+			err = n2rng_control_configure_units(np);
+	}
+
+	if (!err) {
+		np->flags |= N2RNG_FLAG_READY;
+		dev_info(&np->op->dev, "RNG ready\n");
+	}
+
+	if (--retries == 0)
+		dev_err(&np->op->dev, "Self-test retries failed, RNG not ready\n");
+	else if (err && !(np->flags & N2RNG_FLAG_SHUTDOWN))
+		schedule_delayed_work(&np->work, HZ * 2);
+}
+
+static void n2rng_driver_version(void)
+{
+	static int n2rng_version_printed;
+
+	if (n2rng_version_printed++ == 0)
+		pr_info("%s", version);
+}
+
+static const struct of_device_id n2rng_match[];
+static int n2rng_probe(struct platform_device *op)
+{
+	const struct of_device_id *match;
+	int err = -ENOMEM;
+	struct n2rng *np;
+
+	match = of_match_device(n2rng_match, &op->dev);
+	if (!match)
+		return -EINVAL;
+
+	n2rng_driver_version();
+	np = devm_kzalloc(&op->dev, sizeof(*np), GFP_KERNEL);
+	if (!np)
+		goto out;
+	np->op = op;
+	np->data = (struct n2rng_template *)match->data;
+
+	INIT_DELAYED_WORK(&np->work, n2rng_work);
+
+	if (np->data->multi_capable)
+		np->flags |= N2RNG_FLAG_MULTI;
+
+	err = -ENODEV;
+	np->hvapi_major = 2;
+	if (sun4v_hvapi_register(HV_GRP_RNG,
+				 np->hvapi_major,
+				 &np->hvapi_minor)) {
+		np->hvapi_major = 1;
+		if (sun4v_hvapi_register(HV_GRP_RNG,
+					 np->hvapi_major,
+					 &np->hvapi_minor)) {
+			dev_err(&op->dev, "Cannot register suitable "
+				"HVAPI version.\n");
+			goto out;
+		}
+	}
+
+	if (np->flags & N2RNG_FLAG_MULTI) {
+		if (np->hvapi_major < 2) {
+			dev_err(&op->dev, "multi-unit-capable RNG requires "
+				"HVAPI major version 2 or later, got %lu\n",
+				np->hvapi_major);
+			goto out_hvapi_unregister;
+		}
+		np->num_units = of_getintprop_default(op->dev.of_node,
+						      "rng-#units", 0);
+		if (!np->num_units) {
+			dev_err(&op->dev, "VF RNG lacks rng-#units property\n");
+			goto out_hvapi_unregister;
+		}
+	} else {
+		np->num_units = 1;
+	}
+
+	dev_info(&op->dev, "Registered RNG HVAPI major %lu minor %lu\n",
+		 np->hvapi_major, np->hvapi_minor);
+	np->units = devm_kcalloc(&op->dev, np->num_units, sizeof(*np->units),
+				 GFP_KERNEL);
+	err = -ENOMEM;
+	if (!np->units)
+		goto out_hvapi_unregister;
+
+	err = n2rng_init_control(np);
+	if (err)
+		goto out_hvapi_unregister;
+
+	dev_info(&op->dev, "Found %s RNG, units: %d\n",
+		 ((np->flags & N2RNG_FLAG_MULTI) ?
+		  "multi-unit-capable" : "single-unit"),
+		 np->num_units);
+
+	np->hwrng.name = DRV_MODULE_NAME;
+	np->hwrng.data_read = n2rng_data_read;
+	np->hwrng.priv = (unsigned long) np;
+
+	err = hwrng_register(&np->hwrng);
+	if (err)
+		goto out_hvapi_unregister;
+
+	platform_set_drvdata(op, np);
+
+	schedule_delayed_work(&np->work, 0);
+
+	return 0;
+
+out_hvapi_unregister:
+	sun4v_hvapi_unregister(HV_GRP_RNG);
+
+out:
+	return err;
+}
+
+static int n2rng_remove(struct platform_device *op)
+{
+	struct n2rng *np = platform_get_drvdata(op);
+
+	np->flags |= N2RNG_FLAG_SHUTDOWN;
+
+	cancel_delayed_work_sync(&np->work);
+
+	hwrng_unregister(&np->hwrng);
+
+	sun4v_hvapi_unregister(HV_GRP_RNG);
+
+	return 0;
+}
+
+static struct n2rng_template n2_template = {
+	.id = N2_n2_rng,
+	.multi_capable = 0,
+	.chip_version = 1,
+};
+
+static struct n2rng_template vf_template = {
+	.id = N2_vf_rng,
+	.multi_capable = 1,
+	.chip_version = 1,
+};
+
+static struct n2rng_template kt_template = {
+	.id = N2_kt_rng,
+	.multi_capable = 1,
+	.chip_version = 1,
+};
+
+static struct n2rng_template m4_template = {
+	.id = N2_m4_rng,
+	.multi_capable = 1,
+	.chip_version = 2,
+};
+
+static struct n2rng_template m7_template = {
+	.id = N2_m7_rng,
+	.multi_capable = 1,
+	.chip_version = 2,
+};
+
+static const struct of_device_id n2rng_match[] = {
+	{
+		.name		= "random-number-generator",
+		.compatible	= "SUNW,n2-rng",
+		.data		= &n2_template,
+	},
+	{
+		.name		= "random-number-generator",
+		.compatible	= "SUNW,vf-rng",
+		.data		= &vf_template,
+	},
+	{
+		.name		= "random-number-generator",
+		.compatible	= "SUNW,kt-rng",
+		.data		= &kt_template,
+	},
+	{
+		.name		= "random-number-generator",
+		.compatible	= "ORCL,m4-rng",
+		.data		= &m4_template,
+	},
+	{
+		.name		= "random-number-generator",
+		.compatible	= "ORCL,m7-rng",
+		.data		= &m7_template,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, n2rng_match);
+
+static struct platform_driver n2rng_driver = {
+	.driver = {
+		.name = "n2rng",
+		.of_match_table = n2rng_match,
+	},
+	.probe		= n2rng_probe,
+	.remove		= n2rng_remove,
+};
+
+module_platform_driver(n2rng_driver);
diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h
new file mode 100644
index 0000000..9a870f5
--- /dev/null
+++ b/drivers/char/hw_random/n2rng.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* n2rng.h: Niagara2 RNG defines.
+ *
+ * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
+ */
+
+#ifndef _N2RNG_H
+#define _N2RNG_H
+
+/* ver1 devices - n2-rng, vf-rng, kt-rng */
+#define RNG_v1_CTL_WAIT       0x0000000001fffe00ULL /* Minimum wait time    */
+#define RNG_v1_CTL_WAIT_SHIFT 9
+#define RNG_v1_CTL_BYPASS     0x0000000000000100ULL /* VCO voltage source   */
+#define RNG_v1_CTL_VCO        0x00000000000000c0ULL /* VCO rate control     */
+#define RNG_v1_CTL_VCO_SHIFT  6
+#define RNG_v1_CTL_ASEL       0x0000000000000030ULL /* Analog MUX select    */
+#define RNG_v1_CTL_ASEL_SHIFT 4
+#define RNG_v1_CTL_ASEL_NOOUT 2
+
+/* these are the same in v2 as in v1 */
+#define RNG_CTL_LFSR       0x0000000000000008ULL /* Use LFSR or plain shift */
+#define RNG_CTL_ES3        0x0000000000000004ULL /* Enable entropy source 3 */
+#define RNG_CTL_ES2        0x0000000000000002ULL /* Enable entropy source 2 */
+#define RNG_CTL_ES1        0x0000000000000001ULL /* Enable entropy source 1 */
+
+/* ver2 devices - m4-rng, m7-rng */
+#define RNG_v2_CTL_WAIT       0x0000000007fff800ULL /* Minimum wait time    */
+#define RNG_v2_CTL_WAIT_SHIFT 12
+#define RNG_v2_CTL_BYPASS     0x0000000000000400ULL /* VCO voltage source   */
+#define RNG_v2_CTL_VCO        0x0000000000000300ULL /* VCO rate control     */
+#define RNG_v2_CTL_VCO_SHIFT  9
+#define RNG_v2_CTL_PERF       0x0000000000000180ULL /* Perf */
+#define RNG_v2_CTL_ASEL       0x0000000000000070ULL /* Analog MUX select    */
+#define RNG_v2_CTL_ASEL_SHIFT 4
+#define RNG_v2_CTL_ASEL_NOOUT 7
+
+
+#define HV_FAST_RNG_GET_DIAG_CTL	0x130
+#define HV_FAST_RNG_CTL_READ		0x131
+#define HV_FAST_RNG_CTL_WRITE		0x132
+#define HV_FAST_RNG_DATA_READ_DIAG	0x133
+#define HV_FAST_RNG_DATA_READ		0x134
+
+#define HV_RNG_STATE_UNCONFIGURED	0
+#define HV_RNG_STATE_CONFIGURED		1
+#define HV_RNG_STATE_HEALTHCHECK	2
+#define HV_RNG_STATE_ERROR		3
+
+#define HV_RNG_NUM_CONTROL		4
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_rng_get_diag_ctl(void);
+extern unsigned long sun4v_rng_ctl_read_v1(unsigned long ctl_regs_ra,
+					   unsigned long *state,
+					   unsigned long *tick_delta);
+extern unsigned long sun4v_rng_ctl_read_v2(unsigned long ctl_regs_ra,
+					   unsigned long unit,
+					   unsigned long *state,
+					   unsigned long *tick_delta,
+					   unsigned long *watchdog,
+					   unsigned long *write_status);
+extern unsigned long sun4v_rng_ctl_write_v1(unsigned long ctl_regs_ra,
+					    unsigned long state,
+					    unsigned long write_timeout,
+					    unsigned long *tick_delta);
+extern unsigned long sun4v_rng_ctl_write_v2(unsigned long ctl_regs_ra,
+					    unsigned long state,
+					    unsigned long write_timeout,
+					    unsigned long unit);
+extern unsigned long sun4v_rng_data_read_diag_v1(unsigned long data_ra,
+						 unsigned long len,
+						 unsigned long *tick_delta);
+extern unsigned long sun4v_rng_data_read_diag_v2(unsigned long data_ra,
+						 unsigned long len,
+						 unsigned long unit,
+						 unsigned long *tick_delta);
+extern unsigned long sun4v_rng_data_read(unsigned long data_ra,
+					 unsigned long *tick_delta);
+
+enum n2rng_compat_id {
+	N2_n2_rng,
+	N2_vf_rng,
+	N2_kt_rng,
+	N2_m4_rng,
+	N2_m7_rng,
+};
+
+struct n2rng_template {
+	enum n2rng_compat_id id;
+	int multi_capable;
+	int chip_version;
+};
+
+struct n2rng_unit {
+	u64			control[HV_RNG_NUM_CONTROL];
+};
+
+struct n2rng {
+	struct platform_device	*op;
+
+	unsigned long		flags;
+#define N2RNG_FLAG_MULTI	0x00000001 /* Multi-unit capable RNG */
+#define N2RNG_FLAG_CONTROL	0x00000002 /* Operating in control domain */
+#define N2RNG_FLAG_READY	0x00000008 /* Ready for hw-rng layer      */
+#define N2RNG_FLAG_SHUTDOWN	0x00000010 /* Driver unregistering        */
+#define N2RNG_FLAG_BUFFER_VALID	0x00000020 /* u32 buffer holds valid data */
+
+	struct n2rng_template	*data;
+	int			num_units;
+	struct n2rng_unit	*units;
+
+	struct hwrng		hwrng;
+	u32			buffer;
+
+	/* Registered hypervisor group API major and minor version.  */
+	unsigned long		hvapi_major;
+	unsigned long		hvapi_minor;
+
+	struct delayed_work	work;
+
+	unsigned long		hv_state; /* HV_RNG_STATE_foo */
+
+	unsigned long		health_check_sec;
+	unsigned long		accum_cycles;
+	unsigned long		wd_timeo;
+#define N2RNG_HEALTH_CHECK_SEC_DEFAULT	0
+#define N2RNG_ACCUM_CYCLES_DEFAULT	2048
+#define N2RNG_WD_TIMEO_DEFAULT		0
+
+	u64			scratch_control[HV_RNG_NUM_CONTROL];
+
+#define RNG_v1_SELFTEST_TICKS	38859
+#define RNG_v1_SELFTEST_VAL	((u64)0xB8820C7BD387E32C)
+#define RNG_v2_SELFTEST_TICKS	64
+#define RNG_v2_SELFTEST_VAL	((u64)0xffffffffffffffff)
+#define SELFTEST_POLY		((u64)0x231DCEE91262B8A3)
+#define SELFTEST_MATCH_GOAL	6
+#define SELFTEST_LOOPS_MAX	40000
+#define SELFTEST_BUFFER_WORDS	8
+
+	u64			test_data;
+	u64			test_control[HV_RNG_NUM_CONTROL];
+	u64			test_buffer[SELFTEST_BUFFER_WORDS];
+};
+
+#define N2RNG_BLOCK_LIMIT	60000
+#define N2RNG_BUSY_LIMIT	100
+#define N2RNG_HCHECK_LIMIT	100
+
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* _N2RNG_H */
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
new file mode 100644
index 0000000..9c85815
--- /dev/null
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -0,0 +1,106 @@
+/*
+ * Nomadik RNG support
+ *  Copyright 2009 Alessandro Rubini
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+static struct clk *rng_clk;
+
+static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	void __iomem *base = (void __iomem *)rng->priv;
+
+	/*
+	 * The register is 32 bits and gives 16 random bits (low half).
+	 * A subsequent read will delay the core for 400ns, so we just read
+	 * once and accept the unlikely, very small delay, even if wait==0.
+	 */
+	*(u16 *)data = __raw_readl(base + 8) & 0xffff;
+	return 2;
+}
+
+/* we have at most one RNG per machine, granted */
+static struct hwrng nmk_rng = {
+	.name		= "nomadik",
+	.read		= nmk_rng_read,
+};
+
+static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
+{
+	void __iomem *base;
+	int ret;
+
+	rng_clk = devm_clk_get(&dev->dev, NULL);
+	if (IS_ERR(rng_clk)) {
+		dev_err(&dev->dev, "could not get rng clock\n");
+		ret = PTR_ERR(rng_clk);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(rng_clk);
+	if (ret)
+		return ret;
+
+	ret = amba_request_regions(dev, dev->dev.init_name);
+	if (ret)
+		goto out_clk;
+	ret = -ENOMEM;
+	base = devm_ioremap(&dev->dev, dev->res.start,
+			    resource_size(&dev->res));
+	if (!base)
+		goto out_release;
+	nmk_rng.priv = (unsigned long)base;
+	ret = hwrng_register(&nmk_rng);
+	if (ret)
+		goto out_release;
+	return 0;
+
+out_release:
+	amba_release_regions(dev);
+out_clk:
+	clk_disable_unprepare(rng_clk);
+	return ret;
+}
+
+static int nmk_rng_remove(struct amba_device *dev)
+{
+	hwrng_unregister(&nmk_rng);
+	amba_release_regions(dev);
+	clk_disable_unprepare(rng_clk);
+	return 0;
+}
+
+static struct amba_id nmk_rng_ids[] = {
+	{
+		.id	= 0x000805e1,
+		.mask	= 0x000fffff, /* top bits are rev and cfg: accept all */
+	},
+	{0, 0},
+};
+
+MODULE_DEVICE_TABLE(amba, nmk_rng_ids);
+
+static struct amba_driver nmk_rng_driver = {
+	.drv = {
+		.owner = THIS_MODULE,
+		.name = "rng",
+		},
+	.probe = nmk_rng_probe,
+	.remove = nmk_rng_remove,
+	.id_table = nmk_rng_ids,
+};
+
+module_amba_driver(nmk_rng_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
new file mode 100644
index 0000000..8c78aa0
--- /dev/null
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -0,0 +1,118 @@
+/*
+ * Hardware Random Number Generator support for Cavium Networks
+ * Octeon processor family.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Cavium Networks
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/gfp.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-rnm-defs.h>
+
+struct octeon_rng {
+	struct hwrng ops;
+	void __iomem *control_status;
+	void __iomem *result;
+};
+
+static int octeon_rng_init(struct hwrng *rng)
+{
+	union cvmx_rnm_ctl_status ctl;
+	struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
+
+	ctl.u64 = 0;
+	ctl.s.ent_en = 1; /* Enable the entropy source.  */
+	ctl.s.rng_en = 1; /* Enable the RNG hardware.  */
+	cvmx_write_csr((u64)p->control_status, ctl.u64);
+	return 0;
+}
+
+static void octeon_rng_cleanup(struct hwrng *rng)
+{
+	union cvmx_rnm_ctl_status ctl;
+	struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
+
+	ctl.u64 = 0;
+	/* Disable everything.  */
+	cvmx_write_csr((u64)p->control_status, ctl.u64);
+}
+
+static int octeon_rng_data_read(struct hwrng *rng, u32 *data)
+{
+	struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
+
+	*data = cvmx_read64_uint32((u64)p->result);
+	return sizeof(u32);
+}
+
+static int octeon_rng_probe(struct platform_device *pdev)
+{
+	struct resource *res_ports;
+	struct resource *res_result;
+	struct octeon_rng *rng;
+	int ret;
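+	/* Template ops, copied into the devm-allocated per-device
+	 * structure below. */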
+	struct hwrng ops = {
+		.name = "octeon",
+		.init = octeon_rng_init,
+		.cleanup = octeon_rng_cleanup,
+		.data_read = octeon_rng_data_read
+	};
+
+	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+	if (!rng)
+		return -ENOMEM;
+
+	res_ports = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res_ports)
+		return -ENOENT;
+
+	res_result = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res_result)
+		return -ENOENT;
+
+	rng->control_status = devm_ioremap_nocache(&pdev->dev,
+						   res_ports->start,
+						   sizeof(u64));
+	if (!rng->control_status)
+		return -ENOENT;
+
+	rng->result = devm_ioremap_nocache(&pdev->dev,
+					   res_result->start,
+					   sizeof(u64));
+	if (!rng->result)
+		return -ENOENT;
+
+	rng->ops = ops;
+
+	platform_set_drvdata(pdev, &rng->ops);
+	ret = devm_hwrng_register(&pdev->dev, &rng->ops);
+	if (ret)
+		return ret;
+
+	dev_info(&pdev->dev, "Octeon Random Number Generator\n");
+
+	return 0;
+}
+
+static struct platform_driver octeon_rng_driver = {
+	.driver = {
+		.name		= "octeon_rng",
+	},
+	.probe		= octeon_rng_probe,
+};
+
+module_platform_driver(octeon_rng_driver);
+
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
new file mode 100644
index 0000000..b65ff69
--- /dev/null
+++ b/drivers/char/hw_random/omap-rng.c
@@ -0,0 +1,582 @@
+/*
+ * omap-rng.c - RNG driver for TI OMAP CPU family
+ *
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * Mostly based on original driver:
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+
+#include <asm/io.h>
+
+#define RNG_REG_STATUS_RDY			(1 << 0)
+
+#define RNG_REG_INTACK_RDY_MASK			(1 << 0)
+#define RNG_REG_INTACK_SHUTDOWN_OFLO_MASK	(1 << 1)
+#define RNG_SHUTDOWN_OFLO_MASK			(1 << 1)
+
+#define RNG_CONTROL_STARTUP_CYCLES_SHIFT	16
+#define RNG_CONTROL_STARTUP_CYCLES_MASK		(0xffff << 16)
+#define RNG_CONTROL_ENABLE_TRNG_SHIFT		10
+#define RNG_CONTROL_ENABLE_TRNG_MASK		(1 << 10)
+
+#define RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT	16
+#define RNG_CONFIG_MAX_REFIL_CYCLES_MASK	(0xffff << 16)
+#define RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT	0
+#define RNG_CONFIG_MIN_REFIL_CYCLES_MASK	(0xff << 0)
+
+#define RNG_CONTROL_STARTUP_CYCLES		0xff
+#define RNG_CONFIG_MIN_REFIL_CYCLES		0x21
+#define RNG_CONFIG_MAX_REFIL_CYCLES		0x22
+
+#define RNG_ALARMCNT_ALARM_TH_SHIFT		0x0
+#define RNG_ALARMCNT_ALARM_TH_MASK		(0xff << 0)
+#define RNG_ALARMCNT_SHUTDOWN_TH_SHIFT		16
+#define RNG_ALARMCNT_SHUTDOWN_TH_MASK		(0x1f << 16)
+#define RNG_ALARM_THRESHOLD			0xff
+#define RNG_SHUTDOWN_THRESHOLD			0x4
+
+#define RNG_REG_FROENABLE_MASK			0xffffff
+#define RNG_REG_FRODETUNE_MASK			0xffffff
+
+#define OMAP2_RNG_OUTPUT_SIZE			0x4
+#define OMAP4_RNG_OUTPUT_SIZE			0x8
+#define EIP76_RNG_OUTPUT_SIZE			0x10
+
+enum {
+	RNG_OUTPUT_0_REG = 0,
+	RNG_OUTPUT_1_REG,
+	RNG_OUTPUT_2_REG,
+	RNG_OUTPUT_3_REG,
+	RNG_STATUS_REG,
+	RNG_INTMASK_REG,
+	RNG_INTACK_REG,
+	RNG_CONTROL_REG,
+	RNG_CONFIG_REG,
+	RNG_ALARMCNT_REG,
+	RNG_FROENABLE_REG,
+	RNG_FRODETUNE_REG,
+	RNG_ALARMMASK_REG,
+	RNG_ALARMSTOP_REG,
+	RNG_REV_REG,
+	RNG_SYSCONFIG_REG,
+};
+
+static const u16 reg_map_omap2[] = {
+	[RNG_OUTPUT_0_REG]	= 0x0,
+	[RNG_STATUS_REG]	= 0x4,
+	[RNG_CONFIG_REG]	= 0x28,
+	[RNG_REV_REG]		= 0x3c,
+	[RNG_SYSCONFIG_REG]	= 0x40,
+};
+
+static const u16 reg_map_omap4[] = {
+	[RNG_OUTPUT_0_REG]	= 0x0,
+	[RNG_OUTPUT_1_REG]	= 0x4,
+	[RNG_STATUS_REG]	= 0x8,
+	[RNG_INTMASK_REG]	= 0xc,
+	[RNG_INTACK_REG]	= 0x10,
+	[RNG_CONTROL_REG]	= 0x14,
+	[RNG_CONFIG_REG]	= 0x18,
+	[RNG_ALARMCNT_REG]	= 0x1c,
+	[RNG_FROENABLE_REG]	= 0x20,
+	[RNG_FRODETUNE_REG]	= 0x24,
+	[RNG_ALARMMASK_REG]	= 0x28,
+	[RNG_ALARMSTOP_REG]	= 0x2c,
+	[RNG_REV_REG]		= 0x1FE0,
+	[RNG_SYSCONFIG_REG]	= 0x1FE4,
+};
+
+static const u16 reg_map_eip76[] = {
+	[RNG_OUTPUT_0_REG]	= 0x0,
+	[RNG_OUTPUT_1_REG]	= 0x4,
+	[RNG_OUTPUT_2_REG]	= 0x8,
+	[RNG_OUTPUT_3_REG]	= 0xc,
+	[RNG_STATUS_REG]	= 0x10,
+	[RNG_INTACK_REG]	= 0x10,
+	[RNG_CONTROL_REG]	= 0x14,
+	[RNG_CONFIG_REG]	= 0x18,
+	[RNG_ALARMCNT_REG]	= 0x1c,
+	[RNG_FROENABLE_REG]	= 0x20,
+	[RNG_FRODETUNE_REG]	= 0x24,
+	[RNG_ALARMMASK_REG]	= 0x28,
+	[RNG_ALARMSTOP_REG]	= 0x2c,
+	[RNG_REV_REG]		= 0x7c,
+};
+
+struct omap_rng_dev;
+/**
+ * struct omap_rng_pdata - RNG IP block-specific data
+ * @regs: Pointer to the register offsets structure.
+ * @data_size: No. of bytes in RNG output.
+ * @data_present: Callback to determine if data is available.
+ * @init: Callback for IP specific initialization sequence.
+ * @cleanup: Callback for IP specific cleanup sequence.
+ */
+struct omap_rng_pdata {
+	u16	*regs;
+	u32	data_size;
+	u32	(*data_present)(struct omap_rng_dev *priv);
+	int	(*init)(struct omap_rng_dev *priv);
+	void	(*cleanup)(struct omap_rng_dev *priv);
+};
+
+struct omap_rng_dev {
+	void __iomem			*base;
+	struct device			*dev;
+	const struct omap_rng_pdata	*pdata;
+	struct hwrng rng;
+	struct clk			*clk;
+	struct clk			*clk_reg;
+};
+
+static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg)
+{
+	return __raw_readl(priv->base + priv->pdata->regs[reg]);
+}
+
+static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg,
+				      u32 val)
+{
+	__raw_writel(val, priv->base + priv->pdata->regs[reg]);
+}
+
+
+static int omap_rng_do_read(struct hwrng *rng, void *data, size_t max,
+			    bool wait)
+{
+	struct omap_rng_dev *priv;
+	int i, present;
+
+	priv = (struct omap_rng_dev *)rng->priv;
+
+	if (max < priv->pdata->data_size)
+		return 0;
+
+	for (i = 0; i < 20; i++) {
+		present = priv->pdata->data_present(priv);
+		if (present || !wait)
+			break;
+
+		udelay(10);
+	}
+	if (!present)
+		return 0;
+
+	memcpy_fromio(data, priv->base + priv->pdata->regs[RNG_OUTPUT_0_REG],
+		      priv->pdata->data_size);
+
+	if (priv->pdata->regs[RNG_INTACK_REG])
+		omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK);
+
+	return priv->pdata->data_size;
+}
+
+static int omap_rng_init(struct hwrng *rng)
+{
+	struct omap_rng_dev *priv;
+
+	priv = (struct omap_rng_dev *)rng->priv;
+	return priv->pdata->init(priv);
+}
+
+static void omap_rng_cleanup(struct hwrng *rng)
+{
+	struct omap_rng_dev *priv;
+
+	priv = (struct omap_rng_dev *)rng->priv;
+	priv->pdata->cleanup(priv);
+}
+
+
+static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv)
+{
+	return omap_rng_read(priv, RNG_STATUS_REG) ? 0 : 1;
+}
+
+static int omap2_rng_init(struct omap_rng_dev *priv)
+{
+	omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x1);
+	return 0;
+}
+
+static void omap2_rng_cleanup(struct omap_rng_dev *priv)
+{
+	omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x0);
+}
+
+static struct omap_rng_pdata omap2_rng_pdata = {
+	.regs		= (u16 *)reg_map_omap2,
+	.data_size	= OMAP2_RNG_OUTPUT_SIZE,
+	.data_present	= omap2_rng_data_present,
+	.init		= omap2_rng_init,
+	.cleanup	= omap2_rng_cleanup,
+};
+
+#if defined(CONFIG_OF)
+static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv)
+{
+	return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY;
+}
+
+static int eip76_rng_init(struct omap_rng_dev *priv)
+{
+	u32 val;
+
+	/* Return if RNG is already running. */
+	if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK)
+		return 0;
+
+	/*  Number of 512 bit blocks of raw Noise Source output data that must
+	 *  be processed by either the Conditioning Function or the
+	 *  SP 800-90 DRBG ‘BC_DF’ functionality to yield a ‘full entropy’
+	 *  output value.
+	 */
+	val = 0x5 << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT;
+
+	/* Number of FRO samples that are XOR-ed together into one bit to be
+	 * shifted into the main shift register
+	 */
+	val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT;
+	omap_rng_write(priv, RNG_CONFIG_REG, val);
+
+	/* Enable all available FROs */
+	omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0);
+	omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK);
+
+	/* Enable TRNG */
+	val = RNG_CONTROL_ENABLE_TRNG_MASK;
+	omap_rng_write(priv, RNG_CONTROL_REG, val);
+
+	return 0;
+}
+
+static int omap4_rng_init(struct omap_rng_dev *priv)
+{
+	u32 val;
+
+	/* Return if RNG is already running. */
+	if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK)
+		return 0;
+
+	val = RNG_CONFIG_MIN_REFIL_CYCLES << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT;
+	val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT;
+	omap_rng_write(priv, RNG_CONFIG_REG, val);
+
+	omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0);
+	omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK);
+	val = RNG_ALARM_THRESHOLD << RNG_ALARMCNT_ALARM_TH_SHIFT;
+	val |= RNG_SHUTDOWN_THRESHOLD << RNG_ALARMCNT_SHUTDOWN_TH_SHIFT;
+	omap_rng_write(priv, RNG_ALARMCNT_REG, val);
+
+	val = RNG_CONTROL_STARTUP_CYCLES << RNG_CONTROL_STARTUP_CYCLES_SHIFT;
+	val |= RNG_CONTROL_ENABLE_TRNG_MASK;
+	omap_rng_write(priv, RNG_CONTROL_REG, val);
+
+	return 0;
+}
+
+static void omap4_rng_cleanup(struct omap_rng_dev *priv)
+{
+	int val;
+
+	val = omap_rng_read(priv, RNG_CONTROL_REG);
+	val &= ~RNG_CONTROL_ENABLE_TRNG_MASK;
+	omap_rng_write(priv, RNG_CONTROL_REG, val);
+}
+
+static irqreturn_t omap4_rng_irq(int irq, void *dev_id)
+{
+	struct omap_rng_dev *priv = dev_id;
+	u32 fro_detune, fro_enable;
+
+	/*
+	 * Interrupt raised by the FRO shutdown threshold; do the following:
+	 * 1. Clear the alarm events.
+	 * 2. De-tune the FROs which have shut down.
+	 * 3. Re-enable the shut-down FROs.
+	 */
+	omap_rng_write(priv, RNG_ALARMMASK_REG, 0x0);
+	omap_rng_write(priv, RNG_ALARMSTOP_REG, 0x0);
+
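+	/* Bits clear in FROENABLE are the FROs the hardware shut down:
+	 * set their detune bits, then re-enable the whole bank. */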
+	fro_enable = omap_rng_read(priv, RNG_FROENABLE_REG);
+	fro_detune = ~fro_enable & RNG_REG_FRODETUNE_MASK;
+	fro_detune = fro_detune | omap_rng_read(priv, RNG_FRODETUNE_REG);
+	fro_enable = RNG_REG_FROENABLE_MASK;
+
+	omap_rng_write(priv, RNG_FRODETUNE_REG, fro_detune);
+	omap_rng_write(priv, RNG_FROENABLE_REG, fro_enable);
+
+	omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_SHUTDOWN_OFLO_MASK);
+
+	return IRQ_HANDLED;
+}
+
+static struct omap_rng_pdata omap4_rng_pdata = {
+	.regs		= (u16 *)reg_map_omap4,
+	.data_size	= OMAP4_RNG_OUTPUT_SIZE,
+	.data_present	= omap4_rng_data_present,
+	.init		= omap4_rng_init,
+	.cleanup	= omap4_rng_cleanup,
+};
+
+static struct omap_rng_pdata eip76_rng_pdata = {
+	.regs		= (u16 *)reg_map_eip76,
+	.data_size	= EIP76_RNG_OUTPUT_SIZE,
+	.data_present	= omap4_rng_data_present,
+	.init		= eip76_rng_init,
+	.cleanup	= omap4_rng_cleanup,
+};
+
+static const struct of_device_id omap_rng_of_match[] = {
+		{
+			.compatible	= "ti,omap2-rng",
+			.data		= &omap2_rng_pdata,
+		},
+		{
+			.compatible	= "ti,omap4-rng",
+			.data		= &omap4_rng_pdata,
+		},
+		{
+			.compatible	= "inside-secure,safexcel-eip76",
+			.data		= &eip76_rng_pdata,
+		},
+		{},
+};
+MODULE_DEVICE_TABLE(of, omap_rng_of_match);
+
+static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
+					  struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	struct device *dev = &pdev->dev;
+	int irq, err;
+
+	match = of_match_device(of_match_ptr(omap_rng_of_match), dev);
+	if (!match) {
+		dev_err(dev, "no compatible OF match\n");
+		return -EINVAL;
+	}
+	priv->pdata = match->data;
+
+	if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
+	    of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0) {
+			dev_err(dev, "%s: error getting IRQ resource - %d\n",
+				__func__, irq);
+			return irq;
+		}
+
+		err = devm_request_irq(dev, irq, omap4_rng_irq,
+				       IRQF_TRIGGER_NONE, dev_name(dev), priv);
+		if (err) {
+			dev_err(dev, "unable to request irq %d, err = %d\n",
+				irq, err);
+			return err;
+		}
+
+		/*
+		 * On OMAP4, enabling the shutdown_oflo interrupt is
+		 * done in the interrupt mask register. There is no
+		 * such register on EIP76, and it's enabled by the
+		 * same bit in the control register
+		 */
+		if (priv->pdata->regs[RNG_INTMASK_REG])
+			omap_rng_write(priv, RNG_INTMASK_REG,
+				       RNG_SHUTDOWN_OFLO_MASK);
+		else
+			omap_rng_write(priv, RNG_CONTROL_REG,
+				       RNG_SHUTDOWN_OFLO_MASK);
+	}
+	return 0;
+}
+#else
+static int of_get_omap_rng_device_details(struct omap_rng_dev *omap_rng,
+					  struct platform_device *pdev)
+{
+	return -EINVAL;
+}
+#endif
+
+static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng)
+{
+	/* Only OMAP2/3 can be non-DT */
+	omap_rng->pdata = &omap2_rng_pdata;
+	return 0;
+}
+
+static int omap_rng_probe(struct platform_device *pdev)
+{
+	struct omap_rng_dev *priv;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(struct omap_rng_dev), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->rng.read = omap_rng_do_read;
+	priv->rng.init = omap_rng_init;
+	priv->rng.cleanup = omap_rng_cleanup;
+
+	priv->rng.priv = (unsigned long)priv;
+	platform_set_drvdata(pdev, priv);
+	priv->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->base)) {
+		ret = PTR_ERR(priv->base);
+		goto err_ioremap;
+	}
+
+	priv->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+	if (!priv->rng.name) {
+		ret = -ENOMEM;
+		goto err_ioremap;
+	}
+
+	pm_runtime_enable(&pdev->dev);
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
+		pm_runtime_put_noidle(&pdev->dev);
+		goto err_ioremap;
+	}
+
+	priv->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER) {
+		ret = -EPROBE_DEFER;
+		goto err_register;
+	}
+	if (!IS_ERR(priv->clk)) {
+		ret = clk_prepare_enable(priv->clk);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Unable to enable the clk: %d\n", ret);
+			goto err_register;
+		}
+	}
+
+	priv->clk_reg = devm_clk_get(&pdev->dev, "reg");
+	if (IS_ERR(priv->clk_reg) && PTR_ERR(priv->clk_reg) == -EPROBE_DEFER) {
+		ret = -EPROBE_DEFER;
+		goto err_register;
+	}
+	if (!IS_ERR(priv->clk_reg)) {
+		ret = clk_prepare_enable(priv->clk_reg);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Unable to enable the register clk: %d\n",
+				ret);
+			goto err_register;
+		}
+	}
+
+	ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
+				get_omap_rng_device_details(priv);
+	if (ret)
+		goto err_register;
+
+	ret = hwrng_register(&priv->rng);
+	if (ret)
+		goto err_register;
+
+	dev_info(&pdev->dev, "Random Number Generator ver. %02x\n",
+		 omap_rng_read(priv, RNG_REV_REG));
+
+	return 0;
+
+err_register:
+	priv->base = NULL;
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	clk_disable_unprepare(priv->clk_reg);
+	clk_disable_unprepare(priv->clk);
+err_ioremap:
+	dev_err(dev, "initialization failed.\n");
+	return ret;
+}
+
+static int omap_rng_remove(struct platform_device *pdev)
+{
+	struct omap_rng_dev *priv = platform_get_drvdata(pdev);
+
+	hwrng_unregister(&priv->rng);
+
+	priv->pdata->cleanup(priv);
+
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	clk_disable_unprepare(priv->clk);
+	clk_disable_unprepare(priv->clk_reg);
+
+	return 0;
+}
+
+static int __maybe_unused omap_rng_suspend(struct device *dev)
+{
+	struct omap_rng_dev *priv = dev_get_drvdata(dev);
+
+	priv->pdata->cleanup(priv);
+	pm_runtime_put_sync(dev);
+
+	return 0;
+}
+
+static int __maybe_unused omap_rng_resume(struct device *dev)
+{
+	struct omap_rng_dev *priv = dev_get_drvdata(dev);
+	int ret;
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		dev_err(dev, "Failed to runtime_get device: %d\n", ret);
+		pm_runtime_put_noidle(dev);
+		return ret;
+	}
+
+	priv->pdata->init(priv);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume);
+
+static struct platform_driver omap_rng_driver = {
+	.driver = {
+		.name		= "omap_rng",
+		.pm		= &omap_rng_pm,
+		.of_match_table = of_match_ptr(omap_rng_of_match),
+	},
+	.probe		= omap_rng_probe,
+	.remove		= omap_rng_remove,
+};
+
+module_platform_driver(omap_rng_driver);
+MODULE_ALIAS("platform:omap_rng");
+MODULE_AUTHOR("Deepak Saxena (and others)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
new file mode 100644
index 0000000..38b7190
--- /dev/null
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -0,0 +1,141 @@
+/*
+ * omap3-rom-rng.c - RNG driver for TI OMAP3 CPU family
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Juha Yrjola <juha.yrjola@solidboot.com>
+ *
+ * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/hw_random.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
+#define RNG_RESET			0x01
+#define RNG_GEN_PRNG_HW_INIT		0x02
+#define RNG_GEN_HW			0x08
+
+/* param1: ptr, param2: count, param3: flag */
+static u32 (*omap3_rom_rng_call)(u32, u32, u32);
+
+static struct delayed_work idle_work;
+static int rng_idle;
+static struct clk *rng_clk;
+
+static void omap3_rom_rng_idle(struct work_struct *work)
+{
+	int r;
+
+	r = omap3_rom_rng_call(0, 0, RNG_RESET);
+	if (r != 0) {
+		pr_err("reset failed: %d\n", r);
+		return;
+	}
+	clk_disable_unprepare(rng_clk);
+	rng_idle = 1;
+}
+
+static int omap3_rom_rng_get_random(void *buf, unsigned int count)
+{
+	u32 r;
+	u32 ptr;
+
+	cancel_delayed_work_sync(&idle_work);
+	if (rng_idle) {
+		r = clk_prepare_enable(rng_clk);
+		if (r)
+			return r;
+
+		r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
+		if (r != 0) {
+			clk_disable_unprepare(rng_clk);
+			pr_err("HW init failed: %d\n", r);
+			return -EIO;
+		}
+		rng_idle = 0;
+	}
+
+	ptr = virt_to_phys(buf);
+	r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW);
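+	/*
+	 * Re-arm the idle timer: if no new request arrives within
+	 * 500ms, the idle work resets the RNG and gates its clock.
+	 */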
+	schedule_delayed_work(&idle_work, msecs_to_jiffies(500));
+	if (r != 0)
+		return -EINVAL;
+	return 0;
+}
+
+static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
+{
+	int r;
+
+	r = omap3_rom_rng_get_random(data, 4);
+	if (r < 0)
+		return r;
+	return 4;
+}
+
+static struct hwrng omap3_rom_rng_ops = {
+	.name		= "omap3-rom",
+	.read		= omap3_rom_rng_read,
+};
+
+static int omap3_rom_rng_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	pr_info("initializing\n");
+
+	omap3_rom_rng_call = pdev->dev.platform_data;
+	if (!omap3_rom_rng_call) {
+		pr_err("omap3_rom_rng_call is NULL\n");
+		return -EINVAL;
+	}
+
+	INIT_DELAYED_WORK(&idle_work, omap3_rom_rng_idle);
+	rng_clk = devm_clk_get(&pdev->dev, "ick");
+	if (IS_ERR(rng_clk)) {
+		pr_err("unable to get RNG clock\n");
+		return PTR_ERR(rng_clk);
+	}
+
+	/* Leave the RNG in reset state. */
+	ret = clk_prepare_enable(rng_clk);
+	if (ret)
+		return ret;
+	omap3_rom_rng_idle(NULL);
+
+	return hwrng_register(&omap3_rom_rng_ops);
+}
+
+static int omap3_rom_rng_remove(struct platform_device *pdev)
+{
+	cancel_delayed_work_sync(&idle_work);
+	hwrng_unregister(&omap3_rom_rng_ops);
+	clk_disable_unprepare(rng_clk);
+	return 0;
+}
+
+static struct platform_driver omap3_rom_rng_driver = {
+	.driver = {
+		.name		= "omap3-rom-rng",
+	},
+	.probe		= omap3_rom_rng_probe,
+	.remove		= omap3_rom_rng_remove,
+};
+
+module_platform_driver(omap3_rom_rng_driver);
+
+MODULE_ALIAS("platform:omap3-rom-rng");
+MODULE_AUTHOR("Juha Yrjola");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
new file mode 100644
index 0000000..545df48
--- /dev/null
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient on-chip RNG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#define SDCRNG_CTL_REG			0x00
+#define   SDCRNG_CTL_FVLD_M		0x0000f000
+#define   SDCRNG_CTL_FVLD_S		12
+#define   SDCRNG_CTL_KSZ		0x00000800
+#define   SDCRNG_CTL_RSRC_CRG		0x00000010
+#define   SDCRNG_CTL_RSRC_RRG		0x00000000
+#define   SDCRNG_CTL_CE			0x00000004
+#define   SDCRNG_CTL_RE			0x00000002
+#define   SDCRNG_CTL_DR			0x00000001
+#define   SDCRNG_CTL_SELECT_RRG_RNG	(SDCRNG_CTL_RE | SDCRNG_CTL_RSRC_RRG)
+#define   SDCRNG_CTL_SELECT_CRG_RNG	(SDCRNG_CTL_CE | SDCRNG_CTL_RSRC_CRG)
+#define SDCRNG_VAL_REG			0x20
+
+#define MODULE_NAME "pasemi_rng"
+
+static int pasemi_rng_data_present(struct hwrng *rng, int wait)
+{
+	void __iomem *rng_regs = (void __iomem *)rng->priv;
+	int data, i;
+
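+	/* Poll the FIFO-valid field for up to ~200us (20 x 10us) */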
+	for (i = 0; i < 20; i++) {
+		data = (in_le32(rng_regs + SDCRNG_CTL_REG)
+			& SDCRNG_CTL_FVLD_M) ? 1 : 0;
+		if (data || !wait)
+			break;
+		udelay(10);
+	}
+	return data;
+}
+
+static int pasemi_rng_data_read(struct hwrng *rng, u32 *data)
+{
+	void __iomem *rng_regs = (void __iomem *)rng->priv;
+	*data = in_le32(rng_regs + SDCRNG_VAL_REG);
+	return 4;
+}
+
+static int pasemi_rng_init(struct hwrng *rng)
+{
+	void __iomem *rng_regs = (void __iomem *)rng->priv;
+	u32 ctl;
+
+	ctl = SDCRNG_CTL_DR | SDCRNG_CTL_SELECT_RRG_RNG | SDCRNG_CTL_KSZ;
+	out_le32(rng_regs + SDCRNG_CTL_REG, ctl);
+	out_le32(rng_regs + SDCRNG_CTL_REG, ctl & ~SDCRNG_CTL_DR);
+
+	return 0;
+}
+
+static void pasemi_rng_cleanup(struct hwrng *rng)
+{
+	void __iomem *rng_regs = (void __iomem *)rng->priv;
+	u32 ctl;
+
+	ctl = SDCRNG_CTL_RE | SDCRNG_CTL_CE;
+	out_le32(rng_regs + SDCRNG_CTL_REG,
+		 in_le32(rng_regs + SDCRNG_CTL_REG) & ~ctl);
+}
+
+static struct hwrng pasemi_rng = {
+	.name		= MODULE_NAME,
+	.init		= pasemi_rng_init,
+	.cleanup	= pasemi_rng_cleanup,
+	.data_present	= pasemi_rng_data_present,
+	.data_read	= pasemi_rng_data_read,
+};
+
+static int rng_probe(struct platform_device *pdev)
+{
+	void __iomem *rng_regs;
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rng_regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(rng_regs))
+		return PTR_ERR(rng_regs);
+
+	pasemi_rng.priv = (unsigned long)rng_regs;
+
+	pr_info("Registering PA Semi RNG\n");
+	return devm_hwrng_register(&pdev->dev, &pasemi_rng);
+}
+
+static const struct of_device_id rng_match[] = {
+	{ .compatible      = "1682m-rng", },
+	{ .compatible      = "pasemi,pwrficient-rng", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, rng_match);
+
+static struct platform_driver rng_driver = {
+	.driver = {
+		.name = "pasemi-rng",
+		.of_match_table = rng_match,
+	},
+	.probe		= rng_probe,
+};
+
+module_platform_driver(rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("H/W RNG driver for PA Semi processor");
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
new file mode 100644
index 0000000..9b5e68a
--- /dev/null
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -0,0 +1,151 @@
+/*
+ * PIC32 RNG driver
+ *
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2016 Microchip Technology Inc.  All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define RNGCON		0x04
+#define  TRNGEN		BIT(8)
+#define  PRNGEN		BIT(9)
+#define  PRNGCONT	BIT(10)
+#define  TRNGMOD	BIT(11)
+#define  SEEDLOAD	BIT(12)
+#define RNGPOLY1	0x08
+#define RNGPOLY2	0x0C
+#define RNGNUMGEN1	0x10
+#define RNGNUMGEN2	0x14
+#define RNGSEED1	0x18
+#define RNGSEED2	0x1C
+#define RNGRCNT		0x20
+#define  RCNT_MASK	0x7F
+
+struct pic32_rng {
+	void __iomem	*base;
+	struct hwrng	rng;
+	struct clk	*clk;
+};
+
+/*
+ * The TRNG can generate up to 24 Mbps, so a full 64-bit sample takes
+ * roughly 2.7 us to accumulate. A limit of 500 polling iterations is
+ * therefore a safe timeout, given the instructions executed in the loop
+ * and that the TRNG may not always run at its maximum rate.
+ */
+#define RNG_TIMEOUT 500
+
+static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
+			  bool wait)
+{
+	struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng);
+	u64 *data = buf;
+	u32 t;
+	unsigned int timeout = RNG_TIMEOUT;
+
+	do {
+		t = readl(priv->base + RNGRCNT) & RCNT_MASK;
+		if (t == 64) {
+			/* TRNG value comes through the seed registers */
+			*data = ((u64)readl(priv->base + RNGSEED2) << 32) +
+				readl(priv->base + RNGSEED1);
+			return 8;
+		}
+	} while (wait && --timeout);
+
+	return -EIO;
+}
+
+static int pic32_rng_probe(struct platform_device *pdev)
+{
+	struct pic32_rng *priv;
+	struct resource *res;
+	u32 v;
+	int ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	priv->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(priv->clk))
+		return PTR_ERR(priv->clk);
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
+
+	/* enable TRNG in enhanced mode */
+	v = TRNGEN | TRNGMOD;
+	writel(v, priv->base + RNGCON);
+
+	priv->rng.name = pdev->name;
+	priv->rng.read = pic32_rng_read;
+
+	ret = hwrng_register(&priv->rng);
+	if (ret)
+		goto err_register;
+
+	platform_set_drvdata(pdev, priv);
+
+	return 0;
+
+err_register:
+	clk_disable_unprepare(priv->clk);
+	return ret;
+}
+
+static int pic32_rng_remove(struct platform_device *pdev)
+{
+	struct pic32_rng *rng = platform_get_drvdata(pdev);
+
+	hwrng_unregister(&rng->rng);
+	writel(0, rng->base + RNGCON);
+	clk_disable_unprepare(rng->clk);
+	return 0;
+}
+
+static const struct of_device_id pic32_rng_of_match[] = {
+	{ .compatible	= "microchip,pic32mzda-rng", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pic32_rng_of_match);
+
+static struct platform_driver pic32_rng_driver = {
+	.probe		= pic32_rng_probe,
+	.remove		= pic32_rng_remove,
+	.driver		= {
+		.name	= "pic32-rng",
+		.of_match_table = of_match_ptr(pic32_rng_of_match),
+	},
+};
+
+module_platform_driver(pic32_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joshua Henderson <joshua.henderson@microchip.com>");
+MODULE_DESCRIPTION("Microchip PIC32 RNG Driver");
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
new file mode 100644
index 0000000..791182a
--- /dev/null
+++ b/drivers/char/hw_random/powernv-rng.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2013 Michael Ellerman, Guo Chao, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/hw_random.h>
+
+static int powernv_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	unsigned long *buf;
+	int i, len;
+
+	/* We rely on rng_buffer_size() being >= sizeof(unsigned long) */
+	len = max / sizeof(unsigned long);
+
+	buf = (unsigned long *)data;
+
+	for (i = 0; i < len; i++)
+		powernv_get_random_long(buf++);
+
+	return len * sizeof(unsigned long);
+}
+
+static struct hwrng powernv_hwrng = {
+	.name = "powernv-rng",
+	.read = powernv_rng_read,
+};
+
+static int powernv_rng_remove(struct platform_device *pdev)
+{
+	hwrng_unregister(&powernv_hwrng);
+
+	return 0;
+}
+
+static int powernv_rng_probe(struct platform_device *pdev)
+{
+	int rc;
+
+	rc = hwrng_register(&powernv_hwrng);
+	if (rc) {
+		/* We only register one device, ignore any others */
+		if (rc == -EEXIST)
+			rc = -ENODEV;
+
+		return rc;
+	}
+
+	pr_info("Registered powernv hwrng.\n");
+
+	return 0;
+}
+
+static const struct of_device_id powernv_rng_match[] = {
+	{ .compatible	= "ibm,power-rng",},
+	{},
+};
+MODULE_DEVICE_TABLE(of, powernv_rng_match);
+
+static struct platform_driver powernv_rng_driver = {
+	.driver = {
+		.name = "powernv_rng",
+		.of_match_table = powernv_rng_match,
+	},
+	.probe	= powernv_rng_probe,
+	.remove = powernv_rng_remove,
+};
+module_platform_driver(powernv_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Bare metal HWRNG driver for POWER7+ and above");
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
new file mode 100644
index 0000000..4e2a3f6
--- /dev/null
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2010 Michael Neuling IBM Corporation
+ *
+ * Driver for the pseries hardware RNG for POWER7+ and above
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/hw_random.h>
+#include <asm/vio.h>
+
+
+static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	u64 buffer[PLPAR_HCALL_BUFSIZE];
+	int rc;
+
+	rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
+	if (rc != H_SUCCESS) {
+		pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
+		return -EIO;
+	}
+	memcpy(data, buffer, 8);
+
+	/* The hypervisor interface returns 64 bits */
+	return 8;
+}
+
+/**
+ * pseries_rng_get_desired_dma - Return desired DMA allocation for CMO operations
+ *
+ * This function is required for a driver to operate in a CMO environment,
+ * but this device does not make use of DMA allocations, so it returns 0.
+ *
+ * Return value:
+ *	Number of bytes of IO data the driver will need to perform well: 0
+ */
+static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
+{
+	return 0;
+}
+
+static struct hwrng pseries_rng = {
+	.name		= KBUILD_MODNAME,
+	.read		= pseries_rng_read,
+};
+
+static int pseries_rng_probe(struct vio_dev *dev,
+		const struct vio_device_id *id)
+{
+	return hwrng_register(&pseries_rng);
+}
+
+static int pseries_rng_remove(struct vio_dev *dev)
+{
+	hwrng_unregister(&pseries_rng);
+	return 0;
+}
+
+static const struct vio_device_id pseries_rng_driver_ids[] = {
+	{ "ibm,random-v1", "ibm,random"},
+	{ "", "" }
+};
+MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids);
+
+static struct vio_driver pseries_rng_driver = {
+	.name = KBUILD_MODNAME,
+	.probe = pseries_rng_probe,
+	.remove = pseries_rng_remove,
+	.get_desired_dma = pseries_rng_get_desired_dma,
+	.id_table = pseries_rng_driver_ids
+};
+
+static int __init rng_init(void)
+{
+	pr_info("Registering IBM pSeries RNG driver\n");
+	return vio_register_driver(&pseries_rng_driver);
+}
+
+module_init(rng_init);
+
+static void __exit rng_exit(void)
+{
+	vio_unregister_driver(&pseries_rng_driver);
+}
+module_exit(rng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michael Neuling <mikey@neuling.org>");
+MODULE_DESCRIPTION("H/W RNG driver for IBM pSeries processors");
diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c
new file mode 100644
index 0000000..aca48e8
--- /dev/null
+++ b/drivers/char/hw_random/s390-trng.c
@@ -0,0 +1,268 @@
+/*
+ * s390 TRNG device driver
+ *
+ * Driver for the TRNG (true random number generation) command
+ * available via CPACF extension MSA 7 on the s390 arch.
+ *
+ * Copyright IBM Corp. 2017
+ * Author(s): Harald Freudenberger <freude@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ */
+
+#define KMSG_COMPONENT "trng"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+#include <linux/miscdevice.h>
+#include <linux/debugfs.h>
+#include <linux/atomic.h>
+#include <linux/random.h>
+#include <linux/sched/signal.h>
+#include <asm/debug.h>
+#include <asm/cpacf.h>
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 CPACF TRNG device driver");
+
+
+/* trng related debug feature things */
+
+static debug_info_t *debug_info;
+
+#define DEBUG_DBG(...)	debug_sprintf_event(debug_info, 6, ##__VA_ARGS__)
+#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__)
+#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__)
+#define DEBUG_ERR(...)	debug_sprintf_event(debug_info, 3, ##__VA_ARGS__)
+
+
+/* trng helpers */
+
+static atomic64_t trng_dev_counter = ATOMIC64_INIT(0);
+static atomic64_t trng_hwrng_counter = ATOMIC64_INIT(0);
+
+
+/* file io functions */
+
+static int trng_open(struct inode *inode, struct file *file)
+{
+	return nonseekable_open(inode, file);
+}
+
+static ssize_t trng_read(struct file *file, char __user *ubuf,
+			 size_t nbytes, loff_t *ppos)
+{
+	u8 buf[32];
+	u8 *p = buf;
+	unsigned int n;
+	ssize_t ret = 0;
+
+	/*
+	 * use buf for requests <= sizeof(buf),
+	 * otherwise allocate one page and fetch
+	 * pagewise.
+	 */
+
+	if (nbytes > sizeof(buf)) {
+		p = (u8 *) __get_free_page(GFP_KERNEL);
+		if (!p)
+			return -ENOMEM;
+	}
+
+	while (nbytes) {
+		if (need_resched()) {
+			if (signal_pending(current)) {
+				if (ret == 0)
+					ret = -ERESTARTSYS;
+				break;
+			}
+			schedule();
+		}
+		n = nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes;
+		cpacf_trng(NULL, 0, p, n);
+		atomic64_add(n, &trng_dev_counter);
+		if (copy_to_user(ubuf, p, n)) {
+			ret = -EFAULT;
+			break;
+		}
+		nbytes -= n;
+		ubuf += n;
+		ret += n;
+	}
+
+	if (p != buf)
+		free_page((unsigned long) p);
+
+	DEBUG_DBG("trng_read()=%zd\n", ret);
+	return ret;
+}
+
+
+/* sysfs */
+
+static ssize_t trng_counter_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	u64 dev_counter = atomic64_read(&trng_dev_counter);
+	u64 hwrng_counter = atomic64_read(&trng_hwrng_counter);
+#if IS_ENABLED(CONFIG_ARCH_RANDOM)
+	u64 arch_counter = atomic64_read(&s390_arch_random_counter);
+
+	return snprintf(buf, PAGE_SIZE,
+			"trng:  %llu\n"
+			"hwrng: %llu\n"
+			"arch:  %llu\n"
+			"total: %llu\n",
+			dev_counter, hwrng_counter, arch_counter,
+			dev_counter + hwrng_counter + arch_counter);
+#else
+	return snprintf(buf, PAGE_SIZE,
+			"trng:  %llu\n"
+			"hwrng: %llu\n"
+			"total: %llu\n",
+			dev_counter, hwrng_counter,
+			dev_counter + hwrng_counter);
+#endif
+}
+static DEVICE_ATTR(byte_counter, 0444, trng_counter_show, NULL);
+
+static struct attribute *trng_dev_attrs[] = {
+	&dev_attr_byte_counter.attr,
+	NULL
+};
+
+static const struct attribute_group trng_dev_attr_group = {
+	.attrs = trng_dev_attrs
+};
+
+static const struct attribute_group *trng_dev_attr_groups[] = {
+	&trng_dev_attr_group,
+	NULL
+};
+
+static const struct file_operations trng_fops = {
+	.owner		= THIS_MODULE,
+	.open		= &trng_open,
+	.release	= NULL,
+	.read		= &trng_read,
+	.llseek		= noop_llseek,
+};
+
+static struct miscdevice trng_dev = {
+	.name	= "trng",
+	.minor	= MISC_DYNAMIC_MINOR,
+	.mode	= 0444,
+	.fops	= &trng_fops,
+	.groups = trng_dev_attr_groups,
+};
+
+
+/* hwrng_register */
+
+static inline void _trng_hwrng_read(u8 *buf, size_t len)
+{
+	cpacf_trng(NULL, 0, buf, len);
+	atomic64_add(len, &trng_hwrng_counter);
+}
+
+static int trng_hwrng_data_read(struct hwrng *rng, u32 *data)
+{
+	size_t len = sizeof(*data);
+
+	_trng_hwrng_read((u8 *) data, len);
+
+	DEBUG_DBG("trng_hwrng_data_read()=%zu\n", len);
+
+	return len;
+}
+
+static int trng_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	size_t len = max <= PAGE_SIZE ? max : PAGE_SIZE;
+
+	_trng_hwrng_read((u8 *) data, len);
+
+	DEBUG_DBG("trng_hwrng_read()=%zu\n", len);
+
+	return len;
+}
+
+/*
+ * hwrng register struct
+ * The trng is supposed to deliver 100% entropy, and thus we
+ * register with a very high quality value. Quality is measured in
+ * bits of entropy per 1024 bits of input, so 999 claims ~97.6%.
+ */
+static struct hwrng trng_hwrng_dev = {
+	.name		= "s390-trng",
+	.data_read	= trng_hwrng_data_read,
+	.read		= trng_hwrng_read,
+	.quality	= 999,
+};
+
+
+/* init and exit */
+
+static void __init trng_debug_init(void)
+{
+	debug_info = debug_register("trng", 1, 1, 4 * sizeof(long));
+	debug_register_view(debug_info, &debug_sprintf_view);
+	debug_set_level(debug_info, 3);
+}
+
+static void trng_debug_exit(void)
+{
+	debug_unregister(debug_info);
+}
+
+static int __init trng_init(void)
+{
+	int ret;
+
+	trng_debug_init();
+
+	/* check if subfunction CPACF_PRNO_TRNG is available */
+	if (!cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG)) {
+		DEBUG_INFO("trng_init CPACF_PRNO_TRNG not available\n");
+		ret = -ENODEV;
+		goto out_dbg;
+	}
+
+	ret = misc_register(&trng_dev);
+	if (ret) {
+		DEBUG_WARN("trng_init misc_register() failed rc=%d\n", ret);
+		goto out_dbg;
+	}
+
+	ret = hwrng_register(&trng_hwrng_dev);
+	if (ret) {
+		DEBUG_WARN("trng_init hwrng_register() failed rc=%d\n", ret);
+		goto out_misc;
+	}
+
+	DEBUG_DBG("trng_init successful\n");
+
+	return 0;
+
+out_misc:
+	misc_deregister(&trng_dev);
+out_dbg:
+	trng_debug_exit();
+	return ret;
+}
+
+static void __exit trng_exit(void)
+{
+	hwrng_unregister(&trng_hwrng_dev);
+	misc_deregister(&trng_dev);
+	trng_debug_exit();
+}
+
+module_cpu_feature_match(MSA, trng_init);
+module_exit(trng_exit);
diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c
new file mode 100644
index 0000000..938ec10
--- /dev/null
+++ b/drivers/char/hw_random/st-rng.c
@@ -0,0 +1,149 @@
+/*
+ * ST Random Number Generator Driver for ST's Platforms
+ *
+ * Author: Pankaj Dev <pankaj.dev@st.com>
+ *         Lee Jones <lee.jones@linaro.org>
+ *
+ * Copyright (C) 2015 STMicroelectronics (R&D) Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Registers */
+#define ST_RNG_STATUS_REG		0x20
+#define ST_RNG_DATA_REG			0x24
+
+/* Registers fields */
+#define ST_RNG_STATUS_BAD_SEQUENCE	BIT(0)
+#define ST_RNG_STATUS_BAD_ALTERNANCE	BIT(1)
+#define ST_RNG_STATUS_FIFO_FULL		BIT(5)
+
+#define ST_RNG_SAMPLE_SIZE		2 /* 2 Byte (16bit) samples */
+#define ST_RNG_FIFO_DEPTH		4
+#define ST_RNG_FIFO_SIZE		(ST_RNG_FIFO_DEPTH * ST_RNG_SAMPLE_SIZE)
+
+/*
+ * Samples are documented to be available every 0.667us, so in theory
+ * the 4 sample deep FIFO should take 2.668us to fill.  However, during
+ * thorough testing, it became apparent that filling the FIFO actually
+ * takes closer to 12us.  We then multiply by 2 in order to account for
+ * the lack of udelay()'s reliability, suggested by Russell King.
+ */
+#define ST_RNG_FILL_FIFO_TIMEOUT	(12 * 2)
+
+struct st_rng_data {
+	void __iomem	*base;
+	struct clk	*clk;
+	struct hwrng	ops;
+};
+
+static int st_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct st_rng_data *ddata = (struct st_rng_data *)rng->priv;
+	u32 status;
+	int i;
+
+	/* Wait until FIFO is full - at most ST_RNG_FILL_FIFO_TIMEOUT us */
+	for (i = 0; i < ST_RNG_FILL_FIFO_TIMEOUT; i++) {
+		status = readl_relaxed(ddata->base + ST_RNG_STATUS_REG);
+		if (status & ST_RNG_STATUS_FIFO_FULL)
+			break;
+		udelay(1);
+	}
+
+	if (i == ST_RNG_FILL_FIFO_TIMEOUT)
+		return 0;
+
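+	/* Drain the FIFO: each 32-bit register read yields one 16-bit sample */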
+	for (i = 0; i < ST_RNG_FIFO_SIZE && i < max; i += 2)
+		*(u16 *)(data + i) =
+			readl_relaxed(ddata->base + ST_RNG_DATA_REG);
+
+	return i;	/* number of bytes read */
+}
+
+static int st_rng_probe(struct platform_device *pdev)
+{
+	struct st_rng_data *ddata;
+	struct resource *res;
+	struct clk *clk;
+	void __iomem *base;
+	int ret;
+
+	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+	if (!ddata)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	ret = clk_prepare_enable(clk);
+	if (ret)
+		return ret;
+
+	ddata->ops.priv	= (unsigned long)ddata;
+	ddata->ops.read	= st_rng_read;
+	ddata->ops.name	= pdev->name;
+	ddata->base	= base;
+	ddata->clk	= clk;
+
+	dev_set_drvdata(&pdev->dev, ddata);
+
+	ret = hwrng_register(&ddata->ops);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register HW RNG\n");
+		clk_disable_unprepare(clk);
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Successfully registered HW RNG\n");
+
+	return 0;
+}
+
+static int st_rng_remove(struct platform_device *pdev)
+{
+	struct st_rng_data *ddata = dev_get_drvdata(&pdev->dev);
+
+	hwrng_unregister(&ddata->ops);
+
+	clk_disable_unprepare(ddata->clk);
+
+	return 0;
+}
+
+static const struct of_device_id st_rng_match[] = {
+	{ .compatible = "st,rng" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, st_rng_match);
+
+static struct platform_driver st_rng_driver = {
+	.driver = {
+		.name = "st-hwrandom",
+		.of_match_table = of_match_ptr(st_rng_match),
+	},
+	.probe = st_rng_probe,
+	.remove = st_rng_remove
+};
+
+module_platform_driver(st_rng_driver);
+
+MODULE_AUTHOR("Pankaj Dev <pankaj.dev@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
new file mode 100644
index 0000000..042860d
--- /dev/null
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2015, Daniel Thompson
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define RNG_CR 0x00
+#define RNG_CR_RNGEN BIT(2)
+#define RNG_CR_CED BIT(5)
+
+#define RNG_SR 0x04
+#define RNG_SR_SEIS BIT(6)
+#define RNG_SR_CEIS BIT(5)
+#define RNG_SR_DRDY BIT(0)
+
+#define RNG_DR 0x08
+
+struct stm32_rng_private {
+	struct hwrng rng;
+	void __iomem *base;
+	struct clk *clk;
+	struct reset_control *rst;
+	bool ced;
+};
+
+static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct stm32_rng_private *priv =
+	    container_of(rng, struct stm32_rng_private, rng);
+	u32 sr;
+	int retval = 0;
+
+	pm_runtime_get_sync((struct device *) priv->rng.priv);
+
+	while (max > sizeof(u32)) {
+		sr = readl_relaxed(priv->base + RNG_SR);
+		/*
+		 * Manage the timeout, which is timer-based, and take
+		 * care of the initial delay when enabling the RNG.
+		 */
+		if (!sr && wait) {
+			retval = readl_relaxed_poll_timeout_atomic(priv->base
+								   + RNG_SR,
+								   sr, sr,
+								   10, 50000);
+			if (retval)
+				dev_err((struct device *)priv->rng.priv,
+					"%s: timeout %x!\n", __func__, sr);
+		}
+
+		/* If error detected or data not ready... */
+		if (sr != RNG_SR_DRDY) {
+			if (WARN_ONCE(sr & (RNG_SR_SEIS | RNG_SR_CEIS),
+					"bad RNG status - %x\n", sr))
+				writel_relaxed(0, priv->base + RNG_SR);
+			break;
+		}
+
+		*(u32 *)data = readl_relaxed(priv->base + RNG_DR);
+
+		retval += sizeof(u32);
+		data += sizeof(u32);
+		max -= sizeof(u32);
+	}
+
+	pm_runtime_mark_last_busy((struct device *) priv->rng.priv);
+	pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv);
+
+	return retval || !wait ? retval : -EIO;
+}
+
+static int stm32_rng_init(struct hwrng *rng)
+{
+	struct stm32_rng_private *priv =
+	    container_of(rng, struct stm32_rng_private, rng);
+	int err;
+
+	err = clk_prepare_enable(priv->clk);
+	if (err)
+		return err;
+
+	if (priv->ced)
+		writel_relaxed(RNG_CR_RNGEN, priv->base + RNG_CR);
+	else
+		writel_relaxed(RNG_CR_RNGEN | RNG_CR_CED,
+			       priv->base + RNG_CR);
+
+	/* clear error indicators */
+	writel_relaxed(0, priv->base + RNG_SR);
+
+	return 0;
+}
+
+static void stm32_rng_cleanup(struct hwrng *rng)
+{
+	struct stm32_rng_private *priv =
+	    container_of(rng, struct stm32_rng_private, rng);
+
+	writel_relaxed(0, priv->base + RNG_CR);
+	clk_disable_unprepare(priv->clk);
+}
+
+static int stm32_rng_probe(struct platform_device *ofdev)
+{
+	struct device *dev = &ofdev->dev;
+	struct device_node *np = ofdev->dev.of_node;
+	struct stm32_rng_private *priv;
+	struct resource res;
+	int err;
+
+	priv = devm_kzalloc(dev, sizeof(struct stm32_rng_private), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	err = of_address_to_resource(np, 0, &res);
+	if (err)
+		return err;
+
+	priv->base = devm_ioremap_resource(dev, &res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	priv->clk = devm_clk_get(&ofdev->dev, NULL);
+	if (IS_ERR(priv->clk))
+		return PTR_ERR(priv->clk);
+
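+	/* Pulse the optional reset line to bring the RNG into a known state */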
+	priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
+	if (!IS_ERR(priv->rst)) {
+		reset_control_assert(priv->rst);
+		udelay(2);
+		reset_control_deassert(priv->rst);
+	}
+
+	priv->ced = of_property_read_bool(np, "clock-error-detect");
+
+	dev_set_drvdata(dev, priv);
+
+	priv->rng.name = dev_driver_string(dev);
+#ifndef CONFIG_PM
+	priv->rng.init = stm32_rng_init;
+	priv->rng.cleanup = stm32_rng_cleanup;
+#endif
+	priv->rng.read = stm32_rng_read;
+	priv->rng.priv = (unsigned long) dev;
+
+	pm_runtime_set_autosuspend_delay(dev, 100);
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_enable(dev);
+
+	return devm_hwrng_register(dev, &priv->rng);
+}
+
+#ifdef CONFIG_PM
+static int stm32_rng_runtime_suspend(struct device *dev)
+{
+	struct stm32_rng_private *priv = dev_get_drvdata(dev);
+
+	stm32_rng_cleanup(&priv->rng);
+
+	return 0;
+}
+
+static int stm32_rng_runtime_resume(struct device *dev)
+{
+	struct stm32_rng_private *priv = dev_get_drvdata(dev);
+
+	return stm32_rng_init(&priv->rng);
+}
+#endif
+
+static const struct dev_pm_ops stm32_rng_pm_ops = {
+	SET_RUNTIME_PM_OPS(stm32_rng_runtime_suspend,
+			   stm32_rng_runtime_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+};
+
+
+static const struct of_device_id stm32_rng_match[] = {
+	{
+		.compatible = "st,stm32-rng",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, stm32_rng_match);
+
+static struct platform_driver stm32_rng_driver = {
+	.driver = {
+		.name = "stm32-rng",
+		.pm = &stm32_rng_pm_ops,
+		.of_match_table = stm32_rng_match,
+	},
+	.probe = stm32_rng_probe,
+};
+
+module_platform_driver(stm32_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Thompson <daniel.thompson@linaro.org>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 RNG device driver");
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
new file mode 100644
index 0000000..f615684
--- /dev/null
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -0,0 +1,216 @@
+/*
+ * drivers/char/hw_random/timeriomem-rng.c
+ *
+ * Copyright (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ *
+ * Derived from drivers/char/hw_random/omap-rng.c
+ *   Copyright 2005 (c) MontaVista Software, Inc.
+ *   Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ *   This driver is useful for platforms that have an IO range that provides
+ *   periodic random data from a single IO memory address.  All the platform
+ *   has to do is provide the address and the 'wait time' after which new
+ *   data becomes available (see the example platform data sketch at the
+ *   end of this file).
+ *
+ * TODO: add support for reading sizes other than 32bits and masking
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/hrtimer.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/timeriomem-rng.h>
+
+struct timeriomem_rng_private {
+	void __iomem		*io_base;
+	ktime_t			period;
+	unsigned int		present:1;
+
+	struct hrtimer		timer;
+	struct completion	completion;
+
+	struct hwrng		rng_ops;
+};
+
+static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
+				size_t max, bool wait)
+{
+	struct timeriomem_rng_private *priv =
+		container_of(hwrng, struct timeriomem_rng_private, rng_ops);
+	int retval = 0;
+	int period_us = ktime_to_us(priv->period);
+
+	/*
+	 * There may not have been enough time for new data to be generated
+	 * since the last request.  If the caller doesn't want to wait, let them
+	 * bail out.  Otherwise, wait for the completion.  If the new data has
+	 * already been generated, the completion should already be available.
+	 */
+	if (!wait && !priv->present)
+		return 0;
+
+	wait_for_completion(&priv->completion);
+
+	do {
+		/*
+		 * After the first read, all additional reads will need to wait
+		 * for the RNG to generate new data.  Since the period can have
+		 * a wide range of values (1us to 1s have been observed), allow
+		 * for 1% tolerance in the sleep time rather than a fixed value.
+		 */
+		if (retval > 0)
+			usleep_range(period_us,
+					period_us + max(1, period_us / 100));
+
+		*(u32 *)data = readl(priv->io_base);
+		retval += sizeof(u32);
+		data += sizeof(u32);
+		max -= sizeof(u32);
+	} while (wait && max > sizeof(u32));
+
+	/*
+	 * Block any new callers until the RNG has had time to generate new
+	 * data.
+	 */
+	priv->present = 0;
+	reinit_completion(&priv->completion);
+	hrtimer_forward_now(&priv->timer, priv->period);
+	hrtimer_restart(&priv->timer);
+
+	return retval;
+}
+
+static enum hrtimer_restart timeriomem_rng_trigger(struct hrtimer *timer)
+{
+	struct timeriomem_rng_private *priv
+		= container_of(timer, struct timeriomem_rng_private, timer);
+
+	priv->present = 1;
+	complete(&priv->completion);
+
+	return HRTIMER_NORESTART;
+}
+
+static int timeriomem_rng_probe(struct platform_device *pdev)
+{
+	struct timeriomem_rng_data *pdata = pdev->dev.platform_data;
+	struct timeriomem_rng_private *priv;
+	struct resource *res;
+	int err = 0;
+	int period;
+
+	if (!pdev->dev.of_node && !pdata) {
+		dev_err(&pdev->dev, "timeriomem_rng_data is missing\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENXIO;
+
+	if (res->start % 4 != 0 || resource_size(res) != 4) {
+		dev_err(&pdev->dev,
+			"address must be four bytes wide and aligned\n");
+		return -EINVAL;
+	}
+
+	/* Allocate memory for the device structure (and zero it) */
+	priv = devm_kzalloc(&pdev->dev,
+			sizeof(struct timeriomem_rng_private), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, priv);
+
+	if (pdev->dev.of_node) {
+		int i;
+
+		if (!of_property_read_u32(pdev->dev.of_node,
+						"period", &i)) {
+			period = i;
+		} else {
+			dev_err(&pdev->dev, "missing period\n");
+			return -EINVAL;
+		}
+
+		if (!of_property_read_u32(pdev->dev.of_node,
+						"quality", &i))
+			priv->rng_ops.quality = i;
+		else
+			priv->rng_ops.quality = 0;
+	} else {
+		period = pdata->period;
+		priv->rng_ops.quality = pdata->quality;
+	}
+
+	priv->period = ns_to_ktime(period * NSEC_PER_USEC);
+	init_completion(&priv->completion);
+	hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	priv->timer.function = timeriomem_rng_trigger;
+
+	priv->rng_ops.name = dev_name(&pdev->dev);
+	priv->rng_ops.read = timeriomem_rng_read;
+
+	priv->io_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->io_base))
+		return PTR_ERR(priv->io_base);
+
+	/* Assume random data is already available. */
+	priv->present = 1;
+	complete(&priv->completion);
+
+	err = hwrng_register(&priv->rng_ops);
+	if (err) {
+		dev_err(&pdev->dev, "problem registering\n");
+		return err;
+	}
+
+	dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
+			priv->io_base, period);
+
+	return 0;
+}
+
+static int timeriomem_rng_remove(struct platform_device *pdev)
+{
+	struct timeriomem_rng_private *priv = platform_get_drvdata(pdev);
+
+	hwrng_unregister(&priv->rng_ops);
+	hrtimer_cancel(&priv->timer);
+
+	return 0;
+}
+
+static const struct of_device_id timeriomem_rng_match[] = {
+	{ .compatible = "timeriomem_rng" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, timeriomem_rng_match);
+
+static struct platform_driver timeriomem_rng_driver = {
+	.driver = {
+		.name		= "timeriomem_rng",
+		.of_match_table	= timeriomem_rng_match,
+	},
+	.probe		= timeriomem_rng_probe,
+	.remove		= timeriomem_rng_remove,
+};
+
+module_platform_driver(timeriomem_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
+MODULE_DESCRIPTION("Timer IOMEM H/W RNG driver");
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
new file mode 100644
index 0000000..1093583
--- /dev/null
+++ b/drivers/char/hw_random/tx4939-rng.c
@@ -0,0 +1,159 @@
+/*
+ * RNG driver for TX4939 Random Number Generators (RNG)
+ *
+ * Copyright (C) 2009 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/gfp.h>
+
+#define TX4939_RNG_RCSR		0x00000000
+#define TX4939_RNG_ROR(n)	(0x00000018 + (n) * 8)
+
+#define TX4939_RNG_RCSR_INTE	0x00000008
+#define TX4939_RNG_RCSR_RST	0x00000004
+#define TX4939_RNG_RCSR_FIN	0x00000002
+#define TX4939_RNG_RCSR_ST	0x00000001
+
+struct tx4939_rng {
+	struct hwrng rng;
+	void __iomem *base;
+	u64 databuf[3];
+	unsigned int data_avail;
+};
+
+static void rng_io_start(void)
+{
+#ifndef CONFIG_64BIT
+	/*
+	 * readq reads a 64-bit register using a 64-bit load.  On a
+	 * 32-bit kernel, however, an interrupt or any other processor
+	 * exception would clobber the upper 32 bits of the processor
+	 * register, so interrupts need to be disabled.
+	 */
+	local_irq_disable();
+#endif
+}
+
+static void rng_io_end(void)
+{
+#ifndef CONFIG_64BIT
+	local_irq_enable();
+#endif
+}
+
+static u64 read_rng(void __iomem *base, unsigned int offset)
+{
+	return ____raw_readq(base + offset);
+}
+
+static void write_rng(u64 val, void __iomem *base, unsigned int offset)
+{
+	return ____raw_writeq(val, base + offset);
+}
+
+static int tx4939_rng_data_present(struct hwrng *rng, int wait)
+{
+	struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
+	int i;
+
+	if (rngdev->data_avail)
+		return rngdev->data_avail;
+	for (i = 0; i < 20; i++) {
+		rng_io_start();
+		if (!(read_rng(rngdev->base, TX4939_RNG_RCSR)
+		      & TX4939_RNG_RCSR_ST)) {
+			rngdev->databuf[0] =
+				read_rng(rngdev->base, TX4939_RNG_ROR(0));
+			rngdev->databuf[1] =
+				read_rng(rngdev->base, TX4939_RNG_ROR(1));
+			rngdev->databuf[2] =
+				read_rng(rngdev->base, TX4939_RNG_ROR(2));
+			rngdev->data_avail =
+				sizeof(rngdev->databuf) / sizeof(u32);
+			/* Start RNG */
+			write_rng(TX4939_RNG_RCSR_ST,
+				  rngdev->base, TX4939_RNG_RCSR);
+			wait = 0;
+		}
+		rng_io_end();
+		if (!wait)
+			break;
+		/* 90 bus clock cycles by default for generation */
+		ndelay(90 * 5);
+	}
+	return rngdev->data_avail;
+}
+
+static int tx4939_rng_data_read(struct hwrng *rng, u32 *buffer)
+{
+	struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
+
+	rngdev->data_avail--;
+	*buffer = *((u32 *)&rngdev->databuf + rngdev->data_avail);
+	return sizeof(u32);
+}
+
+static int __init tx4939_rng_probe(struct platform_device *dev)
+{
+	struct tx4939_rng *rngdev;
+	struct resource *r;
+	int i;
+
+	rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
+	if (!rngdev)
+		return -ENOMEM;
+	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	rngdev->base = devm_ioremap_resource(&dev->dev, r);
+	if (IS_ERR(rngdev->base))
+		return PTR_ERR(rngdev->base);
+
+	rngdev->rng.name = dev_name(&dev->dev);
+	rngdev->rng.data_present = tx4939_rng_data_present;
+	rngdev->rng.data_read = tx4939_rng_data_read;
+
+	rng_io_start();
+	/* Reset RNG */
+	write_rng(TX4939_RNG_RCSR_RST, rngdev->base, TX4939_RNG_RCSR);
+	write_rng(0, rngdev->base, TX4939_RNG_RCSR);
+	/* Start RNG */
+	write_rng(TX4939_RNG_RCSR_ST, rngdev->base, TX4939_RNG_RCSR);
+	rng_io_end();
+	/*
+	 * Drop first two results.  From the datasheet:
+	 * The quality of the random numbers generated immediately
+	 * after reset can be insufficient.  Therefore, do not use
+	 * random numbers obtained from the first and second
+	 * generations; use the ones from the third or subsequent
+	 * generation.
+	 */
+	for (i = 0; i < 2; i++) {
+		rngdev->data_avail = 0;
+		if (!tx4939_rng_data_present(&rngdev->rng, 1))
+			return -EIO;
+	}
+
+	platform_set_drvdata(dev, rngdev);
+	return devm_hwrng_register(&dev->dev, &rngdev->rng);
+}
+
+static struct platform_driver tx4939_rng_driver = {
+	.driver		= {
+		.name	= "tx4939-rng",
+	},
+};
+
+module_platform_driver_probe(tx4939_rng_driver, tx4939_rng_probe);
+
+MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for TX4939");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
new file mode 100644
index 0000000..ffe9b0c
--- /dev/null
+++ b/drivers/char/hw_random/via-rng.c
@@ -0,0 +1,228 @@
+/*
+ * RNG driver for VIA RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <crypto/padlock.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <asm/cpu_device_id.h>
+#include <asm/io.h>
+#include <asm/msr.h>
+#include <asm/cpufeature.h>
+#include <asm/fpu/api.h>
+
+
+#define PFX	KBUILD_MODNAME ": "
+
+enum {
+	VIA_STRFILT_CNT_SHIFT	= 16,
+	VIA_STRFILT_FAIL	= (1 << 15),
+	VIA_STRFILT_ENABLE	= (1 << 14),
+	VIA_RAWBITS_ENABLE	= (1 << 13),
+	VIA_RNG_ENABLE		= (1 << 6),
+	VIA_NOISESRC1		= (1 << 8),
+	VIA_NOISESRC2		= (1 << 9),
+	VIA_XSTORE_CNT_MASK	= 0x0F,
+
+	VIA_RNG_CHUNK_8		= 0x00,	/* 64 rand bits, 64 stored bits */
+	VIA_RNG_CHUNK_4		= 0x01,	/* 32 rand bits, 32 stored bits */
+	VIA_RNG_CHUNK_4_MASK	= 0xFFFFFFFF,
+	VIA_RNG_CHUNK_2		= 0x02,	/* 16 rand bits, 32 stored bits */
+	VIA_RNG_CHUNK_2_MASK	= 0xFFFF,
+	VIA_RNG_CHUNK_1		= 0x03,	/* 8 rand bits, 32 stored bits */
+	VIA_RNG_CHUNK_1_MASK	= 0xFF,
+};
+
+/*
+ * Investigate using the 'rep' prefix to obtain 32 bits of random data
+ * in one insn.  The upside is potentially better performance.  The
+ * downside is that the instruction becomes no longer atomic.  Due to
+ * this, just like familiar issues with /dev/random itself, the worst
+ * case of a 'rep xstore' could potentially pause a cpu for an
+ * unreasonably long time.  In practice, this condition would likely
+ * only occur when the hardware is failing.  (or so we hope :))
+ *
+ * Another possible performance boost may come from simply buffering
+ * until we have 4 bytes, thus returning a u32 at a time,
+ * instead of the current u8-at-a-time.
+ *
+ * Padlock instructions can generate a spurious DNA fault, but the
+ * kernel doesn't use CR0.TS, so this doesn't matter.
+ */
+
+static inline u32 xstore(u32 *addr, u32 edx_in)
+{
+	u32 eax_out;
+
+	asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
+		: "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));
+
+	return eax_out;
+}
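+
+/*
+ * A minimal sketch of the buffering idea floated above: collect four
+ * single-byte xstore results into one u32 so callers would get 32 bits
+ * per call.  This helper is illustrative only and unused by the driver;
+ * the bound of 20 tries with udelay(10) mirrors via_rng_data_present().
+ */
+static inline u32 xstore_u32_buffered(bool *ok)
+{
+	u32 word = 0;
+	int i, tries;
+
+	*ok = true;
+	for (i = 0; i < 4; i++) {
+		u32 datum = 0;
+		u32 cnt = 0;
+
+		/* wait for one valid byte, as reported in eax_out */
+		for (tries = 0; tries < 20 && !cnt; tries++) {
+			cnt = xstore(&datum, VIA_RNG_CHUNK_1) &
+			      VIA_XSTORE_CNT_MASK;
+			if (!cnt)
+				udelay(10);
+		}
+		if (!cnt) {
+			*ok = false;
+			break;
+		}
+		word |= (datum & VIA_RNG_CHUNK_1_MASK) << (i * 8);
+	}
+	return word;
+}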
+
+static int via_rng_data_present(struct hwrng *rng, int wait)
+{
+	char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	u32 *via_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+	u32 bytes_out;
+	int i;
+
+	/* We choose the recommended 1-byte-per-instruction RNG rate,
+	 * for greater randomness at the expense of speed.  Larger
+	 * values 2, 4, or 8 bytes-per-instruction yield greater
+	 * speed at lesser randomness.
+	 *
+	 * If you change this to another VIA_RNG_CHUNK_n, you must also
+	 * change the amount of data returned per read accordingly;
+	 * VIA_RNG_CHUNK_8 requires further code changes.
+	 *
+	 * A copy of MSR_VIA_RNG is placed in eax_out when xstore
+	 * completes.
+	 */
+
+	for (i = 0; i < 20; i++) {
+		*via_rng_datum = 0; /* paranoia, not really necessary */
+		bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
+		bytes_out &= VIA_XSTORE_CNT_MASK;
+		if (bytes_out || !wait)
+			break;
+		udelay(10);
+	}
+	rng->priv = *via_rng_datum;
+	return bytes_out ? 1 : 0;
+}
+
+static int via_rng_data_read(struct hwrng *rng, u32 *data)
+{
+	u32 via_rng_datum = (u32)rng->priv;
+
+	*data = via_rng_datum;
+
+	return 1;
+}
+
+static int via_rng_init(struct hwrng *rng)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+	u32 lo, hi, old_lo;
+
+	/* VIA Nano CPUs don't have the MSR_VIA_RNG anymore.  The RNG
+	 * is always enabled if CPUID rng_en is set.  There is no
+	 * longer any RNG configuration in this register, as there
+	 * used to be. */
+	if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || (c->x86 > 6)) {
+		if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
+			pr_err(PFX "can't enable hardware RNG "
+				"if XSTORE is not enabled\n");
+			return -ENODEV;
+		}
+		return 0;
+	}
+
+	/* Control the RNG via MSR.  Tread lightly and pay very close
+	 * attention to values written, as the reserved fields are
+	 * documented to be "undefined and unpredictable"; but it does
+	 * not say to write them as zero, so I make a guess that we
+	 * restore the values we find in the register.
+	 */
+	rdmsr(MSR_VIA_RNG, lo, hi);
+
+	old_lo = lo;
+	lo &= ~(0x7f << VIA_STRFILT_CNT_SHIFT);
+	lo &= ~VIA_XSTORE_CNT_MASK;
+	lo &= ~(VIA_STRFILT_ENABLE | VIA_STRFILT_FAIL | VIA_RAWBITS_ENABLE);
+	lo |= VIA_RNG_ENABLE;
+	lo |= VIA_NOISESRC1;
+
+	/* Enable secondary noise source on CPUs where it is present. */
+
+	/* Nehemiah stepping 8 and higher */
+	if ((c->x86_model == 9) && (c->x86_stepping > 7))
+		lo |= VIA_NOISESRC2;
+
+	/* Esther */
+	if (c->x86_model >= 10)
+		lo |= VIA_NOISESRC2;
+
+	if (lo != old_lo)
+		wrmsr(MSR_VIA_RNG, lo, hi);
+
+	/* Perhaps-unnecessary sanity check; remove after testing if
+	 * unneeded.
+	 */
+	rdmsr(MSR_VIA_RNG, lo, hi);
+	if ((lo & VIA_RNG_ENABLE) == 0) {
+		pr_err(PFX "cannot enable VIA C3 RNG, aborting\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+
+static struct hwrng via_rng = {
+	.name		= "via",
+	.init		= via_rng_init,
+	.data_present	= via_rng_data_present,
+	.data_read	= via_rng_data_read,
+};
+
+
+static int __init mod_init(void)
+{
+	int err;
+
+	if (!boot_cpu_has(X86_FEATURE_XSTORE))
+		return -ENODEV;
+
+	pr_info("VIA RNG detected\n");
+	err = hwrng_register(&via_rng);
+	if (err)
+		pr_err(PFX "RNG registering failed (%d)\n", err);
+
+	return err;
+}
+
+static void __exit mod_exit(void)
+{
+	hwrng_unregister(&via_rng);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
+	X86_FEATURE_MATCH(X86_FEATURE_XSTORE),
+	{}
+};
+
+MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id);
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
new file mode 100644
index 0000000..b89df66
--- /dev/null
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -0,0 +1,231 @@
+/*
+ * Randomness driver for virtio
+ *  Copyright (C) 2007, 2008 Rusty Russell IBM Corporation
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
+ */
+
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/virtio.h>
+#include <linux/virtio_rng.h>
+#include <linux/module.h>
+
+static DEFINE_IDA(rng_index_ida);
+
+struct virtrng_info {
+	struct hwrng hwrng;
+	struct virtqueue *vq;
+	struct completion have_data;
+	char name[25];
+	unsigned int data_avail;
+	int index;
+	bool busy;
+	bool hwrng_register_done;
+	bool hwrng_removed;
+};
+
+static void random_recv_done(struct virtqueue *vq)
+{
+	struct virtrng_info *vi = vq->vdev->priv;
+
+	/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
+	if (!virtqueue_get_buf(vi->vq, &vi->data_avail))
+		return;
+
+	complete(&vi->have_data);
+}
+
+/* The host will fill any buffer we give it with sweet, sweet randomness. */
+static void register_buffer(struct virtrng_info *vi, u8 *buf, size_t size)
+{
+	struct scatterlist sg;
+
+	sg_init_one(&sg, buf, size);
+
+	/* There should always be room for one buffer. */
+	virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL);
+
+	virtqueue_kick(vi->vq);
+}
+
+static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
+{
+	int ret;
+	struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
+
+	if (vi->hwrng_removed)
+		return -ENODEV;
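+	/* Post a fresh buffer only when the previous one has completed */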
+
+	if (!vi->busy) {
+		vi->busy = true;
+		init_completion(&vi->have_data);
+		register_buffer(vi, buf, size);
+	}
+
+	if (!wait)
+		return 0;
+
+	ret = wait_for_completion_killable(&vi->have_data);
+	if (ret < 0)
+		return ret;
+
+	vi->busy = false;
+
+	return vi->data_avail;
+}
+
+static void virtio_cleanup(struct hwrng *rng)
+{
+	struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
+
+	if (vi->busy)
+		wait_for_completion(&vi->have_data);
+}
+
+static int probe_common(struct virtio_device *vdev)
+{
+	int err, index;
+	struct virtrng_info *vi = NULL;
+
+	vi = kzalloc(sizeof(struct virtrng_info), GFP_KERNEL);
+	if (!vi)
+		return -ENOMEM;
+
+	vi->index = index = ida_simple_get(&rng_index_ida, 0, 0, GFP_KERNEL);
+	if (index < 0) {
+		err = index;
+		goto err_ida;
+	}
+	sprintf(vi->name, "virtio_rng.%d", index);
+	init_completion(&vi->have_data);
+
+	vi->hwrng = (struct hwrng) {
+		.read = virtio_read,
+		.cleanup = virtio_cleanup,
+		.priv = (unsigned long)vi,
+		.name = vi->name,
+		.quality = 1000,
+	};
+	vdev->priv = vi;
+
+	/* We expect a single virtqueue. */
+	vi->vq = virtio_find_single_vq(vdev, random_recv_done, "input");
+	if (IS_ERR(vi->vq)) {
+		err = PTR_ERR(vi->vq);
+		goto err_find;
+	}
+
+	return 0;
+
+err_find:
+	ida_simple_remove(&rng_index_ida, index);
+err_ida:
+	kfree(vi);
+	return err;
+}
+
+static void remove_common(struct virtio_device *vdev)
+{
+	struct virtrng_info *vi = vdev->priv;
+
+	vi->hwrng_removed = true;
+	vi->data_avail = 0;
+	complete(&vi->have_data);
+	vdev->config->reset(vdev);
+	vi->busy = false;
+	if (vi->hwrng_register_done)
+		hwrng_unregister(&vi->hwrng);
+	vdev->config->del_vqs(vdev);
+	ida_simple_remove(&rng_index_ida, vi->index);
+	kfree(vi);
+}
+
+static int virtrng_probe(struct virtio_device *vdev)
+{
+	return probe_common(vdev);
+}
+
+static void virtrng_remove(struct virtio_device *vdev)
+{
+	remove_common(vdev);
+}
+
+static void virtrng_scan(struct virtio_device *vdev)
+{
+	struct virtrng_info *vi = vdev->priv;
+	int err;
+
+	err = hwrng_register(&vi->hwrng);
+	if (!err)
+		vi->hwrng_register_done = true;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtrng_freeze(struct virtio_device *vdev)
+{
+	remove_common(vdev);
+	return 0;
+}
+
+static int virtrng_restore(struct virtio_device *vdev)
+{
+	int err;
+
+	err = probe_common(vdev);
+	if (!err) {
+		struct virtrng_info *vi = vdev->priv;
+
+		/*
+		 * Set hwrng_removed to ensure that virtio_read()
+		 * does not block waiting for data before the
+		 * registration is complete.
+		 */
+		vi->hwrng_removed = true;
+		err = hwrng_register(&vi->hwrng);
+		if (!err) {
+			vi->hwrng_register_done = true;
+			vi->hwrng_removed = false;
+		}
+	}
+
+	return err;
+}
+#endif
+
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static struct virtio_driver virtio_rng_driver = {
+	.driver.name =	KBUILD_MODNAME,
+	.driver.owner =	THIS_MODULE,
+	.id_table =	id_table,
+	.probe =	virtrng_probe,
+	.remove =	virtrng_remove,
+	.scan =		virtrng_scan,
+#ifdef CONFIG_PM_SLEEP
+	.freeze =	virtrng_freeze,
+	.restore =	virtrng_restore,
+#endif
+};
+
+module_virtio_driver(virtio_rng_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio random number driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
new file mode 100644
index 0000000..7175579
--- /dev/null
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -0,0 +1,432 @@
+/*
+ * APM X-Gene SoC RNG Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Rameshwar Prasad Sahu <rsahu@apm.com>
+ *	   Shamal Winchurkar <swinchurkar@apm.com>
+ *	   Feng Kan <fkan@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/timer.h>
+
+#define RNG_MAX_DATUM			4
+#define MAX_TRY				100
+#define XGENE_RNG_RETRY_COUNT		20
+#define XGENE_RNG_RETRY_INTERVAL	10
+
+/* RNG  Registers */
+#define RNG_INOUT_0			0x00
+#define RNG_INTR_STS_ACK		0x10
+#define RNG_CONTROL			0x14
+#define RNG_CONFIG			0x18
+#define RNG_ALARMCNT			0x1c
+#define RNG_FROENABLE			0x20
+#define RNG_FRODETUNE			0x24
+#define RNG_ALARMMASK			0x28
+#define RNG_ALARMSTOP			0x2c
+#define RNG_OPTIONS			0x78
+#define RNG_EIP_REV			0x7c
+
+#define MONOBIT_FAIL_MASK		BIT(7)
+#define POKER_FAIL_MASK			BIT(6)
+#define LONG_RUN_FAIL_MASK		BIT(5)
+#define RUN_FAIL_MASK			BIT(4)
+#define NOISE_FAIL_MASK			BIT(3)
+#define STUCK_OUT_MASK			BIT(2)
+#define SHUTDOWN_OFLO_MASK		BIT(1)
+#define READY_MASK			BIT(0)
+
+#define MAJOR_HW_REV_RD(src)		(((src) & 0x0f000000) >> 24)
+#define MINOR_HW_REV_RD(src)		(((src) & 0x00f00000) >> 20)
+#define HW_PATCH_LEVEL_RD(src)		(((src) & 0x000f0000) >> 16)
+#define MAX_REFILL_CYCLES_SET(dst, src) \
+			((dst & ~0xffff0000) | (((u32)src << 16) & 0xffff0000))
+#define MIN_REFILL_CYCLES_SET(dst, src) \
+			((dst & ~0x000000ff) | (((u32)src) & 0x000000ff))
+#define ALARM_THRESHOLD_SET(dst, src) \
+			((dst & ~0x000000ff) | (((u32)src) & 0x000000ff))
+#define ENABLE_RNG_SET(dst, src) \
+			((dst & ~BIT(10)) | (((u32)src << 10) & BIT(10)))
+#define REGSPEC_TEST_MODE_SET(dst, src) \
+			((dst & ~BIT(8)) | (((u32)src << 8) & BIT(8)))
+#define MONOBIT_FAIL_MASK_SET(dst, src) \
+			((dst & ~BIT(7)) | (((u32)src << 7) & BIT(7)))
+#define POKER_FAIL_MASK_SET(dst, src) \
+			((dst & ~BIT(6)) | (((u32)src << 6) & BIT(6)))
+#define LONG_RUN_FAIL_MASK_SET(dst, src) \
+			((dst & ~BIT(5)) | (((u32)src << 5) & BIT(5)))
+#define RUN_FAIL_MASK_SET(dst, src) \
+			((dst & ~BIT(4)) | (((u32)src << 4) & BIT(4)))
+#define NOISE_FAIL_MASK_SET(dst, src) \
+			((dst & ~BIT(3)) | (((u32)src << 3) & BIT(3)))
+#define STUCK_OUT_MASK_SET(dst, src) \
+			((dst & ~BIT(2)) | (((u32)src << 2) & BIT(2)))
+#define SHUTDOWN_OFLO_MASK_SET(dst, src) \
+			((dst & ~BIT(1)) | (((u32)src << 1) & BIT(1)))
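+
+/*
+ * Each *_SET macro is a read-modify-write on a single field: clear the
+ * field in @dst, then OR in @src shifted into position.  For example,
+ * building up RNG_CONTROL from zero:
+ *
+ *	val = ENABLE_RNG_SET(0, 1);		// val == BIT(10) == 0x400
+ *	val = MONOBIT_FAIL_MASK_SET(val, 1);	// val == 0x480
+ */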
+
+struct xgene_rng_dev {
+	u32 irq;
+	void  __iomem *csr_base;
+	u32 revision;
+	u32 datum_size;
+	u32 failure_cnt;	/* Failure count within the last minute */
+	unsigned long failure_ts;/* First failure timestamp */
+	struct timer_list failure_timer;
+	struct device *dev;
+	struct clk *clk;
+};
+
+static void xgene_rng_expired_timer(struct timer_list *t)
+{
+	struct xgene_rng_dev *ctx = from_timer(ctx, t, failure_timer);
+
+	/* Clear failure counter as timer expired */
+	disable_irq(ctx->irq);
+	ctx->failure_cnt = 0;
+	del_timer(&ctx->failure_timer);
+	enable_irq(ctx->irq);
+}
+
+static void xgene_rng_start_timer(struct xgene_rng_dev *ctx)
+{
+	ctx->failure_timer.expires = jiffies + 120 * HZ;
+	add_timer(&ctx->failure_timer);
+}
+
+/*
+ * Initialize or reinit free running oscillators (FROs)
+ */
+static void xgene_rng_init_fro(struct xgene_rng_dev *ctx, u32 fro_val)
+{
+	writel(fro_val, ctx->csr_base + RNG_FRODETUNE);
+	writel(0x00000000, ctx->csr_base + RNG_ALARMMASK);
+	writel(0x00000000, ctx->csr_base + RNG_ALARMSTOP);
+	writel(0xFFFFFFFF, ctx->csr_base + RNG_FROENABLE);
+}
+
+static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx)
+{
+	u32 val;
+
+	val = readl(ctx->csr_base + RNG_INTR_STS_ACK);
+	if (val & MONOBIT_FAIL_MASK)
+		/*
+		 * LFSR detected an out-of-bounds number of 1s after
+		 * checking 20,000 bits (test T1 as specified in the
+		 * AIS-31 standard)
+		 */
+		dev_err(ctx->dev, "test monobit failure error 0x%08X\n", val);
+	if (val & POKER_FAIL_MASK)
+		/*
+		 * LFSR detected an out-of-bounds value in at least one
+		 * of the 16 poker_count_X counters or an out of bounds sum
+		 * of squares value after checking 20,000 bits (test T2 as
+		 * specified in the AIS-31 standard)
+		 */
+		dev_err(ctx->dev, "test poker failure error 0x%08X\n", val);
+	if (val & LONG_RUN_FAIL_MASK)
+		/*
+		 * LFSR detected a sequence of 34 identical bits
+		 * (test T4 as specified in the AIS-31 standard)
+		 */
+		dev_err(ctx->dev, "test long run failure error 0x%08X\n", val);
+	if (val & RUN_FAIL_MASK)
+		/*
+		 * LFSR detected an out-of-bounds value for at least one
+		 * of the running counters after checking 20,000 bits
+		 * (test T3 as specified in the AIS-31 standard)
+		 */
+		dev_err(ctx->dev, "test run failure error 0x%08X\n", val);
+	if (val & NOISE_FAIL_MASK)
+		/* LFSR detected a sequence of 48 identical bits */
+		dev_err(ctx->dev, "noise failure error 0x%08X\n", val);
+	if (val & STUCK_OUT_MASK)
+		/*
+		 * The output data registers generated the same value twice
+		 * in a row
+		 */
+		dev_err(ctx->dev, "stuck out failure error 0x%08X\n", val);
+
+	if (val & SHUTDOWN_OFLO_MASK) {
+		u32 frostopped;
+
+		/* FROs shut down after a second error event. Try to recover. */
+		if (++ctx->failure_cnt == 1) {
+			/* 1st time, just recover */
+			ctx->failure_ts = jiffies;
+			frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
+			xgene_rng_init_fro(ctx, frostopped);
+
+			/*
+			 * We must start a timer to clear out this error
+			 * in case the system timer wraps around
+			 */
+			xgene_rng_start_timer(ctx);
+		} else {
+			/* 2nd failure in less than 1 minute? */
+			if (time_after(ctx->failure_ts + 60 * HZ, jiffies)) {
+				dev_err(ctx->dev,
+					"FRO shutdown failure error 0x%08X\n",
+					val);
+			} else {
+				/* 2nd failure after more than 1 minute; recover */
+				ctx->failure_ts = jiffies;
+				ctx->failure_cnt = 1;
+				/*
+				 * We must start a timer to clear out this
+				 * error in case the system timer wraps
+				 * around
+				 */
+				xgene_rng_start_timer(ctx);
+			}
+			frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
+			xgene_rng_init_fro(ctx, frostopped);
+		}
+	}
+	/* Clear them all */
+	writel(val, ctx->csr_base + RNG_INTR_STS_ACK);
+}
+
+static irqreturn_t xgene_rng_irq_handler(int irq, void *id)
+{
+	struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) id;
+
+	/* RNG Alarm Counter overflow */
+	xgene_rng_chk_overflow(ctx);
+
+	return IRQ_HANDLED;
+}
+
+static int xgene_rng_data_present(struct hwrng *rng, int wait)
+{
+	struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+	u32 i, val = 0;
+
+	for (i = 0; i < XGENE_RNG_RETRY_COUNT; i++) {
+		val = readl(ctx->csr_base + RNG_INTR_STS_ACK);
+		if ((val & READY_MASK) || !wait)
+			break;
+		udelay(XGENE_RNG_RETRY_INTERVAL);
+	}
+
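+	/* worst case: up to XGENE_RNG_RETRY_COUNT * 10 us (~200 us) of polling */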
+	return (val & READY_MASK);
+}
+
+static int xgene_rng_data_read(struct hwrng *rng, u32 *data)
+{
+	struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+	int i;
+
+	for (i = 0; i < ctx->datum_size; i++)
+		data[i] = readl(ctx->csr_base + RNG_INOUT_0 + i * 4);
+
+	/* Clear ready bit to start next transaction */
+	writel(READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK);
+
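+	/* datum_size counts 32-bit words; the hwrng core expects bytes */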
+	return ctx->datum_size << 2;
+}
+
+static void xgene_rng_init_internal(struct xgene_rng_dev *ctx)
+{
+	u32 val;
+
+	writel(0x00000000, ctx->csr_base + RNG_CONTROL);
+
+	val = MAX_REFILL_CYCLES_SET(0, 10);
+	val = MIN_REFILL_CYCLES_SET(val, 10);
+	writel(val, ctx->csr_base + RNG_CONFIG);
+
+	val = ALARM_THRESHOLD_SET(0, 0xFF);
+	writel(val, ctx->csr_base + RNG_ALARMCNT);
+
+	xgene_rng_init_fro(ctx, 0);
+
+	writel(MONOBIT_FAIL_MASK |
+		POKER_FAIL_MASK	|
+		LONG_RUN_FAIL_MASK |
+		RUN_FAIL_MASK |
+		NOISE_FAIL_MASK |
+		STUCK_OUT_MASK |
+		SHUTDOWN_OFLO_MASK |
+		READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK);
+
+	val = ENABLE_RNG_SET(0, 1);
+	val = MONOBIT_FAIL_MASK_SET(val, 1);
+	val = POKER_FAIL_MASK_SET(val, 1);
+	val = LONG_RUN_FAIL_MASK_SET(val, 1);
+	val = RUN_FAIL_MASK_SET(val, 1);
+	val = NOISE_FAIL_MASK_SET(val, 1);
+	val = STUCK_OUT_MASK_SET(val, 1);
+	val = SHUTDOWN_OFLO_MASK_SET(val, 1);
+	writel(val, ctx->csr_base + RNG_CONTROL);
+}
+
+static int xgene_rng_init(struct hwrng *rng)
+{
+	struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+
+	ctx->failure_cnt = 0;
+	timer_setup(&ctx->failure_timer, xgene_rng_expired_timer, 0);
+
+	ctx->revision = readl(ctx->csr_base + RNG_EIP_REV);
+
+	dev_dbg(ctx->dev, "Rev %d.%d.%d\n",
+		MAJOR_HW_REV_RD(ctx->revision),
+		MINOR_HW_REV_RD(ctx->revision),
+		HW_PATCH_LEVEL_RD(ctx->revision));
+
+	dev_dbg(ctx->dev, "Options 0x%08X",
+		readl(ctx->csr_base + RNG_OPTIONS));
+
+	xgene_rng_init_internal(ctx);
+
+	ctx->datum_size = RNG_MAX_DATUM;
+
+	return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_rng_acpi_match[] = {
+	{ "APMC0D18", },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_rng_acpi_match);
+#endif
+
+static struct hwrng xgene_rng_func = {
+	.name		= "xgene-rng",
+	.init		= xgene_rng_init,
+	.data_present	= xgene_rng_data_present,
+	.data_read	= xgene_rng_data_read,
+};
+
+static int xgene_rng_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct xgene_rng_dev *ctx;
+	int rc = 0;
+
+	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->dev = &pdev->dev;
+	platform_set_drvdata(pdev, ctx);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ctx->csr_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ctx->csr_base))
+		return PTR_ERR(ctx->csr_base);
+
+	rc = platform_get_irq(pdev, 0);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "No IRQ resource\n");
+		return rc;
+	}
+	ctx->irq = rc;
+
+	dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",
+		ctx->csr_base, ctx->irq);
+
+	rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0,
+				dev_name(&pdev->dev), ctx);
+	if (rc) {
+		dev_err(&pdev->dev, "Could not request RNG alarm IRQ\n");
+		return rc;
+	}
+
+	/* Enable IP clock */
+	ctx->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(ctx->clk)) {
+		dev_warn(&pdev->dev, "Couldn't get the clock for RNG\n");
+	} else {
+		rc = clk_prepare_enable(ctx->clk);
+		if (rc) {
+			dev_warn(&pdev->dev,
+				 "clock prepare enable failed for RNG");
+			return rc;
+		}
+	}
+
+	xgene_rng_func.priv = (unsigned long) ctx;
+
+	rc = hwrng_register(&xgene_rng_func);
+	if (rc) {
+		dev_err(&pdev->dev, "RNG registering failed error %d\n", rc);
+		if (!IS_ERR(ctx->clk))
+			clk_disable_unprepare(ctx->clk);
+		return rc;
+	}
+
+	rc = device_init_wakeup(&pdev->dev, 1);
+	if (rc) {
+		dev_err(&pdev->dev, "RNG device_init_wakeup failed error %d\n",
+			rc);
+		if (!IS_ERR(ctx->clk))
+			clk_disable_unprepare(ctx->clk);
+		hwrng_unregister(&xgene_rng_func);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int xgene_rng_remove(struct platform_device *pdev)
+{
+	struct xgene_rng_dev *ctx = platform_get_drvdata(pdev);
+	int rc;
+
+	rc = device_init_wakeup(&pdev->dev, 0);
+	if (rc)
+		dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc);
+	if (!IS_ERR(ctx->clk))
+		clk_disable_unprepare(ctx->clk);
+	hwrng_unregister(&xgene_rng_func);
+
+	return rc;
+}
+
+static const struct of_device_id xgene_rng_of_match[] = {
+	{ .compatible = "apm,xgene-rng" },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, xgene_rng_of_match);
+
+static struct platform_driver xgene_rng_driver = {
+	.probe = xgene_rng_probe,
+	.remove	= xgene_rng_remove,
+	.driver = {
+		.name		= "xgene-rng",
+		.of_match_table = xgene_rng_of_match,
+		.acpi_match_table = ACPI_PTR(xgene_rng_acpi_match),
+	},
+};
+
+module_platform_driver(xgene_rng_driver);
+MODULE_DESCRIPTION("APM X-Gene RNG driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
new file mode 100644
index 0000000..c108441
--- /dev/null
+++ b/drivers/char/ipmi/Kconfig
@@ -0,0 +1,128 @@
+#
+# IPMI device configuration
+#
+
+menuconfig IPMI_HANDLER
+       tristate 'IPMI top-level message handler'
+       depends on HAS_IOMEM
+       select IPMI_DMI_DECODE if DMI
+       help
+         This enables the central IPMI message handler, required for IPMI
+	 to work.
+
+         IPMI is a standard for managing sensors (temperature,
+         voltage, etc.) in a system.
+
+         See <file:Documentation/IPMI.txt> for more details on the driver.
+
+	 If unsure, say N.
+
+config IPMI_DMI_DECODE
+       bool
+
+if IPMI_HANDLER
+
+config IPMI_PANIC_EVENT
+       bool 'Generate a panic event to all BMCs on a panic'
+       help
+	 When a panic occurs, this will cause the IPMI message handler to,
+	 by default, generate an IPMI event describing the panic to each
+	 interface registered with the message handler.  This is always
+	 available; the ipmi_msghandler module parameter named panic_op
+	 can be set to "event" to choose this behavior at runtime.  This
+	 config option simply makes "event" the default.
+
+config IPMI_PANIC_STRING
+	bool 'Generate OEM events containing the panic string'
+	depends on IPMI_PANIC_EVENT
+	help
+	  When a panic occurs, this will cause the IPMI message handler to,
+	  by default, generate IPMI OEM type f0 events holding the IPMB
+	  address of the panic generator (byte 4 of the event), a sequence
+	  number for the string (byte 5 of the event) and part of the
+	  string (the rest of the event).  Bytes 1, 2, and 3 are the normal
+	  usage for an OEM event.  You can fetch these events and use the
+	  sequence numbers to piece the string together.  The
+	  ipmi_msghandler module parameter named panic_op can be set to
+	  "string" to choose this behavior at runtime; this config
+	  option simply makes "string" the default.
+
+config IPMI_DEVICE_INTERFACE
+       tristate 'Device interface for IPMI'
+       help
+         This provides an IOCTL interface to the IPMI message handler so
+	 userland processes may use IPMI.  It supports poll() and select().
+
+config IPMI_SI
+       tristate 'IPMI System Interface handler'
+       help
+         Provides a driver for System Interfaces (KCS, SMIC, BT).
+	 Currently, only KCS and SMIC are supported.  If
+	 you are using IPMI, you should probably say "y" here.
+
+config IPMI_SSIF
+       tristate 'IPMI SMBus handler (SSIF)'
+       select I2C
+       help
+         Provides a driver for an SMBus interface to a BMC, meaning the
+	 BMC is accessed over an I2C bus instead of a standard register
+	 interface.  This module requires I2C support.
+
+config IPMI_POWERNV
+       depends on PPC_POWERNV
+       tristate 'POWERNV (OPAL firmware) IPMI interface'
+       help
+         Provides a driver for OPAL firmware-based IPMI interfaces.
+
+config IPMI_WATCHDOG
+       tristate 'IPMI Watchdog Timer'
+       help
+         This enables the IPMI watchdog timer.
+
+config IPMI_POWEROFF
+       tristate 'IPMI Poweroff'
+       help
+         This enables a function to power off the system with IPMI if
+	 the IPMI management controller is capable of this.
+
+endif # IPMI_HANDLER
+
+config IPMI_KCS_BMC
+	tristate
+
+config ASPEED_KCS_IPMI_BMC
+	depends on ARCH_ASPEED || COMPILE_TEST
+	select IPMI_KCS_BMC
+	select REGMAP_MMIO
+	tristate "Aspeed KCS IPMI BMC driver"
+	help
+	  Provides a driver for the KCS (Keyboard Controller Style) IPMI
+	  interface found on Aspeed SOCs (AST2400 and AST2500).
+
+	  The driver implements the BMC side of the KCS controller; it
+	  provides the BMC with access to the KCS I/O space.
+
+config NPCM7XX_KCS_IPMI_BMC
+	depends on ARCH_NPCM7XX || COMPILE_TEST
+	select IPMI_KCS_BMC
+	select REGMAP_MMIO
+	tristate "NPCM7xx KCS IPMI BMC driver"
+	help
+	  Provides a driver for the KCS (Keyboard Controller Style) IPMI
+	  interface found on Nuvoton NPCM7xx SOCs.
+
+	  The driver implements the BMC side of the KCS controller; it
+	  provides the BMC with access to the KCS I/O space.
+
+	  This support is also available as a module.  If so, the module
+	  will be called kcs_bmc_npcm7xx.
+
+config ASPEED_BT_IPMI_BMC
+	depends on ARCH_ASPEED || COMPILE_TEST
+	depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
+	tristate "BT IPMI BMC driver"
+	help
+	  Provides a driver for the BT (Block Transfer) IPMI interface
+	  found on Aspeed SOCs (AST2400 and AST2500). The driver
+	  implements the BMC side of the BT interface.
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
new file mode 100644
index 0000000..7a3baf3
--- /dev/null
+++ b/drivers/char/ipmi/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the ipmi drivers.
+#
+
+ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o \
+	ipmi_si_hotmod.o ipmi_si_hardcode.o ipmi_si_platform.o \
+	ipmi_si_port_io.o ipmi_si_mem_io.o
+ifdef CONFIG_PCI
+ipmi_si-y += ipmi_si_pci.o
+endif
+ifdef CONFIG_PARISC
+ipmi_si-y += ipmi_si_parisc.o
+endif
+
+obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
+obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
+obj-$(CONFIG_IPMI_SI) += ipmi_si.o
+obj-$(CONFIG_IPMI_DMI_DECODE) += ipmi_dmi.o
+obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
+obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
+obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
+obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
+obj-$(CONFIG_IPMI_KCS_BMC) += kcs_bmc.o
+obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o
+obj-$(CONFIG_ASPEED_KCS_IPMI_BMC) += kcs_bmc_aspeed.o
+obj-$(CONFIG_NPCM7XX_KCS_IPMI_BMC) += kcs_bmc_npcm7xx.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
new file mode 100644
index 0000000..40b9927
--- /dev/null
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2015-2016, IBM Corporation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bt-bmc.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/regmap.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+/*
+ * This is a BMC device used to communicate with the host
+ */
+#define DEVICE_NAME	"ipmi-bt-host"
+
+#define BT_IO_BASE	0xe4
+#define BT_IRQ		10
+
+#define BT_CR0		0x0
+#define   BT_CR0_IO_BASE		16
+#define   BT_CR0_IRQ			12
+#define   BT_CR0_EN_CLR_SLV_RDP		0x8
+#define   BT_CR0_EN_CLR_SLV_WRP		0x4
+#define   BT_CR0_ENABLE_IBT		0x1
+#define BT_CR1		0x4
+#define   BT_CR1_IRQ_H2B	0x01
+#define   BT_CR1_IRQ_HBUSY	0x40
+#define BT_CR2		0x8
+#define   BT_CR2_IRQ_H2B	0x01
+#define   BT_CR2_IRQ_HBUSY	0x40
+#define BT_CR3		0xc
+#define BT_CTRL		0x10
+#define   BT_CTRL_B_BUSY		0x80
+#define   BT_CTRL_H_BUSY		0x40
+#define   BT_CTRL_OEM0			0x20
+#define   BT_CTRL_SMS_ATN		0x10
+#define   BT_CTRL_B2H_ATN		0x08
+#define   BT_CTRL_H2B_ATN		0x04
+#define   BT_CTRL_CLR_RD_PTR		0x02
+#define   BT_CTRL_CLR_WR_PTR		0x01
+#define BT_BMC2HOST	0x14
+#define BT_INTMASK	0x18
+#define   BT_INTMASK_B2H_IRQEN		0x01
+#define   BT_INTMASK_B2H_IRQ		0x02
+#define   BT_INTMASK_BMC_HWRST		0x80
+
+#define BT_BMC_BUFFER_SIZE 256
+
+struct bt_bmc {
+	struct device		dev;
+	struct miscdevice	miscdev;
+	struct regmap		*map;
+	int			offset;
+	int			irq;
+	wait_queue_head_t	queue;
+	struct timer_list	poll_timer;
+	struct mutex		mutex;
+};
+
+static atomic_t open_count = ATOMIC_INIT(0);
+
+static const struct regmap_config bt_regmap_cfg = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+};
+
+static u8 bt_inb(struct bt_bmc *bt_bmc, int reg)
+{
+	uint32_t val = 0;
+	int rc;
+
+	rc = regmap_read(bt_bmc->map, bt_bmc->offset + reg, &val);
+	WARN(rc != 0, "regmap_read() failed: %d\n", rc);
+
+	return rc == 0 ? (u8) val : 0;
+}
+
+static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg)
+{
+	int rc;
+
+	rc = regmap_write(bt_bmc->map, bt_bmc->offset + reg, data);
+	WARN(rc != 0, "regmap_write() failed: %d\n", rc);
+}
+
+static void clr_rd_ptr(struct bt_bmc *bt_bmc)
+{
+	bt_outb(bt_bmc, BT_CTRL_CLR_RD_PTR, BT_CTRL);
+}
+
+static void clr_wr_ptr(struct bt_bmc *bt_bmc)
+{
+	bt_outb(bt_bmc, BT_CTRL_CLR_WR_PTR, BT_CTRL);
+}
+
+static void clr_h2b_atn(struct bt_bmc *bt_bmc)
+{
+	bt_outb(bt_bmc, BT_CTRL_H2B_ATN, BT_CTRL);
+}
+
+static void set_b_busy(struct bt_bmc *bt_bmc)
+{
+	if (!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY))
+		bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void clr_b_busy(struct bt_bmc *bt_bmc)
+{
+	if (bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY)
+		bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void set_b2h_atn(struct bt_bmc *bt_bmc)
+{
+	bt_outb(bt_bmc, BT_CTRL_B2H_ATN, BT_CTRL);
+}
+
+static u8 bt_read(struct bt_bmc *bt_bmc)
+{
+	return bt_inb(bt_bmc, BT_BMC2HOST);
+}
+
+static ssize_t bt_readn(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		buf[i] = bt_read(bt_bmc);
+	return n;
+}
+
+static void bt_write(struct bt_bmc *bt_bmc, u8 c)
+{
+	bt_outb(bt_bmc, c, BT_BMC2HOST);
+}
+
+static ssize_t bt_writen(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		bt_write(bt_bmc, buf[i]);
+	return n;
+}
+
+static void set_sms_atn(struct bt_bmc *bt_bmc)
+{
+	bt_outb(bt_bmc, BT_CTRL_SMS_ATN, BT_CTRL);
+}
+
+static struct bt_bmc *file_bt_bmc(struct file *file)
+{
+	return container_of(file->private_data, struct bt_bmc, miscdev);
+}
+
+static int bt_bmc_open(struct inode *inode, struct file *file)
+{
+	struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+	if (atomic_inc_return(&open_count) == 1) {
+		clr_b_busy(bt_bmc);
+		return 0;
+	}
+
+	atomic_dec(&open_count);
+	return -EBUSY;
+}
+
+/*
+ * The BT (Block Transfer) interface means that entire messages are
+ * buffered by the host before a notification is sent to the BMC that
+ * there is data to be read. The first byte is the length and the
+ * message data follows. The read operation just tries to capture the
+ * whole message before returning it to userspace.
+ *
+ * BT Message format :
+ *
+ *    Byte 1  Byte 2     Byte 3  Byte 4  Byte 5:N
+ *    Length  NetFn/LUN  Seq     Cmd     Data
+ *
+ */
+static ssize_t bt_bmc_read(struct file *file, char __user *buf,
+			   size_t count, loff_t *ppos)
+{
+	struct bt_bmc *bt_bmc = file_bt_bmc(file);
+	u8 len;
+	int len_byte = 1;
+	u8 kbuffer[BT_BMC_BUFFER_SIZE];
+	ssize_t ret = 0;
+	ssize_t nread;
+
+	WARN_ON(*ppos);
+
+	if (wait_event_interruptible(bt_bmc->queue,
+				     bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))
+		return -ERESTARTSYS;
+
+	mutex_lock(&bt_bmc->mutex);
+
+	if (unlikely(!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))) {
+		ret = -EIO;
+		goto out_unlock;
+	}
+
+	set_b_busy(bt_bmc);
+	clr_h2b_atn(bt_bmc);
+	clr_rd_ptr(bt_bmc);
+
+	/*
+	 * The BT frames start with the message length, which does not
+	 * include the length byte.
+	 */
+	kbuffer[0] = bt_read(bt_bmc);
+	len = kbuffer[0];
+
+	/* We pass the length back to userspace as well */
+	if (len + 1 > count)
+		len = count - 1;
+
+	while (len) {
+		nread = min_t(ssize_t, len, sizeof(kbuffer) - len_byte);
+
+		bt_readn(bt_bmc, kbuffer + len_byte, nread);
+
+		if (copy_to_user(buf, kbuffer, nread + len_byte)) {
+			ret = -EFAULT;
+			break;
+		}
+		len -= nread;
+		buf += nread + len_byte;
+		ret += nread + len_byte;
+		len_byte = 0;
+	}
+
+	clr_b_busy(bt_bmc);
+
+out_unlock:
+	mutex_unlock(&bt_bmc->mutex);
+	return ret;
+}
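+
+/*
+ * Illustrative userspace sketch (not part of this driver): reading one
+ * BT request from the device node.  The length byte is passed through,
+ * so buf[0] is the length and the message follows it:
+ *
+ *	int fd = open("/dev/ipmi-bt-host", O_RDWR);
+ *	uint8_t buf[256];
+ *	ssize_t n = read(fd, buf, sizeof(buf));	// blocks until H2B_ATN
+ *	// buf[0] = length, buf[1] = NetFn/LUN, buf[2] = seq, buf[3] = cmd
+ */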
+
+/*
+ * BT Message response format :
+ *
+ *    Byte 1  Byte 2     Byte 3  Byte 4  Byte 5  Byte 6:N
+ *    Length  NetFn/LUN  Seq     Cmd     Code    Data
+ */
+static ssize_t bt_bmc_write(struct file *file, const char __user *buf,
+			    size_t count, loff_t *ppos)
+{
+	struct bt_bmc *bt_bmc = file_bt_bmc(file);
+	u8 kbuffer[BT_BMC_BUFFER_SIZE];
+	ssize_t ret = 0;
+	ssize_t nwritten;
+
+	/*
+	 * Require at least the minimum 5-byte BT response
+	 */
+	if (count < 5)
+		return -EINVAL;
+
+	WARN_ON(*ppos);
+
+	/*
+	 * There's no interrupt for clearing bmc busy so we have to
+	 * poll
+	 */
+	if (wait_event_interruptible(bt_bmc->queue,
+				     !(bt_inb(bt_bmc, BT_CTRL) &
+				       (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))))
+		return -ERESTARTSYS;
+
+	mutex_lock(&bt_bmc->mutex);
+
+	if (unlikely(bt_inb(bt_bmc, BT_CTRL) &
+		     (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) {
+		ret = -EIO;
+		goto out_unlock;
+	}
+
+	clr_wr_ptr(bt_bmc);
+
+	while (count) {
+		nwritten = min_t(ssize_t, count, sizeof(kbuffer));
+		if (copy_from_user(&kbuffer, buf, nwritten)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		bt_writen(bt_bmc, kbuffer, nwritten);
+
+		count -= nwritten;
+		buf += nwritten;
+		ret += nwritten;
+	}
+
+	set_b2h_atn(bt_bmc);
+
+out_unlock:
+	mutex_unlock(&bt_bmc->mutex);
+	return ret;
+}
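+
+/*
+ * Illustrative userspace sketch (not part of this driver): sending a
+ * response.  The frame includes the length byte, and anything shorter
+ * than the 5-byte minimum is rejected with -EINVAL (the variables here
+ * echo the header bytes of the request being answered):
+ *
+ *	uint8_t rsp[] = { 4, netfn_lun | 0x04, seq, cmd, 0x00 };
+ *	write(fd, rsp, sizeof(rsp));	// raises B2H_ATN when complete
+ */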
+
+static long bt_bmc_ioctl(struct file *file, unsigned int cmd,
+			 unsigned long param)
+{
+	struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+	switch (cmd) {
+	case BT_BMC_IOCTL_SMS_ATN:
+		set_sms_atn(bt_bmc);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int bt_bmc_release(struct inode *inode, struct file *file)
+{
+	struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+	atomic_dec(&open_count);
+	set_b_busy(bt_bmc);
+	return 0;
+}
+
+static __poll_t bt_bmc_poll(struct file *file, poll_table *wait)
+{
+	struct bt_bmc *bt_bmc = file_bt_bmc(file);
+	__poll_t mask = 0;
+	u8 ctrl;
+
+	poll_wait(file, &bt_bmc->queue, wait);
+
+	ctrl = bt_inb(bt_bmc, BT_CTRL);
+
+	if (ctrl & BT_CTRL_H2B_ATN)
+		mask |= EPOLLIN;
+
+	if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN)))
+		mask |= EPOLLOUT;
+
+	return mask;
+}
+
+static const struct file_operations bt_bmc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= bt_bmc_open,
+	.read		= bt_bmc_read,
+	.write		= bt_bmc_write,
+	.release	= bt_bmc_release,
+	.poll		= bt_bmc_poll,
+	.unlocked_ioctl	= bt_bmc_ioctl,
+};
+
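+/*
+ * Fallback for boards with no usable interrupt: a self-rearming 500 ms
+ * timer wakes pollers and blocked readers/writers instead.
+ */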
+static void poll_timer(struct timer_list *t)
+{
+	struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer);
+
+	bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
+	wake_up(&bt_bmc->queue);
+	add_timer(&bt_bmc->poll_timer);
+}
+
+static irqreturn_t bt_bmc_irq(int irq, void *arg)
+{
+	struct bt_bmc *bt_bmc = arg;
+	u32 reg;
+	int rc;
+
+	rc = regmap_read(bt_bmc->map, bt_bmc->offset + BT_CR2, &reg);
+	if (rc)
+		return IRQ_NONE;
+
+	reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY;
+	if (!reg)
+		return IRQ_NONE;
+
+	/* ack pending IRQs */
+	regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR2, reg);
+
+	wake_up(&bt_bmc->queue);
+	return IRQ_HANDLED;
+}
+
+static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
+			     struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int rc;
+
+	bt_bmc->irq = platform_get_irq(pdev, 0);
+	if (!bt_bmc->irq)
+		return -ENODEV;
+
+	rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
+			      DEVICE_NAME, bt_bmc);
+	if (rc < 0) {
+		dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
+		bt_bmc->irq = 0;
+		return rc;
+	}
+
+	/*
+	 * Configure IRQs on the bmc clearing the H2B and HBUSY bits;
+	 * H2B will be asserted when the bmc has data for us; HBUSY
+	 * will be cleared (along with B2H) when we can write the next
+	 * message to the BT buffer
+	 */
+	rc = regmap_update_bits(bt_bmc->map, bt_bmc->offset + BT_CR1,
+				(BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY),
+				(BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY));
+
+	return rc;
+}
+
+static int bt_bmc_probe(struct platform_device *pdev)
+{
+	struct bt_bmc *bt_bmc;
+	struct device *dev;
+	int rc;
+
+	if (!pdev || !pdev->dev.of_node)
+		return -ENODEV;
+
+	dev = &pdev->dev;
+	dev_info(dev, "Found bt bmc device\n");
+
+	bt_bmc = devm_kzalloc(dev, sizeof(*bt_bmc), GFP_KERNEL);
+	if (!bt_bmc)
+		return -ENOMEM;
+
+	dev_set_drvdata(&pdev->dev, bt_bmc);
+
+	bt_bmc->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
+	if (IS_ERR(bt_bmc->map)) {
+		struct resource *res;
+		void __iomem *base;
+
+		/*
+		 * Assume it's not the MFD-based devicetree description, in
+		 * which case generate a regmap ourselves
+		 */
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		base = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(base))
+			return PTR_ERR(base);
+
+		bt_bmc->map = devm_regmap_init_mmio(dev, base, &bt_regmap_cfg);
+		bt_bmc->offset = 0;
+	} else {
+		rc = of_property_read_u32(dev->of_node, "reg", &bt_bmc->offset);
+		if (rc)
+			return rc;
+	}
+
+	mutex_init(&bt_bmc->mutex);
+	init_waitqueue_head(&bt_bmc->queue);
+
+	bt_bmc->miscdev.minor	= MISC_DYNAMIC_MINOR;
+	bt_bmc->miscdev.name	= DEVICE_NAME;
+	bt_bmc->miscdev.fops	= &bt_bmc_fops;
+	bt_bmc->miscdev.parent	= dev;
+	rc = misc_register(&bt_bmc->miscdev);
+	if (rc) {
+		dev_err(dev, "Unable to register misc device\n");
+		return rc;
+	}
+
+	bt_bmc_config_irq(bt_bmc, pdev);
+
+	if (bt_bmc->irq) {
+		dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
+	} else {
+		dev_info(dev, "No IRQ; using timer\n");
+		timer_setup(&bt_bmc->poll_timer, poll_timer, 0);
+		bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
+		add_timer(&bt_bmc->poll_timer);
+	}
+
+	regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR0,
+		     (BT_IO_BASE << BT_CR0_IO_BASE) |
+		     (BT_IRQ << BT_CR0_IRQ) |
+		     BT_CR0_EN_CLR_SLV_RDP |
+		     BT_CR0_EN_CLR_SLV_WRP |
+		     BT_CR0_ENABLE_IBT);
+
+	clr_b_busy(bt_bmc);
+
+	return 0;
+}
+
+static int bt_bmc_remove(struct platform_device *pdev)
+{
+	struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
+
+	misc_deregister(&bt_bmc->miscdev);
+	if (!bt_bmc->irq)
+		del_timer_sync(&bt_bmc->poll_timer);
+	return 0;
+}
+
+static const struct of_device_id bt_bmc_match[] = {
+	{ .compatible = "aspeed,ast2400-ibt-bmc" },
+	{ .compatible = "aspeed,ast2500-ibt-bmc" },
+	{ },
+};
+
+static struct platform_driver bt_bmc_driver = {
+	.driver = {
+		.name		= DEVICE_NAME,
+		.of_match_table = bt_bmc_match,
+	},
+	.probe = bt_bmc_probe,
+	.remove = bt_bmc_remove,
+};
+
+module_platform_driver(bt_bmc_driver);
+
+MODULE_DEVICE_TABLE(of, bt_bmc_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
+MODULE_DESCRIPTION("Linux device interface to the IPMI BT interface");
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
new file mode 100644
index 0000000..97d6856
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  ipmi_bt_sm.c
+ *
+ *  The state machine for an Open IPMI BT sub-driver under ipmi_si.c, part
+ *  of the driver architecture at http://sourceforge.net/projects/openipmi
+ *
+ *  Author:	Rocky Craig <first.last@hp.com>
+ */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi_msgdefs.h>		/* for completion codes */
+#include "ipmi_si_sm.h"
+
+#define BT_DEBUG_OFF	0	/* Used in production */
+#define BT_DEBUG_ENABLE	1	/* Generic messages */
+#define BT_DEBUG_MSG	2	/* Prints all request/response buffers */
+#define BT_DEBUG_STATES	4	/* Verbose look at state changes */
+/*
+ * BT_DEBUG_OFF must be zero to correspond to the default uninitialized
+ * value
+ */
+
+static int bt_debug; /* 0 == BT_DEBUG_OFF */
+
+module_param(bt_debug, int, 0644);
+MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
+
+/*
+ * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
+ * and 64 byte buffers.  However, one HP implementation wants 255 bytes of
+ * buffer (with a documented message of 160 bytes) so go for the max.
+ * Since the Open IPMI architecture is single-message oriented at this
+ * stage, the queue depth of BT is of no concern.
+ */
+
+#define BT_NORMAL_TIMEOUT	5	/* seconds */
+#define BT_NORMAL_RETRY_LIMIT	2
+#define BT_RESET_DELAY		6	/* seconds after warm reset */
+
+/*
+ * States are written in chronological order and usually cover
+ * multiple rows of the state table discussion in the IPMI spec.
+ */
+
+enum bt_states {
+	BT_STATE_IDLE = 0,	/* Order is critical in this list */
+	BT_STATE_XACTION_START,
+	BT_STATE_WRITE_BYTES,
+	BT_STATE_WRITE_CONSUME,
+	BT_STATE_READ_WAIT,
+	BT_STATE_CLEAR_B2H,
+	BT_STATE_READ_BYTES,
+	BT_STATE_RESET1,	/* These must come last */
+	BT_STATE_RESET2,
+	BT_STATE_RESET3,
+	BT_STATE_RESTART,
+	BT_STATE_PRINTME,
+	BT_STATE_LONG_BUSY	/* BT doesn't get hosed :-) */
+};
+
+/*
+ * Macros seen at the end of state "case" blocks.  They help with legibility
+ * and debugging.
+ */
+
+#define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; }
+
+#define BT_SI_SM_RETURN(Y)   { last_printed = BT_STATE_PRINTME; return Y; }
+
+struct si_sm_data {
+	enum bt_states	state;
+	unsigned char	seq;		/* BT sequence number */
+	struct si_sm_io	*io;
+	unsigned char	write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
+	int		write_count;
+	unsigned char	read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
+	int		read_count;
+	int		truncated;
+	long		timeout;	/* microseconds countdown */
+	int		error_retries;	/* end of "common" fields */
+	int		nonzero_status;	/* hung BMCs stay all 0 */
+	enum bt_states	complete;	/* to divert the state machine */
+	long		BT_CAP_req2rsp;
+	int		BT_CAP_retries;	/* Recommended retries */
+};
+
+#define BT_CLR_WR_PTR	0x01	/* See IPMI 1.5 table 11.6.4 */
+#define BT_CLR_RD_PTR	0x02
+#define BT_H2B_ATN	0x04
+#define BT_B2H_ATN	0x08
+#define BT_SMS_ATN	0x10
+#define BT_OEM0		0x20
+#define BT_H_BUSY	0x40
+#define BT_B_BUSY	0x80
+
+/*
+ * Some bits are toggled on each write: write once to set it, once
+ * more to clear it; writing a zero does nothing.  To absolutely
+ * clear it, check its state and write if set.  This avoids the "get
+ * current then use as mask" scheme to modify one bit.  Note that the
+ * variable "bt" is hardcoded into these macros.
+ */
+
+#define BT_STATUS	bt->io->inputb(bt->io, 0)
+#define BT_CONTROL(x)	bt->io->outputb(bt->io, 0, x)
+
+#define BMC2HOST	bt->io->inputb(bt->io, 1)
+#define HOST2BMC(x)	bt->io->outputb(bt->io, 1, x)
+
+#define BT_INTMASK_R	bt->io->inputb(bt->io, 2)
+#define BT_INTMASK_W(x)	bt->io->outputb(bt->io, 2, x)
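+
+/*
+ * Example of the toggle discipline described above: to guarantee H_BUSY
+ * ends up clear, test it first and write the bit only if it is set:
+ *
+ *	if (BT_STATUS & BT_H_BUSY)
+ *		BT_CONTROL(BT_H_BUSY);	// a single write toggles it clear
+ */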
+
+/*
+ * Convenience routines for debugging.  These are not multi-open safe!
+ * Note the macros have hardcoded variables in them.
+ */
+
+static char *state2txt(unsigned char state)
+{
+	switch (state) {
+	case BT_STATE_IDLE:		return("IDLE");
+	case BT_STATE_XACTION_START:	return("XACTION");
+	case BT_STATE_WRITE_BYTES:	return("WR_BYTES");
+	case BT_STATE_WRITE_CONSUME:	return("WR_CONSUME");
+	case BT_STATE_READ_WAIT:	return("RD_WAIT");
+	case BT_STATE_CLEAR_B2H:	return("CLEAR_B2H");
+	case BT_STATE_READ_BYTES:	return("RD_BYTES");
+	case BT_STATE_RESET1:		return("RESET1");
+	case BT_STATE_RESET2:		return("RESET2");
+	case BT_STATE_RESET3:		return("RESET3");
+	case BT_STATE_RESTART:		return("RESTART");
+	case BT_STATE_LONG_BUSY:	return("LONG_BUSY");
+	}
+	return("BAD STATE");
+}
+#define STATE2TXT state2txt(bt->state)
+
+static char *status2txt(unsigned char status)
+{
+	/*
+	 * This cannot be called by two threads at the same time and
+	 * the buffer is always consumed immediately, so the static is
+	 * safe to use.
+	 */
+	static char buf[40];
+
+	strcpy(buf, "[ ");
+	if (status & BT_B_BUSY)
+		strcat(buf, "B_BUSY ");
+	if (status & BT_H_BUSY)
+		strcat(buf, "H_BUSY ");
+	if (status & BT_OEM0)
+		strcat(buf, "OEM0 ");
+	if (status & BT_SMS_ATN)
+		strcat(buf, "SMS ");
+	if (status & BT_B2H_ATN)
+		strcat(buf, "B2H ");
+	if (status & BT_H2B_ATN)
+		strcat(buf, "H2B ");
+	strcat(buf, "]");
+	return buf;
+}
+#define STATUS2TXT status2txt(status)
+
+/* called externally at insmod time, and internally on cleanup */
+
+static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
+{
+	memset(bt, 0, sizeof(struct si_sm_data));
+	if (bt->io != io) {
+		/* external: one-time only things */
+		bt->io = io;
+		bt->seq = 0;
+	}
+	bt->state = BT_STATE_IDLE;	/* start here */
+	bt->complete = BT_STATE_IDLE;	/* end here */
+	bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
+	bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
+	return 3; /* We claim 3 bytes of space; ought to check SPMI table */
+}
+
+/* Jam a completion code (probably an error) into a response */
+
+static void force_result(struct si_sm_data *bt, unsigned char completion_code)
+{
+	bt->read_data[0] = 4;				/* # following bytes */
+	bt->read_data[1] = bt->write_data[1] | 4;	/* Odd NetFn/LUN */
+	bt->read_data[2] = bt->write_data[2];		/* seq (ignored) */
+	bt->read_data[3] = bt->write_data[3];		/* Command */
+	bt->read_data[4] = completion_code;
+	bt->read_count = 5;
+}
+
+/* The upper state machine starts here */
+
+static int bt_start_transaction(struct si_sm_data *bt,
+				unsigned char *data,
+				unsigned int size)
+{
+	unsigned int i;
+
+	if (size < 2)
+		return IPMI_REQ_LEN_INVALID_ERR;
+	if (size > IPMI_MAX_MSG_LENGTH)
+		return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+	if (bt->state == BT_STATE_LONG_BUSY)
+		return IPMI_NODE_BUSY_ERR;
+
+	if (bt->state != BT_STATE_IDLE)
+		return IPMI_NOT_IN_MY_STATE_ERR;
+
+	if (bt_debug & BT_DEBUG_MSG) {
+		printk(KERN_WARNING "BT: +++++++++++++++++ New command\n");
+		printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2);
+		for (i = 0; i < size; i++)
+			printk(" %02x", data[i]);
+		printk("\n");
+	}
+	bt->write_data[0] = size + 1;	/* all data plus seq byte */
+	bt->write_data[1] = *data;	/* NetFn/LUN */
+	bt->write_data[2] = bt->seq++;
+	memcpy(bt->write_data + 3, data + 1, size - 1);
+	bt->write_count = size + 2;
+	bt->error_retries = 0;
+	bt->nonzero_status = 0;
+	bt->truncated = 0;
+	bt->state = BT_STATE_XACTION_START;
+	bt->timeout = bt->BT_CAP_req2rsp;
+	force_result(bt, IPMI_ERR_UNSPECIFIED);
+	return 0;
+}
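+
+/*
+ * Framing example: a Get Device ID request arrives from the upper layer
+ * as data[] = { 0x18, 0x01 } (NetFn/LUN, Cmd), size == 2.  With seq == 5
+ * the interface sees:
+ *
+ *	write_data[] = { 0x03, 0x18, 0x05, 0x01 }
+ *	                  len  NetFn  seq   cmd
+ *
+ * The length byte counts everything after itself, hence size + 1.
+ */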
+
+/*
+ * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
+ * it calls this.  Strip out the length and seq bytes.
+ */
+
+static int bt_get_result(struct si_sm_data *bt,
+			 unsigned char *data,
+			 unsigned int length)
+{
+	int i, msg_len;
+
+	msg_len = bt->read_count - 2;		/* account for length & seq */
+	if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) {
+		force_result(bt, IPMI_ERR_UNSPECIFIED);
+		msg_len = 3;
+	}
+	data[0] = bt->read_data[1];
+	data[1] = bt->read_data[3];
+	if (length < msg_len || bt->truncated) {
+		data[2] = IPMI_ERR_MSG_TRUNCATED;
+		msg_len = 3;
+	} else
+		memcpy(data + 2, bt->read_data + 4, msg_len - 2);
+
+	if (bt_debug & BT_DEBUG_MSG) {
+		printk(KERN_WARNING "BT: result %d bytes:", msg_len);
+		for (i = 0; i < msg_len; i++)
+			printk(" %02x", data[i]);
+		printk("\n");
+	}
+	return msg_len;
+}
+
+/* This bit's functionality is optional */
+#define BT_BMC_HWRST	0x80
+
+static void reset_flags(struct si_sm_data *bt)
+{
+	if (bt_debug)
+		printk(KERN_WARNING "IPMI BT: flag reset %s\n",
+					status2txt(BT_STATUS));
+	if (BT_STATUS & BT_H_BUSY)
+		BT_CONTROL(BT_H_BUSY);	/* force clear */
+	BT_CONTROL(BT_CLR_WR_PTR);	/* always reset */
+	BT_CONTROL(BT_SMS_ATN);		/* always clear */
+	BT_INTMASK_W(BT_BMC_HWRST);
+}
+
+/*
+ * Get rid of an unwanted/stale response.  This should only be needed for
+ * BMCs that support multiple outstanding requests.
+ */
+
+static void drain_BMC2HOST(struct si_sm_data *bt)
+{
+	int i, size;
+
+	if (!(BT_STATUS & BT_B2H_ATN)) 	/* Not signalling a response */
+		return;
+
+	BT_CONTROL(BT_H_BUSY);		/* now set */
+	BT_CONTROL(BT_B2H_ATN);		/* always clear */
+	BT_STATUS;			/* pause */
+	BT_CONTROL(BT_B2H_ATN);		/* some BMCs are stubborn */
+	BT_CONTROL(BT_CLR_RD_PTR);	/* always reset */
+	if (bt_debug)
+		printk(KERN_WARNING "IPMI BT: stale response %s; ",
+			status2txt(BT_STATUS));
+	size = BMC2HOST;
+	for (i = 0; i < size ; i++)
+		BMC2HOST;
+	BT_CONTROL(BT_H_BUSY);		/* now clear */
+	if (bt_debug)
+		printk("drained %d bytes\n", size + 1);
+}
+
+static inline void write_all_bytes(struct si_sm_data *bt)
+{
+	int i;
+
+	if (bt_debug & BT_DEBUG_MSG) {
+		printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
+			bt->write_count, bt->seq);
+		for (i = 0; i < bt->write_count; i++)
+			printk(" %02x", bt->write_data[i]);
+		printk("\n");
+	}
+	for (i = 0; i < bt->write_count; i++)
+		HOST2BMC(bt->write_data[i]);
+}
+
+static inline int read_all_bytes(struct si_sm_data *bt)
+{
+	unsigned int i;
+
+	/*
+	 * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
+	 * Keep layout of first four bytes aligned with write_data[]
+	 */
+
+	bt->read_data[0] = BMC2HOST;
+	bt->read_count = bt->read_data[0];
+
+	if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) {
+		if (bt_debug & BT_DEBUG_MSG)
+			printk(KERN_WARNING "BT: bad raw rsp len=%d\n",
+				bt->read_count);
+		bt->truncated = 1;
+		return 1;	/* let next XACTION START clean it up */
+	}
+	for (i = 1; i <= bt->read_count; i++)
+		bt->read_data[i] = BMC2HOST;
+	bt->read_count++;	/* Account internally for length byte */
+
+	if (bt_debug & BT_DEBUG_MSG) {
+		int max = bt->read_count;
+
+		printk(KERN_WARNING "BT: got %d bytes seq=0x%02X",
+			max, bt->read_data[2]);
+		if (max > 16)
+			max = 16;
+		for (i = 0; i < max; i++)
+			printk(KERN_CONT " %02x", bt->read_data[i]);
+		printk(KERN_CONT "%s\n", bt->read_count == max ? "" : " ...");
+	}
+
+	/* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */
+	if ((bt->read_data[3] == bt->write_data[3]) &&
+	    (bt->read_data[2] == bt->write_data[2]) &&
+	    ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
+			return 1;
+
+	if (bt_debug & BT_DEBUG_MSG)
+		printk(KERN_WARNING "IPMI BT: bad packet: "
+		"want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n",
+		bt->write_data[1] | 0x04, bt->write_data[2], bt->write_data[3],
+		bt->read_data[1],  bt->read_data[2],  bt->read_data[3]);
+	return 0;
+}
+
+/* Restart if retries are left, or return an error completion code */
+
+static enum si_sm_result error_recovery(struct si_sm_data *bt,
+					unsigned char status,
+					unsigned char cCode)
+{
+	char *reason;
+
+	bt->timeout = bt->BT_CAP_req2rsp;
+
+	switch (cCode) {
+	case IPMI_TIMEOUT_ERR:
+		reason = "timeout";
+		break;
+	default:
+		reason = "internal error";
+		break;
+	}
+
+	printk(KERN_WARNING "IPMI BT: %s in %s %s ", 	/* open-ended line */
+		reason, STATE2TXT, STATUS2TXT);
+
+	/*
+	 * Per the IPMI spec, retries are based on the sequence number
+	 * known only to this module, so manage a restart here.
+	 */
+	(bt->error_retries)++;
+	if (bt->error_retries < bt->BT_CAP_retries) {
+		printk("%d retries left\n",
+			bt->BT_CAP_retries - bt->error_retries);
+		bt->state = BT_STATE_RESTART;
+		return SI_SM_CALL_WITHOUT_DELAY;
+	}
+
+	printk(KERN_WARNING "failed %d retries, sending error response\n",
+	       bt->BT_CAP_retries);
+	if (!bt->nonzero_status)
+		printk(KERN_ERR "IPMI BT: stuck, try power cycle\n");
+
+	/* this is most likely during insmod */
+	else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) {
+		printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n");
+		bt->state = BT_STATE_RESET1;
+		return SI_SM_CALL_WITHOUT_DELAY;
+	}
+
+	/*
+	 * Concoct a useful error message, set up the next state, and
+	 * be done with this sequence.
+	 */
+
+	bt->state = BT_STATE_IDLE;
+	switch (cCode) {
+	case IPMI_TIMEOUT_ERR:
+		if (status & BT_B_BUSY) {
+			cCode = IPMI_NODE_BUSY_ERR;
+			bt->state = BT_STATE_LONG_BUSY;
+		}
+		break;
+	default:
+		break;
+	}
+	force_result(bt, cCode);
+	return SI_SM_TRANSACTION_COMPLETE;
+}
+
+/* Check status and (usually) take action and change this state machine. */
+
+static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
+{
+	unsigned char status;
+	static enum bt_states last_printed = BT_STATE_PRINTME;
+	int i;
+
+	status = BT_STATUS;
+	bt->nonzero_status |= status;
+	if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) {
+		printk(KERN_WARNING "BT: %s %s TO=%ld - %ld\n",
+			STATE2TXT,
+			STATUS2TXT,
+			bt->timeout,
+			time);
+		last_printed = bt->state;
+	}
+
+	/*
+	 * Commands that time out may still (eventually) provide a response.
+	 * This stale response will get in the way of a new response so remove
+	 * it if possible (hopefully during IDLE).  Even if it comes up later
+	 * it will be rejected by its (now-forgotten) seq number.
+	 */
+
+	if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
+		drain_BMC2HOST(bt);
+		BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+	}
+
+	if ((bt->state != BT_STATE_IDLE) &&
+	    (bt->state <  BT_STATE_PRINTME)) {
+		/* check timeout */
+		bt->timeout -= time;
+		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
+			return error_recovery(bt,
+					      status,
+					      IPMI_TIMEOUT_ERR);
+	}
+
+	switch (bt->state) {
+
+	/*
+	 * Idle state first checks for asynchronous messages from another
+	 * channel, then does some opportunistic housekeeping.
+	 */
+
+	case BT_STATE_IDLE:
+		if (status & BT_SMS_ATN) {
+			BT_CONTROL(BT_SMS_ATN);	/* clear it */
+			return SI_SM_ATTN;
+		}
+
+		if (status & BT_H_BUSY)		/* clear a leftover H_BUSY */
+			BT_CONTROL(BT_H_BUSY);
+
+		BT_SI_SM_RETURN(SI_SM_IDLE);
+
+	case BT_STATE_XACTION_START:
+		if (status & (BT_B_BUSY | BT_H2B_ATN))
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		if (BT_STATUS & BT_H_BUSY)
+			BT_CONTROL(BT_H_BUSY);	/* force clear */
+		BT_STATE_CHANGE(BT_STATE_WRITE_BYTES,
+				SI_SM_CALL_WITHOUT_DELAY);
+
+	case BT_STATE_WRITE_BYTES:
+		if (status & BT_H_BUSY)
+			BT_CONTROL(BT_H_BUSY);	/* clear */
+		BT_CONTROL(BT_CLR_WR_PTR);
+		write_all_bytes(bt);
+		BT_CONTROL(BT_H2B_ATN);	/* can clear too fast to catch */
+		BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME,
+				SI_SM_CALL_WITHOUT_DELAY);
+
+	case BT_STATE_WRITE_CONSUME:
+		if (status & (BT_B_BUSY | BT_H2B_ATN))
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+				SI_SM_CALL_WITHOUT_DELAY);
+
+	/* Spinning hard can suppress B2H_ATN and force a timeout */
+
+	case BT_STATE_READ_WAIT:
+		if (!(status & BT_B2H_ATN))
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		BT_CONTROL(BT_H_BUSY);		/* set */
+
+		/*
+		 * Uncached, ordered writes should just proceed serially but
+		 * some BMCs don't clear B2H_ATN with one hit.  Fast-path a
+		 * workaround without too much penalty to the general case.
+		 */
+
+		BT_CONTROL(BT_B2H_ATN);		/* clear it to ACK the BMC */
+		BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
+				SI_SM_CALL_WITHOUT_DELAY);
+
+	case BT_STATE_CLEAR_B2H:
+		if (status & BT_B2H_ATN) {
+			/* keep hitting it */
+			BT_CONTROL(BT_B2H_ATN);
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		}
+		BT_STATE_CHANGE(BT_STATE_READ_BYTES,
+				SI_SM_CALL_WITHOUT_DELAY);
+
+	case BT_STATE_READ_BYTES:
+		if (!(status & BT_H_BUSY))
+			/* check in case of retry */
+			BT_CONTROL(BT_H_BUSY);
+		BT_CONTROL(BT_CLR_RD_PTR);	/* start of BMC2HOST buffer */
+		i = read_all_bytes(bt);		/* true == packet seq match */
+		BT_CONTROL(BT_H_BUSY);		/* NOW clear */
+		if (!i) 			/* Not my message */
+			BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+					SI_SM_CALL_WITHOUT_DELAY);
+		bt->state = bt->complete;
+		return bt->state == BT_STATE_IDLE ?	/* where to next? */
+			SI_SM_TRANSACTION_COMPLETE :	/* normal */
+			SI_SM_CALL_WITHOUT_DELAY;	/* Startup magic */
+
+	case BT_STATE_LONG_BUSY:	/* For example: after FW update */
+		if (!(status & BT_B_BUSY)) {
+			reset_flags(bt);	/* next state is now IDLE */
+			bt_init_data(bt, bt->io);
+		}
+		return SI_SM_CALL_WITH_DELAY;	/* No repeat printing */
+
+	case BT_STATE_RESET1:
+		reset_flags(bt);
+		drain_BMC2HOST(bt);
+		BT_STATE_CHANGE(BT_STATE_RESET2,
+				SI_SM_CALL_WITH_DELAY);
+
+	case BT_STATE_RESET2:		/* Send a soft reset */
+		BT_CONTROL(BT_CLR_WR_PTR);
+		HOST2BMC(3);		/* number of bytes following */
+		HOST2BMC(0x18);		/* NetFn/LUN == Application, LUN 0 */
+		HOST2BMC(42);		/* Sequence number */
+		HOST2BMC(3);		/* Cmd == Soft reset */
+		BT_CONTROL(BT_H2B_ATN);
+		bt->timeout = BT_RESET_DELAY * USEC_PER_SEC;
+		BT_STATE_CHANGE(BT_STATE_RESET3,
+				SI_SM_CALL_WITH_DELAY);
+
+	case BT_STATE_RESET3:		/* Hold off everything for a bit */
+		if (bt->timeout > 0)
+			return SI_SM_CALL_WITH_DELAY;
+		drain_BMC2HOST(bt);
+		BT_STATE_CHANGE(BT_STATE_RESTART,
+				SI_SM_CALL_WITH_DELAY);
+
+	case BT_STATE_RESTART:		/* don't reset retries or seq! */
+		bt->read_count = 0;
+		bt->nonzero_status = 0;
+		bt->timeout = bt->BT_CAP_req2rsp;
+		BT_STATE_CHANGE(BT_STATE_XACTION_START,
+				SI_SM_CALL_WITH_DELAY);
+
+	default:	/* should never occur */
+		return error_recovery(bt,
+				      status,
+				      IPMI_ERR_UNSPECIFIED);
+	}
+	return SI_SM_CALL_WITH_DELAY;
+}
+
+static int bt_detect(struct si_sm_data *bt)
+{
+	unsigned char GetBT_CAP[] = { 0x18, 0x36 };
+	unsigned char BT_CAP[8];
+	enum si_sm_result smi_result;
+	int rv;
+
+	/*
+	 * It's impossible for the BT status and interrupt registers to be
+	 * all 1's (assuming a properly functioning, self-initialized BMC),
+	 * but that's what you get from reading a bogus address, so we
+	 * test that first.  The calling routine uses negative logic.
+	 */
+
+	if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
+		return 1;
+	reset_flags(bt);
+
+	/*
+	 * Try getting the BT capabilities here.
+	 */
+	rv = bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
+	if (rv) {
+		dev_warn(bt->io->dev,
+			 "Can't start capabilities transaction: %d\n", rv);
+		goto out_no_bt_cap;
+	}
+
+	smi_result = SI_SM_CALL_WITHOUT_DELAY;
+	for (;;) {
+		if (smi_result == SI_SM_CALL_WITH_DELAY ||
+		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
+			schedule_timeout_uninterruptible(1);
+			smi_result = bt_event(bt, jiffies_to_usecs(1));
+		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+			smi_result = bt_event(bt, 0);
+		} else
+			break;
+	}
+
+	rv = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
+	bt_init_data(bt, bt->io);
+	if (rv < 8) {
+		dev_warn(bt->io->dev, "bt cap response too short: %d\n", rv);
+		goto out_no_bt_cap;
+	}
+
+	if (BT_CAP[2]) {
+		dev_warn(bt->io->dev, "Error fetching bt cap: %x\n", BT_CAP[2]);
+out_no_bt_cap:
+		dev_warn(bt->io->dev, "using default values\n");
+	} else {
+		bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
+		bt->BT_CAP_retries = BT_CAP[7];
+	}
+
+	dev_info(bt->io->dev, "req2rsp=%ld secs retries=%d\n",
+		 bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
+
+	return 0;
+}
+
+static void bt_cleanup(struct si_sm_data *bt)
+{
+}
+
+static int bt_size(void)
+{
+	return sizeof(struct si_sm_data);
+}
+
+const struct si_sm_handlers bt_smi_handlers = {
+	.init_data		= bt_init_data,
+	.start_transaction	= bt_start_transaction,
+	.get_result		= bt_get_result,
+	.event			= bt_event,
+	.detect			= bt_detect,
+	.cleanup		= bt_cleanup,
+	.size			= bt_size,
+};
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
new file mode 100644
index 0000000..1a486ae
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -0,0 +1,913 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_devintf.c
+ *
+ * Linux device interface for the IPMI message handler.
+ *
+ * Author: MontaVista Software, Inc.
+ *         Corey Minyard <minyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/ipmi.h>
+#include <linux/mutex.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/compat.h>
+
+struct ipmi_file_private
+{
+	struct ipmi_user     *user;
+	spinlock_t           recv_msg_lock;
+	struct list_head     recv_msgs;
+	struct file          *file;
+	struct fasync_struct *fasync_queue;
+	wait_queue_head_t    wait;
+	struct mutex	     recv_mutex;
+	int                  default_retries;
+	unsigned int         default_retry_time_ms;
+};
+
+static void file_receive_handler(struct ipmi_recv_msg *msg,
+				 void                 *handler_data)
+{
+	struct ipmi_file_private *priv = handler_data;
+	int                      was_empty;
+	unsigned long            flags;
+
+	spin_lock_irqsave(&priv->recv_msg_lock, flags);
+	was_empty = list_empty(&priv->recv_msgs);
+	list_add_tail(&msg->link, &priv->recv_msgs);
+	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+
+	if (was_empty) {
+		wake_up_interruptible(&priv->wait);
+		kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
+	}
+}
+
+static __poll_t ipmi_poll(struct file *file, poll_table *wait)
+{
+	struct ipmi_file_private *priv = file->private_data;
+	__poll_t             mask = 0;
+	unsigned long            flags;
+
+	poll_wait(file, &priv->wait, wait);
+
+	spin_lock_irqsave(&priv->recv_msg_lock, flags);
+
+	if (!list_empty(&priv->recv_msgs))
+		mask |= (EPOLLIN | EPOLLRDNORM);
+
+	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+
+	return mask;
+}
+
+static int ipmi_fasync(int fd, struct file *file, int on)
+{
+	struct ipmi_file_private *priv = file->private_data;
+
+	return fasync_helper(fd, file, on, &priv->fasync_queue);
+}
+
+static const struct ipmi_user_hndl ipmi_hndlrs =
+{
+	.ipmi_recv_hndl	= file_receive_handler,
+};
+
+static int ipmi_open(struct inode *inode, struct file *file)
+{
+	int                      if_num = iminor(inode);
+	int                      rv;
+	struct ipmi_file_private *priv;
+
+	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->file = file;
+
+	rv = ipmi_create_user(if_num,
+			      &ipmi_hndlrs,
+			      priv,
+			      &priv->user);
+	if (rv) {
+		kfree(priv);
+		goto out;
+	}
+
+	file->private_data = priv;
+
+	spin_lock_init(&priv->recv_msg_lock);
+	INIT_LIST_HEAD(&priv->recv_msgs);
+	init_waitqueue_head(&priv->wait);
+	priv->fasync_queue = NULL;
+	mutex_init(&priv->recv_mutex);
+
+	/* Use the low-level defaults. */
+	priv->default_retries = -1;
+	priv->default_retry_time_ms = 0;
+
+out:
+	return rv;
+}
+
+static int ipmi_release(struct inode *inode, struct file *file)
+{
+	struct ipmi_file_private *priv = file->private_data;
+	int                      rv;
+	struct ipmi_recv_msg *msg, *next;
+
+	rv = ipmi_destroy_user(priv->user);
+	if (rv)
+		return rv;
+
+	list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
+		ipmi_free_recv_msg(msg);
+
+	kfree(priv);
+
+	return 0;
+}
+
+static int handle_send_req(struct ipmi_user *user,
+			   struct ipmi_req *req,
+			   int             retries,
+			   unsigned int    retry_time_ms)
+{
+	int              rv;
+	struct ipmi_addr addr;
+	struct kernel_ipmi_msg msg;
+
+	if (req->addr_len > sizeof(struct ipmi_addr))
+		return -EINVAL;
+
+	if (copy_from_user(&addr, req->addr, req->addr_len))
+		return -EFAULT;
+
+	msg.netfn = req->msg.netfn;
+	msg.cmd = req->msg.cmd;
+	msg.data_len = req->msg.data_len;
+	msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+	if (!msg.data)
+		return -ENOMEM;
+
+	/* From here out we cannot return, we must jump to "out" for
+	   error exits to free msgdata. */
+
+	rv = ipmi_validate_addr(&addr, req->addr_len);
+	if (rv)
+		goto out;
+
+	if (req->msg.data != NULL) {
+		if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
+			rv = -EMSGSIZE;
+			goto out;
+		}
+
+		if (copy_from_user(msg.data,
+				   req->msg.data,
+				   req->msg.data_len)) {
+			rv = -EFAULT;
+			goto out;
+		}
+	} else {
+		msg.data_len = 0;
+	}
+
+	rv = ipmi_request_settime(user,
+				  &addr,
+				  req->msgid,
+				  &msg,
+				  NULL,
+				  0,
+				  retries,
+				  retry_time_ms);
+ out:
+	kfree(msg.data);
+	return rv;
+}
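+
+/*
+ * Illustrative userspace sketch (not part of this file): the request
+ * handled above normally originates from an ioctl such as:
+ *
+ *	struct ipmi_system_interface_addr si = {
+ *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
+ *		.channel = IPMI_BMC_CHANNEL,
+ *	};
+ *	struct ipmi_req req = {
+ *		.addr = (unsigned char *) &si,
+ *		.addr_len = sizeof(si),
+ *		.msgid = 1,
+ *		.msg = { .netfn = 0x06, .cmd = 0x01 },	// Get Device ID
+ *	};
+ *	ioctl(fd, IPMICTL_SEND_COMMAND, &req);
+ */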
+
+static int handle_recv(struct ipmi_file_private *priv,
+			bool trunc, struct ipmi_recv *rsp,
+			int (*copyout)(struct ipmi_recv *, void __user *),
+			void __user *to)
+{
+	int              addr_len;
+	struct list_head *entry;
+	struct ipmi_recv_msg  *msg;
+	unsigned long    flags;
+	int rv = 0;
+
+	/* We claim a mutex because we don't want two
+	   users getting something from the queue at a time.
+	   Since we have to release the spinlock before we can
+	   copy the data to the user, it's possible another
+	   user will grab something from the queue, too.  Then
+	   the messages might get out of order if something
+	   fails and the message gets put back onto the
+	   queue.  This mutex prevents that problem. */
+	mutex_lock(&priv->recv_mutex);
+
+	/* Grab the message off the list. */
+	spin_lock_irqsave(&priv->recv_msg_lock, flags);
+	if (list_empty(&(priv->recv_msgs))) {
+		spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+		rv = -EAGAIN;
+		goto recv_err;
+	}
+	entry = priv->recv_msgs.next;
+	msg = list_entry(entry, struct ipmi_recv_msg, link);
+	list_del(entry);
+	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+
+	addr_len = ipmi_addr_length(msg->addr.addr_type);
+	if (rsp->addr_len < addr_len) {
+		rv = -EINVAL;
+		goto recv_putback_on_err;
+	}
+
+	if (copy_to_user(rsp->addr, &msg->addr, addr_len)) {
+		rv = -EFAULT;
+		goto recv_putback_on_err;
+	}
+	rsp->addr_len = addr_len;
+
+	rsp->recv_type = msg->recv_type;
+	rsp->msgid = msg->msgid;
+	rsp->msg.netfn = msg->msg.netfn;
+	rsp->msg.cmd = msg->msg.cmd;
+
+	if (msg->msg.data_len > 0) {
+		if (rsp->msg.data_len < msg->msg.data_len) {
+			rv = -EMSGSIZE;
+			if (trunc)
+				msg->msg.data_len = rsp->msg.data_len;
+			else
+				goto recv_putback_on_err;
+		}
+
+		if (copy_to_user(rsp->msg.data,
+				 msg->msg.data,
+				 msg->msg.data_len)) {
+			rv = -EFAULT;
+			goto recv_putback_on_err;
+		}
+		rsp->msg.data_len = msg->msg.data_len;
+	} else {
+		rsp->msg.data_len = 0;
+	}
+
+	rv = copyout(rsp, to);
+	if (rv)
+		goto recv_putback_on_err;
+
+	mutex_unlock(&priv->recv_mutex);
+	ipmi_free_recv_msg(msg);
+	return 0;
+
+recv_putback_on_err:
+	/* If we got an error, put the message back onto
+	   the head of the queue. */
+	spin_lock_irqsave(&priv->recv_msg_lock, flags);
+	list_add(entry, &priv->recv_msgs);
+	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+recv_err:
+	mutex_unlock(&priv->recv_mutex);
+	return rv;
+}
+
+static int copyout_recv(struct ipmi_recv *rsp, void __user *to)
+{
+	return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0;
+}
+
+static long ipmi_ioctl(struct file   *file,
+		       unsigned int  cmd,
+		       unsigned long data)
+{
+	int                      rv = -EINVAL;
+	struct ipmi_file_private *priv = file->private_data;
+	void __user *arg = (void __user *)data;
+
+	switch (cmd) {
+	case IPMICTL_SEND_COMMAND:
+	{
+		struct ipmi_req req;
+		int retries;
+		unsigned int retry_time_ms;
+
+		if (copy_from_user(&req, arg, sizeof(req))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		mutex_lock(&priv->recv_mutex);
+		retries = priv->default_retries;
+		retry_time_ms = priv->default_retry_time_ms;
+		mutex_unlock(&priv->recv_mutex);
+
+		rv = handle_send_req(priv->user, &req, retries, retry_time_ms);
+		break;
+	}
+
+	case IPMICTL_SEND_COMMAND_SETTIME:
+	{
+		struct ipmi_req_settime req;
+
+		if (copy_from_user(&req, arg, sizeof(req))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = handle_send_req(priv->user,
+				     &req.req,
+				     req.retries,
+				     req.retry_time_ms);
+		break;
+	}
+
+	case IPMICTL_RECEIVE_MSG:
+	case IPMICTL_RECEIVE_MSG_TRUNC:
+	{
+		struct ipmi_recv      rsp;
+
+		if (copy_from_user(&rsp, arg, sizeof(rsp)))
+			rv = -EFAULT;
+		else
+			rv = handle_recv(priv, cmd == IPMICTL_RECEIVE_MSG_TRUNC,
+					 &rsp, copyout_recv, arg);
+		break;
+	}
+
+	case IPMICTL_REGISTER_FOR_CMD:
+	{
+		struct ipmi_cmdspec val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
+					   IPMI_CHAN_ALL);
+		break;
+	}
+
+	case IPMICTL_UNREGISTER_FOR_CMD:
+	{
+		struct ipmi_cmdspec   val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
+					     IPMI_CHAN_ALL);
+		break;
+	}
+
+	case IPMICTL_REGISTER_FOR_CMD_CHANS:
+	{
+		struct ipmi_cmdspec_chans val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
+					   val.chans);
+		break;
+	}
+
+	case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
+	{
+		struct ipmi_cmdspec_chans val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
+					     val.chans);
+		break;
+	}
+
+	case IPMICTL_SET_GETS_EVENTS_CMD:
+	{
+		int val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_set_gets_events(priv->user, val);
+		break;
+	}
+
+	/* The next four are legacy, not per-channel. */
+	case IPMICTL_SET_MY_ADDRESS_CMD:
+	{
+		unsigned int val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_set_my_address(priv->user, 0, val);
+		break;
+	}
+
+	case IPMICTL_GET_MY_ADDRESS_CMD:
+	{
+		unsigned int  val;
+		unsigned char rval;
+
+		rv = ipmi_get_my_address(priv->user, 0, &rval);
+		if (rv)
+			break;
+
+		val = rval;
+
+		if (copy_to_user(arg, &val, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+		break;
+	}
+
+	case IPMICTL_SET_MY_LUN_CMD:
+	{
+		unsigned int val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_set_my_LUN(priv->user, 0, val);
+		break;
+	}
+
+	case IPMICTL_GET_MY_LUN_CMD:
+	{
+		unsigned int  val;
+		unsigned char rval;
+
+		rv = ipmi_get_my_LUN(priv->user, 0, &rval);
+		if (rv)
+			break;
+
+		val = rval;
+
+		if (copy_to_user(arg, &val, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+		break;
+	}
+
+	case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
+	{
+		struct ipmi_channel_lun_address_set val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_set_my_address(priv->user, val.channel, val.value);
+		break;
+	}
+
+	case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
+	{
+		struct ipmi_channel_lun_address_set val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
+		if (rv)
+			break;
+
+		if (copy_to_user(arg, &val, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+		break;
+	}
+
+	case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
+	{
+		struct ipmi_channel_lun_address_set val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
+		break;
+	}
+
+	case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
+	{
+		struct ipmi_channel_lun_address_set val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
+		if (rv)
+			break;
+
+		if (copy_to_user(arg, &val, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+		break;
+	}
+
+	case IPMICTL_SET_TIMING_PARMS_CMD:
+	{
+		struct ipmi_timing_parms parms;
+
+		if (copy_from_user(&parms, arg, sizeof(parms))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		mutex_lock(&priv->recv_mutex);
+		priv->default_retries = parms.retries;
+		priv->default_retry_time_ms = parms.retry_time_ms;
+		mutex_unlock(&priv->recv_mutex);
+		rv = 0;
+		break;
+	}
+
+	case IPMICTL_GET_TIMING_PARMS_CMD:
+	{
+		struct ipmi_timing_parms parms;
+
+		mutex_lock(&priv->recv_mutex);
+		parms.retries = priv->default_retries;
+		parms.retry_time_ms = priv->default_retry_time_ms;
+		mutex_unlock(&priv->recv_mutex);
+
+		if (copy_to_user(arg, &parms, sizeof(parms))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = 0;
+		break;
+	}
+
+	case IPMICTL_GET_MAINTENANCE_MODE_CMD:
+	{
+		int mode;
+
+		mode = ipmi_get_maintenance_mode(priv->user);
+		if (copy_to_user(arg, &mode, sizeof(mode))) {
+			rv = -EFAULT;
+			break;
+		}
+		rv = 0;
+		break;
+	}
+
+	case IPMICTL_SET_MAINTENANCE_MODE_CMD:
+	{
+		int mode;
+
+		if (copy_from_user(&mode, arg, sizeof(mode))) {
+			rv = -EFAULT;
+			break;
+		}
+		rv = ipmi_set_maintenance_mode(priv->user, mode);
+		break;
+	}
+
+	default:
+		rv = -ENOTTY;
+		break;
+	}
+
+	return rv;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * The following code supports 32-bit compatible ioctls on 64-bit
+ * kernels, allowing 32-bit applications to run on a 64-bit kernel.
+ */
+#define COMPAT_IPMICTL_SEND_COMMAND	\
+	_IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
+#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME	\
+	_IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
+#define COMPAT_IPMICTL_RECEIVE_MSG	\
+	_IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
+#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC	\
+	_IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)
+
+struct compat_ipmi_msg {
+	u8		netfn;
+	u8		cmd;
+	u16		data_len;
+	compat_uptr_t	data;
+};
+
+struct compat_ipmi_req {
+	compat_uptr_t		addr;
+	compat_uint_t		addr_len;
+	compat_long_t		msgid;
+	struct compat_ipmi_msg	msg;
+};
+
+struct compat_ipmi_recv {
+	compat_int_t		recv_type;
+	compat_uptr_t		addr;
+	compat_uint_t		addr_len;
+	compat_long_t		msgid;
+	struct compat_ipmi_msg	msg;
+};
+
+struct compat_ipmi_req_settime {
+	struct compat_ipmi_req	req;
+	compat_int_t		retries;
+	compat_uint_t		retry_time_ms;
+};
+
+/*
+ * Define some helper functions for copying IPMI data
+ */
+static void get_compat_ipmi_msg(struct ipmi_msg *p64,
+				struct compat_ipmi_msg *p32)
+{
+	p64->netfn = p32->netfn;
+	p64->cmd = p32->cmd;
+	p64->data_len = p32->data_len;
+	p64->data = compat_ptr(p32->data);
+}
+
+static void get_compat_ipmi_req(struct ipmi_req *p64,
+				struct compat_ipmi_req *p32)
+{
+	p64->addr = compat_ptr(p32->addr);
+	p64->addr_len = p32->addr_len;
+	p64->msgid = p32->msgid;
+	get_compat_ipmi_msg(&p64->msg, &p32->msg);
+}
+
+static void get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
+		struct compat_ipmi_req_settime *p32)
+{
+	get_compat_ipmi_req(&p64->req, &p32->req);
+	p64->retries = p32->retries;
+	p64->retry_time_ms = p32->retry_time_ms;
+}
+
+static void get_compat_ipmi_recv(struct ipmi_recv *p64,
+				 struct compat_ipmi_recv *p32)
+{
+	memset(p64, 0, sizeof(struct ipmi_recv));
+	p64->recv_type = p32->recv_type;
+	p64->addr = compat_ptr(p32->addr);
+	p64->addr_len = p32->addr_len;
+	p64->msgid = p32->msgid;
+	get_compat_ipmi_msg(&p64->msg, &p32->msg);
+}
+
+static int copyout_recv32(struct ipmi_recv *p64, void __user *to)
+{
+	struct compat_ipmi_recv v32;
+
+	memset(&v32, 0, sizeof(struct compat_ipmi_recv));
+	v32.recv_type = p64->recv_type;
+	v32.addr = ptr_to_compat(p64->addr);
+	v32.addr_len = p64->addr_len;
+	v32.msgid = p64->msgid;
+	v32.msg.netfn = p64->msg.netfn;
+	v32.msg.cmd = p64->msg.cmd;
+	v32.msg.data_len = p64->msg.data_len;
+	v32.msg.data = ptr_to_compat(p64->msg.data);
+	return copy_to_user(to, &v32, sizeof(v32)) ? -EFAULT : 0;
+}
+
+/*
+ * Handle compatibility ioctls
+ */
+static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
+			      unsigned long arg)
+{
+	struct ipmi_file_private *priv = filep->private_data;
+
+	switch (cmd) {
+	case COMPAT_IPMICTL_SEND_COMMAND:
+	{
+		struct ipmi_req	rp;
+		struct compat_ipmi_req r32;
+		int retries;
+		unsigned int retry_time_ms;
+
+		if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32)))
+			return -EFAULT;
+
+		get_compat_ipmi_req(&rp, &r32);
+
+		mutex_lock(&priv->recv_mutex);
+		retries = priv->default_retries;
+		retry_time_ms = priv->default_retry_time_ms;
+		mutex_unlock(&priv->recv_mutex);
+
+		return handle_send_req(priv->user, &rp,
+				       retries, retry_time_ms);
+	}
+	case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
+	{
+		struct ipmi_req_settime	sp;
+		struct compat_ipmi_req_settime sp32;
+
+		if (copy_from_user(&sp32, compat_ptr(arg), sizeof(sp32)))
+			return -EFAULT;
+
+		get_compat_ipmi_req_settime(&sp, &sp32);
+
+		return handle_send_req(priv->user, &sp.req,
+				sp.retries, sp.retry_time_ms);
+	}
+	case COMPAT_IPMICTL_RECEIVE_MSG:
+	case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
+	{
+		struct ipmi_recv   recv64;
+		struct compat_ipmi_recv recv32;
+
+		if (copy_from_user(&recv32, compat_ptr(arg), sizeof(recv32)))
+			return -EFAULT;
+
+		get_compat_ipmi_recv(&recv64, &recv32);
+
+		return handle_recv(priv,
+				 cmd == COMPAT_IPMICTL_RECEIVE_MSG_TRUNC,
+				 &recv64, copyout_recv32, compat_ptr(arg));
+	}
+	default:
+		return ipmi_ioctl(filep, cmd, arg);
+	}
+}
+#endif
+
+static const struct file_operations ipmi_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl	= ipmi_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = compat_ipmi_ioctl,
+#endif
+	.open		= ipmi_open,
+	.release	= ipmi_release,
+	.fasync		= ipmi_fasync,
+	.poll		= ipmi_poll,
+	.llseek		= noop_llseek,
+};
+
+#define DEVICE_NAME     "ipmidev"
+
+static int ipmi_major;
+module_param(ipmi_major, int, 0);
+MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device.  By"
+		 " default, or if you set it to zero, it will choose the next"
+		 " available device.  Setting it to -1 will disable the"
+		 " interface.  Other values will set the major device number"
+		 " to that value.");
+
+/* Keep track of the devices that are registered. */
+struct ipmi_reg_list {
+	dev_t            dev;
+	struct list_head link;
+};
+static LIST_HEAD(reg_list);
+static DEFINE_MUTEX(reg_list_mutex);
+
+static struct class *ipmi_class;
+
+static void ipmi_new_smi(int if_num, struct device *device)
+{
+	dev_t dev = MKDEV(ipmi_major, if_num);
+	struct ipmi_reg_list *entry;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		printk(KERN_ERR "ipmi_devintf: Unable to create the"
+		       " ipmi class device link\n");
+		return;
+	}
+	entry->dev = dev;
+
+	mutex_lock(&reg_list_mutex);
+	device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
+	list_add(&entry->link, &reg_list);
+	mutex_unlock(&reg_list_mutex);
+}
+
+static void ipmi_smi_gone(int if_num)
+{
+	dev_t dev = MKDEV(ipmi_major, if_num);
+	struct ipmi_reg_list *entry;
+
+	mutex_lock(&reg_list_mutex);
+	list_for_each_entry(entry, &reg_list, link) {
+		if (entry->dev == dev) {
+			list_del(&entry->link);
+			kfree(entry);
+			break;
+		}
+	}
+	device_destroy(ipmi_class, dev);
+	mutex_unlock(&reg_list_mutex);
+}
+
+static struct ipmi_smi_watcher smi_watcher = {
+	.owner    = THIS_MODULE,
+	.new_smi  = ipmi_new_smi,
+	.smi_gone = ipmi_smi_gone,
+};
+
+static int __init init_ipmi_devintf(void)
+{
+	int rv;
+
+	if (ipmi_major < 0)
+		return -EINVAL;
+
+	printk(KERN_INFO "ipmi device interface\n");
+
+	ipmi_class = class_create(THIS_MODULE, "ipmi");
+	if (IS_ERR(ipmi_class)) {
+		printk(KERN_ERR "ipmi: can't register device class\n");
+		return PTR_ERR(ipmi_class);
+	}
+
+	rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
+	if (rv < 0) {
+		class_destroy(ipmi_class);
+		printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
+		return rv;
+	}
+
+	if (ipmi_major == 0)
+		ipmi_major = rv;
+
+	rv = ipmi_smi_watcher_register(&smi_watcher);
+	if (rv) {
+		unregister_chrdev(ipmi_major, DEVICE_NAME);
+		class_destroy(ipmi_class);
+		printk(KERN_WARNING "ipmi: can't register smi watcher\n");
+		return rv;
+	}
+
+	return 0;
+}
+module_init(init_ipmi_devintf);
+
+static void __exit cleanup_ipmi(void)
+{
+	struct ipmi_reg_list *entry, *entry2;
+
+	mutex_lock(&reg_list_mutex);
+	list_for_each_entry_safe(entry, entry2, &reg_list, link) {
+		list_del(&entry->link);
+		device_destroy(ipmi_class, entry->dev);
+		kfree(entry);
+	}
+	mutex_unlock(&reg_list_mutex);
+	class_destroy(ipmi_class);
+	ipmi_smi_watcher_unregister(&smi_watcher);
+	unregister_chrdev(ipmi_major, DEVICE_NAME);
+}
+module_exit(cleanup_ipmi);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
new file mode 100644
index 0000000..e2c1438
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_dmi.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * A hack to create a platform device from a DMI entry.  This will
+ * allow autoloading of the IPMI driver based on SMBIOS entries.
+ */
+
+#include <linux/ipmi.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include "ipmi_si_sm.h"
+#include "ipmi_dmi.h"
+
+#define IPMI_DMI_TYPE_KCS	0x01
+#define IPMI_DMI_TYPE_SMIC	0x02
+#define IPMI_DMI_TYPE_BT	0x03
+#define IPMI_DMI_TYPE_SSIF	0x04
+
+struct ipmi_dmi_info {
+	enum si_type si_type;
+	u32 flags;
+	unsigned long addr;
+	u8 slave_addr;
+	struct ipmi_dmi_info *next;
+};
+
+static struct ipmi_dmi_info *ipmi_dmi_infos;
+
+static int ipmi_dmi_nr __initdata;
+
+static void __init dmi_add_platform_ipmi(unsigned long base_addr,
+					 u32 flags,
+					 u8 slave_addr,
+					 int irq,
+					 int offset,
+					 int type)
+{
+	struct platform_device *pdev;
+	struct resource r[4];
+	unsigned int num_r = 1, size;
+	struct property_entry p[5];
+	unsigned int pidx = 0;
+	char *name, *override;
+	int rv;
+	enum si_type si_type;
+	struct ipmi_dmi_info *info;
+
+	memset(p, 0, sizeof(p));
+
+	name = "dmi-ipmi-si";
+	override = "ipmi_si";
+	switch (type) {
+	case IPMI_DMI_TYPE_SSIF:
+		name = "dmi-ipmi-ssif";
+		override = "ipmi_ssif";
+		offset = 1;
+		size = 1;
+		si_type = SI_TYPE_INVALID;
+		break;
+	case IPMI_DMI_TYPE_BT:
+		size = 3;
+		si_type = SI_BT;
+		break;
+	case IPMI_DMI_TYPE_KCS:
+		size = 2;
+		si_type = SI_KCS;
+		break;
+	case IPMI_DMI_TYPE_SMIC:
+		size = 2;
+		si_type = SI_SMIC;
+		break;
+	default:
+		pr_err("ipmi:dmi: Invalid IPMI type: %d\n", type);
+		return;
+	}
+
+	if (si_type != SI_TYPE_INVALID)
+		p[pidx++] = PROPERTY_ENTRY_U8("ipmi-type", si_type);
+
+	p[pidx++] = PROPERTY_ENTRY_U8("slave-addr", slave_addr);
+	p[pidx++] = PROPERTY_ENTRY_U8("addr-source", SI_SMBIOS);
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		pr_warn("ipmi:dmi: Could not allocate dmi info\n");
+	} else {
+		info->si_type = si_type;
+		info->flags = flags;
+		info->addr = base_addr;
+		info->slave_addr = slave_addr;
+		info->next = ipmi_dmi_infos;
+		ipmi_dmi_infos = info;
+	}
+
+	pdev = platform_device_alloc(name, ipmi_dmi_nr);
+	if (!pdev) {
+		pr_err("ipmi:dmi: Error allocation IPMI platform device\n");
+		return;
+	}
+	pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
+					  override);
+	if (!pdev->driver_override)
+		goto err;
+
+	if (type == IPMI_DMI_TYPE_SSIF) {
+		p[pidx++] = PROPERTY_ENTRY_U16("i2c-addr", base_addr);
+		goto add_properties;
+	}
+
+	memset(r, 0, sizeof(r));
+
+	r[0].start = base_addr;
+	r[0].end = r[0].start + offset - 1;
+	r[0].name = "IPMI Address 1";
+	r[0].flags = flags;
+
+	if (size > 1) {
+		r[1].start = r[0].start + offset;
+		r[1].end = r[1].start + offset - 1;
+		r[1].name = "IPMI Address 2";
+		r[1].flags = flags;
+		num_r++;
+	}
+
+	if (size > 2) {
+		r[2].start = r[1].start + offset;
+		r[2].end = r[2].start + offset - 1;
+		r[2].name = "IPMI Address 3";
+		r[2].flags = flags;
+		num_r++;
+	}
+
+	if (irq) {
+		r[num_r].start = irq;
+		r[num_r].end = irq;
+		r[num_r].name = "IPMI IRQ";
+		r[num_r].flags = IORESOURCE_IRQ;
+		num_r++;
+	}
+
+	rv = platform_device_add_resources(pdev, r, num_r);
+	if (rv) {
+		dev_err(&pdev->dev,
+			"ipmi:dmi: Unable to add resources: %d\n", rv);
+		goto err;
+	}
+
+add_properties:
+	rv = platform_device_add_properties(pdev, p);
+	if (rv) {
+		dev_err(&pdev->dev,
+			"ipmi:dmi: Unable to add properties: %d\n", rv);
+		goto err;
+	}
+
+	rv = platform_device_add(pdev);
+	if (rv) {
+		dev_err(&pdev->dev, "ipmi:dmi: Unable to add device: %d\n", rv);
+		goto err;
+	}
+
+	ipmi_dmi_nr++;
+	return;
+
+err:
+	platform_device_put(pdev);
+}
+
+/*
+ * Look up the slave address for a given interface.  This is here
+ * because ACPI doesn't have a slave address while SMBIOS does, but we
+ * prefer using ACPI so the ACPI code can use the IPMI namespace.
+ * This function allows an ACPI-specified IPMI device to look up the
+ * slave address from the DMI table.
+ */
+int ipmi_dmi_get_slave_addr(enum si_type si_type, u32 flags,
+			    unsigned long base_addr)
+{
+	struct ipmi_dmi_info *info = ipmi_dmi_infos;
+
+	while (info) {
+		if (info->si_type == si_type &&
+		    info->flags == flags &&
+		    info->addr == base_addr)
+			return info->slave_addr;
+		info = info->next;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_dmi_get_slave_addr);
+
+#define DMI_IPMI_MIN_LENGTH	0x10
+#define DMI_IPMI_VER2_LENGTH	0x12
+#define DMI_IPMI_TYPE		4
+#define DMI_IPMI_SLAVEADDR	6
+#define DMI_IPMI_ADDR		8
+#define DMI_IPMI_ACCESS		0x10
+#define DMI_IPMI_IRQ		0x11
+#define DMI_IPMI_IO_MASK	0xfffe
+
+static void __init dmi_decode_ipmi(const struct dmi_header *dm)
+{
+	const u8	*data = (const u8 *) dm;
+	u32             flags = IORESOURCE_IO;
+	unsigned long	base_addr;
+	u8              len = dm->length;
+	u8              slave_addr;
+	int             irq = 0, offset;
+	int             type;
+
+	if (len < DMI_IPMI_MIN_LENGTH)
+		return;
+
+	type = data[DMI_IPMI_TYPE];
+	slave_addr = data[DMI_IPMI_SLAVEADDR];
+
+	memcpy(&base_addr, data + DMI_IPMI_ADDR, sizeof(unsigned long));
+	if (len >= DMI_IPMI_VER2_LENGTH) {
+		if (type == IPMI_DMI_TYPE_SSIF) {
+			offset = 0;
+			flags = 0;
+			base_addr = data[DMI_IPMI_ADDR] >> 1;
+			if (base_addr == 0) {
+				/*
+				 * Some broken systems put the I2C address in
+				 * the slave address field.  We try to
+				 * accommodate them here.
+				 */
+				base_addr = data[DMI_IPMI_SLAVEADDR] >> 1;
+				slave_addr = 0;
+			}
+		} else {
+			if (base_addr & 1) {
+				/* I/O */
+				base_addr &= DMI_IPMI_IO_MASK;
+			} else {
+				/* Memory */
+				flags = IORESOURCE_MEM;
+			}
+
+			/*
+			 * If bit 4 of byte 0x10 is set, then the lsb
+			 * of the address is 1 (the address is odd).
+			 */
+			base_addr |= (data[DMI_IPMI_ACCESS] >> 4) & 1;
+
+			irq = data[DMI_IPMI_IRQ];
+
+			/*
+			 * The top two bits of byte 0x10 hold the
+			 * register spacing.
+			 */
+			switch ((data[DMI_IPMI_ACCESS] >> 6) & 3) {
+			case 0: /* Byte boundaries */
+				offset = 1;
+				break;
+			case 1: /* 32-bit boundaries */
+				offset = 4;
+				break;
+			case 2: /* 16-byte boundaries */
+				offset = 16;
+				break;
+			default:
+				pr_err("ipmi:dmi: Invalid offset: 0\n");
+				return;
+			}
+		}
+	} else {
+		/* Old DMI spec. */
+		/*
+		 * Note that technically, the lower bit of the base
+		 * address should be 1 if the address is I/O and 0 if
+		 * the address is in memory.  So many systems get that
+		 * wrong (and all that I have seen are I/O) so we just
+		 * ignore that bit and assume I/O.  Systems that use
+		 * memory should use the newer spec, anyway.
+		 */
+		base_addr = base_addr & DMI_IPMI_IO_MASK;
+		offset = 1;
+	}
+
+	dmi_add_platform_ipmi(base_addr, flags, slave_addr, irq,
+			      offset, type);
+}
+
+static int __init scan_for_dmi_ipmi(void)
+{
+	const struct dmi_device *dev = NULL;
+
+	while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev)))
+		dmi_decode_ipmi((const struct dmi_header *) dev->device_data);
+
+	return 0;
+}
+subsys_initcall(scan_for_dmi_ipmi);
diff --git a/drivers/char/ipmi/ipmi_dmi.h b/drivers/char/ipmi/ipmi_dmi.h
new file mode 100644
index 0000000..8d2b094
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_dmi.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * DMI defines for use by IPMI
+ */
+
+#ifdef CONFIG_IPMI_DMI_DECODE
+int ipmi_dmi_get_slave_addr(enum si_type si_type, u32 flags,
+			    unsigned long base_addr);
+#endif
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
new file mode 100644
index 0000000..f4ea9f4
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_kcs_sm.c
+ *
+ * State machine for handling IPMI KCS interfaces.
+ *
+ * Author: MontaVista Software, Inc.
+ *         Corey Minyard <minyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ */
+
+/*
+ * This state machine is taken from the state machine in the IPMI spec,
+ * pretty much verbatim.  If you have questions about the states, see
+ * that document.
+ */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/ipmi_msgdefs.h>		/* for completion codes */
+#include "ipmi_si_sm.h"
+
+/* kcs_debug is a bit-field
+ *	KCS_DEBUG_ENABLE -	basic debug output (e.g. error recovery)
+ *	KCS_DEBUG_MSG    -	commands and their responses
+ *	KCS_DEBUG_STATES -	state machine transitions
+ */
+#define KCS_DEBUG_STATES	4
+#define KCS_DEBUG_MSG		2
+#define	KCS_DEBUG_ENABLE	1
+
+static int kcs_debug;
+module_param(kcs_debug, int, 0644);
+MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
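+
+/*
+ * Usage sketch (illustrative only): since the values form a bitmask,
+ * kcs_debug=3 enables both the basic debug output (1) and the
+ * command/response dump (2); kcs_debug=7 adds per-event state tracing.
+ */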
+
+/* The states the KCS driver may be in. */
+enum kcs_states {
+	/* The KCS interface is currently doing nothing. */
+	KCS_IDLE,
+
+	/*
+	 * We are starting an operation.  The data is in the output
+	 * buffer, but nothing has been done to the interface yet.  This
+	 * was added to the state machine in the spec to wait for the
+	 * initial IBF.
+	 */
+	KCS_START_OP,
+
+	/* We have written a write cmd to the interface. */
+	KCS_WAIT_WRITE_START,
+
+	/* We are writing bytes to the interface. */
+	KCS_WAIT_WRITE,
+
+	/*
+	 * We have written the write end cmd to the interface, and
+	 * still need to write the last byte.
+	 */
+	KCS_WAIT_WRITE_END,
+
+	/* We are waiting to read data from the interface. */
+	KCS_WAIT_READ,
+
+	/*
+	 * State to transition to the error handler, this was added to
+	 * the state machine in the spec to be sure IBF was there.
+	 */
+	KCS_ERROR0,
+
+	/*
+	 * First stage error handler, wait for the interface to
+	 * respond.
+	 */
+	KCS_ERROR1,
+
+	/*
+	 * The abort cmd has been written, wait for the interface to
+	 * respond.
+	 */
+	KCS_ERROR2,
+
+	/*
+	 * We wrote some data to the interface, wait for it to switch
+	 * to read mode.
+	 */
+	KCS_ERROR3,
+
+	/* The hardware failed to follow the state machine. */
+	KCS_HOSED
+};
+
+#define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH
+#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
+
+/* Timeouts in microseconds. */
+#define IBF_RETRY_TIMEOUT (5*USEC_PER_SEC)
+#define OBF_RETRY_TIMEOUT (5*USEC_PER_SEC)
+#define MAX_ERROR_RETRIES 10
+#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
+
+struct si_sm_data {
+	enum kcs_states  state;
+	struct si_sm_io *io;
+	unsigned char    write_data[MAX_KCS_WRITE_SIZE];
+	int              write_pos;
+	int              write_count;
+	int              orig_write_count;
+	unsigned char    read_data[MAX_KCS_READ_SIZE];
+	int              read_pos;
+	int	         truncated;
+
+	unsigned int  error_retries;
+	long          ibf_timeout;
+	long          obf_timeout;
+	unsigned long  error0_timeout;
+};
+
+static unsigned int init_kcs_data(struct si_sm_data *kcs,
+				  struct si_sm_io *io)
+{
+	kcs->state = KCS_IDLE;
+	kcs->io = io;
+	kcs->write_pos = 0;
+	kcs->write_count = 0;
+	kcs->orig_write_count = 0;
+	kcs->read_pos = 0;
+	kcs->error_retries = 0;
+	kcs->truncated = 0;
+	kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+	kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+
+	/* Reserve 2 I/O bytes: data (offset 0) and command/status (offset 1). */
+	return 2;
+}
+
+static inline unsigned char read_status(struct si_sm_data *kcs)
+{
+	return kcs->io->inputb(kcs->io, 1);
+}
+
+static inline unsigned char read_data(struct si_sm_data *kcs)
+{
+	return kcs->io->inputb(kcs->io, 0);
+}
+
+static inline void write_cmd(struct si_sm_data *kcs, unsigned char data)
+{
+	kcs->io->outputb(kcs->io, 1, data);
+}
+
+static inline void write_data(struct si_sm_data *kcs, unsigned char data)
+{
+	kcs->io->outputb(kcs->io, 0, data);
+}
+
+/* Control codes. */
+#define KCS_GET_STATUS_ABORT	0x60
+#define KCS_WRITE_START		0x61
+#define KCS_WRITE_END		0x62
+#define KCS_READ_BYTE		0x68
+
+/* Status bits. */
+#define GET_STATUS_STATE(status) (((status) >> 6) & 0x03)
+#define KCS_IDLE_STATE	0
+#define KCS_READ_STATE	1
+#define KCS_WRITE_STATE	2
+#define KCS_ERROR_STATE	3
+#define GET_STATUS_ATN(status) ((status) & 0x04)
+#define GET_STATUS_IBF(status) ((status) & 0x02)
+#define GET_STATUS_OBF(status) ((status) & 0x01)
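+
+/*
+ * Worked example (illustrative only): a status byte of 0x43 is
+ * 0b01000011, so GET_STATUS_STATE() yields 1 (KCS_READ_STATE),
+ * GET_STATUS_ATN() is 0, GET_STATUS_IBF() is nonzero (bit 1 set),
+ * and GET_STATUS_OBF() is nonzero (bit 0 set).
+ */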
+
+
+static inline void write_next_byte(struct si_sm_data *kcs)
+{
+	write_data(kcs, kcs->write_data[kcs->write_pos]);
+	(kcs->write_pos)++;
+	(kcs->write_count)--;
+}
+
+static inline void start_error_recovery(struct si_sm_data *kcs, char *reason)
+{
+	(kcs->error_retries)++;
+	if (kcs->error_retries > MAX_ERROR_RETRIES) {
+		if (kcs_debug & KCS_DEBUG_ENABLE)
+			printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n",
+			       reason);
+		kcs->state = KCS_HOSED;
+	} else {
+		kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES;
+		kcs->state = KCS_ERROR0;
+	}
+}
+
+static inline void read_next_byte(struct si_sm_data *kcs)
+{
+	if (kcs->read_pos >= MAX_KCS_READ_SIZE) {
+		/* Throw the data away and mark it truncated. */
+		read_data(kcs);
+		kcs->truncated = 1;
+	} else {
+		kcs->read_data[kcs->read_pos] = read_data(kcs);
+		(kcs->read_pos)++;
+	}
+	write_data(kcs, KCS_READ_BYTE);
+}
+
+static inline int check_ibf(struct si_sm_data *kcs, unsigned char status,
+			    long time)
+{
+	if (GET_STATUS_IBF(status)) {
+		kcs->ibf_timeout -= time;
+		if (kcs->ibf_timeout < 0) {
+			start_error_recovery(kcs, "IBF not ready in time");
+			kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+			return 1;
+		}
+		return 0;
+	}
+	kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+	return 1;
+}
+
+static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
+			    long time)
+{
+	if (!GET_STATUS_OBF(status)) {
+		kcs->obf_timeout -= time;
+		if (kcs->obf_timeout < 0) {
+			kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+			start_error_recovery(kcs, "OBF not ready in time");
+			return 1;
+		}
+		return 0;
+	}
+	kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+	return 1;
+}
+
+static void clear_obf(struct si_sm_data *kcs, unsigned char status)
+{
+	if (GET_STATUS_OBF(status))
+		read_data(kcs);
+}
+
+static void restart_kcs_transaction(struct si_sm_data *kcs)
+{
+	kcs->write_count = kcs->orig_write_count;
+	kcs->write_pos = 0;
+	kcs->read_pos = 0;
+	kcs->state = KCS_WAIT_WRITE_START;
+	kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+	kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+	write_cmd(kcs, KCS_WRITE_START);
+}
+
+static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
+				 unsigned int size)
+{
+	unsigned int i;
+
+	if (size < 2)
+		return IPMI_REQ_LEN_INVALID_ERR;
+	if (size > MAX_KCS_WRITE_SIZE)
+		return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+	if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED))
+		return IPMI_NOT_IN_MY_STATE_ERR;
+
+	if (kcs_debug & KCS_DEBUG_MSG) {
+		printk(KERN_DEBUG "start_kcs_transaction -");
+		for (i = 0; i < size; i++)
+			printk(" %02x", (unsigned char) (data [i]));
+		printk("\n");
+	}
+	kcs->error_retries = 0;
+	memcpy(kcs->write_data, data, size);
+	kcs->write_count = size;
+	kcs->orig_write_count = size;
+	kcs->write_pos = 0;
+	kcs->read_pos = 0;
+	kcs->state = KCS_START_OP;
+	kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+	kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+	return 0;
+}
+
+static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data,
+			  unsigned int length)
+{
+	if (length < kcs->read_pos) {
+		kcs->read_pos = length;
+		kcs->truncated = 1;
+	}
+
+	memcpy(data, kcs->read_data, kcs->read_pos);
+
+	if ((length >= 3) && (kcs->read_pos < 3)) {
+		/* Guarantee that we return at least 3 bytes, with an
+		   error in the third byte if it is too short. */
+		data[2] = IPMI_ERR_UNSPECIFIED;
+		kcs->read_pos = 3;
+	}
+	if (kcs->truncated) {
+		/*
+		 * Report a truncated error.  We might overwrite
+		 * another error, but that's too bad, the user needs
+		 * to know it was truncated.
+		 */
+		data[2] = IPMI_ERR_MSG_TRUNCATED;
+		kcs->truncated = 0;
+	}
+
+	return kcs->read_pos;
+}
+
+/*
+ * This implements the state machine defined in the IPMI manual, see
+ * that for details on how this works.  Divide that flowchart into
+ * sections delimited by "Wait for IBF" and this will become clear.
+ */
+static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
+{
+	unsigned char status;
+	unsigned char state;
+
+	status = read_status(kcs);
+
+	if (kcs_debug & KCS_DEBUG_STATES)
+		printk(KERN_DEBUG "KCS: State = %d, %x\n", kcs->state, status);
+
+	/* All states wait for ibf, so just do it here. */
+	if (!check_ibf(kcs, status, time))
+		return SI_SM_CALL_WITH_DELAY;
+
+	/* Just about everything looks at the KCS state, so grab that, too. */
+	state = GET_STATUS_STATE(status);
+
+	switch (kcs->state) {
+	case KCS_IDLE:
+		/* If there's an interrupt source, turn it off. */
+		clear_obf(kcs, status);
+
+		if (GET_STATUS_ATN(status))
+			return SI_SM_ATTN;
+		else
+			return SI_SM_IDLE;
+
+	case KCS_START_OP:
+		if (state != KCS_IDLE_STATE) {
+			start_error_recovery(kcs,
+					     "State machine not idle at start");
+			break;
+		}
+
+		clear_obf(kcs, status);
+		write_cmd(kcs, KCS_WRITE_START);
+		kcs->state = KCS_WAIT_WRITE_START;
+		break;
+
+	case KCS_WAIT_WRITE_START:
+		if (state != KCS_WRITE_STATE) {
+			start_error_recovery(
+				kcs,
+				"Not in write state at write start");
+			break;
+		}
+		read_data(kcs);
+		if (kcs->write_count == 1) {
+			write_cmd(kcs, KCS_WRITE_END);
+			kcs->state = KCS_WAIT_WRITE_END;
+		} else {
+			write_next_byte(kcs);
+			kcs->state = KCS_WAIT_WRITE;
+		}
+		break;
+
+	case KCS_WAIT_WRITE:
+		if (state != KCS_WRITE_STATE) {
+			start_error_recovery(kcs,
+					     "Not in write state for write");
+			break;
+		}
+		clear_obf(kcs, status);
+		if (kcs->write_count == 1) {
+			write_cmd(kcs, KCS_WRITE_END);
+			kcs->state = KCS_WAIT_WRITE_END;
+		} else {
+			write_next_byte(kcs);
+		}
+		break;
+
+	case KCS_WAIT_WRITE_END:
+		if (state != KCS_WRITE_STATE) {
+			start_error_recovery(kcs,
+					     "Not in write state for write end");
+			break;
+		}
+		clear_obf(kcs, status);
+		write_next_byte(kcs);
+		kcs->state = KCS_WAIT_READ;
+		break;
+
+	case KCS_WAIT_READ:
+		if ((state != KCS_READ_STATE) && (state != KCS_IDLE_STATE)) {
+			start_error_recovery(
+				kcs,
+				"Not in read or idle in read state");
+			break;
+		}
+
+		if (state == KCS_READ_STATE) {
+			if (!check_obf(kcs, status, time))
+				return SI_SM_CALL_WITH_DELAY;
+			read_next_byte(kcs);
+		} else {
+			/*
+			 * We don't implement this exactly like the state
+			 * machine in the spec.  Some broken hardware
+			 * does not write the final dummy byte to the
+			 * read register.  Thus obf will never go high
+			 * here.  We just go straight to idle, and we
+			 * handle clearing out obf in idle state if it
+			 * happens to come in.
+			 */
+			clear_obf(kcs, status);
+			kcs->orig_write_count = 0;
+			kcs->state = KCS_IDLE;
+			return SI_SM_TRANSACTION_COMPLETE;
+		}
+		break;
+
+	case KCS_ERROR0:
+		clear_obf(kcs, status);
+		status = read_status(kcs);
+		if (GET_STATUS_OBF(status)) {
+			/* The controller isn't responding yet. */
+			if (time_before(jiffies, kcs->error0_timeout))
+				return SI_SM_CALL_WITH_TICK_DELAY;
+		}
+		write_cmd(kcs, KCS_GET_STATUS_ABORT);
+		kcs->state = KCS_ERROR1;
+		break;
+
+	case KCS_ERROR1:
+		clear_obf(kcs, status);
+		write_data(kcs, 0);
+		kcs->state = KCS_ERROR2;
+		break;
+
+	case KCS_ERROR2:
+		if (state != KCS_READ_STATE) {
+			start_error_recovery(kcs,
+					     "Not in read state for error2");
+			break;
+		}
+		if (!check_obf(kcs, status, time))
+			return SI_SM_CALL_WITH_DELAY;
+
+		clear_obf(kcs, status);
+		write_data(kcs, KCS_READ_BYTE);
+		kcs->state = KCS_ERROR3;
+		break;
+
+	case KCS_ERROR3:
+		if (state != KCS_IDLE_STATE) {
+			start_error_recovery(kcs,
+					     "Not in idle state for error3");
+			break;
+		}
+
+		if (!check_obf(kcs, status, time))
+			return SI_SM_CALL_WITH_DELAY;
+
+		clear_obf(kcs, status);
+		if (kcs->orig_write_count) {
+			restart_kcs_transaction(kcs);
+		} else {
+			kcs->state = KCS_IDLE;
+			return SI_SM_TRANSACTION_COMPLETE;
+		}
+		break;
+
+	case KCS_HOSED:
+		break;
+	}
+
+	if (kcs->state == KCS_HOSED) {
+		init_kcs_data(kcs, kcs->io);
+		return SI_SM_HOSED;
+	}
+
+	return SI_SM_CALL_WITHOUT_DELAY;
+}
+
+static int kcs_size(void)
+{
+	return sizeof(struct si_sm_data);
+}
+
+static int kcs_detect(struct si_sm_data *kcs)
+{
+	/*
+	 * It's impossible for the KCS status register to be all 1's,
+	 * (assuming a properly functioning, self-initialized BMC)
+	 * but that's what you get from reading a bogus address, so we
+	 * test that first.
+	 */
+	if (read_status(kcs) == 0xff)
+		return 1;
+
+	return 0;
+}
+
+static void kcs_cleanup(struct si_sm_data *kcs)
+{
+}
+
+const struct si_sm_handlers kcs_smi_handlers = {
+	.init_data         = init_kcs_data,
+	.start_transaction = start_kcs_transaction,
+	.get_result        = get_kcs_result,
+	.event             = kcs_event,
+	.detect            = kcs_detect,
+	.cleanup           = kcs_cleanup,
+	.size              = kcs_size,
+};
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
new file mode 100644
index 0000000..7fc9612
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -0,0 +1,5102 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_msghandler.c
+ *
+ * Incoming and outgoing message routing for an IPMI interface.
+ *
+ * Author: MontaVista Software, Inc.
+ *         Corey Minyard <minyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include <linux/uuid.h>
+
+#define PFX "IPMI message handler: "
+
+#define IPMI_DRIVER_VERSION "39.2"
+
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
+static int ipmi_init_msghandler(void);
+static void smi_recv_tasklet(unsigned long);
+static void handle_new_recv_msgs(struct ipmi_smi *intf);
+static void need_waiter(struct ipmi_smi *intf);
+static int handle_one_recv_msg(struct ipmi_smi *intf,
+			       struct ipmi_smi_msg *msg);
+
+#ifdef DEBUG
+static void ipmi_debug_msg(const char *title, unsigned char *data,
+			   unsigned int len)
+{
+	int i, pos;
+	char buf[100];
+
+	pos = snprintf(buf, sizeof(buf), "%s: ", title);
+	/*
+	 * snprintf() returns the would-be length, so stop once the
+	 * buffer is full; otherwise pos could run past sizeof(buf)
+	 * and the size argument would wrap.
+	 */
+	for (i = 0; i < len && pos < (int)sizeof(buf); i++)
+		pos += snprintf(buf + pos, sizeof(buf) - pos,
+				" %2.2x", data[i]);
+	pr_debug("%s\n", buf);
+}
+#else
+static void ipmi_debug_msg(const char *title, unsigned char *data,
+			   unsigned int len)
+{ }
+#endif
+
+static int initialized;
+
+enum ipmi_panic_event_op {
+	IPMI_SEND_PANIC_EVENT_NONE,
+	IPMI_SEND_PANIC_EVENT,
+	IPMI_SEND_PANIC_EVENT_STRING
+};
+#ifdef CONFIG_IPMI_PANIC_STRING
+#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
+#elif defined(CONFIG_IPMI_PANIC_EVENT)
+#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
+#else
+#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
+#endif
+static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
+
+static int panic_op_write_handler(const char *val,
+				  const struct kernel_param *kp)
+{
+	char valcp[16];
+	char *s;
+
+	strncpy(valcp, val, 15);
+	valcp[15] = '\0';
+
+	s = strstrip(valcp);
+
+	if (strcmp(s, "none") == 0)
+		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
+	else if (strcmp(s, "event") == 0)
+		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
+	else if (strcmp(s, "string") == 0)
+		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
+{
+	switch (ipmi_send_panic_event) {
+	case IPMI_SEND_PANIC_EVENT_NONE:
+		strcpy(buffer, "none");
+		break;
+
+	case IPMI_SEND_PANIC_EVENT:
+		strcpy(buffer, "event");
+		break;
+
+	case IPMI_SEND_PANIC_EVENT_STRING:
+		strcpy(buffer, "string");
+		break;
+
+	default:
+		strcpy(buffer, "???");
+		break;
+	}
+
+	return strlen(buffer);
+}
+
+static const struct kernel_param_ops panic_op_ops = {
+	.set = panic_op_write_handler,
+	.get = panic_op_read_handler
+};
+module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
+MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
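+
+/*
+ * Usage sketch (illustrative, not part of the original source): with the
+ * 0600 permissions above, root can switch modes at runtime, e.g.
+ * "echo string > /sys/module/ipmi_msghandler/parameters/panic_op".
+ */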
+
+
+#define MAX_EVENTS_IN_QUEUE	25
+
+/* Remain in auto-maintenance mode for this amount of time (in ms). */
+static unsigned long maintenance_mode_timeout_ms = 30000;
+module_param(maintenance_mode_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(maintenance_mode_timeout_ms,
+		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
+
+/*
+ * Don't let a message sit in a queue forever; always time it out with at
+ * least the max message timer.  This is in milliseconds.
+ */
+#define MAX_MSG_TIMEOUT		60000
+
+/*
+ * Timeout times below are in milliseconds, and are done off a 1
+ * second timer.  So setting the value to 1000 would mean anything
+ * between 0 and 1000ms.  So really the only reasonable minimum
+ * setting is 2000ms, which is between 1 and 2 seconds.
+ */
+
+/* The default timeout for message retries. */
+static unsigned long default_retry_ms = 2000;
+module_param(default_retry_ms, ulong, 0644);
+MODULE_PARM_DESC(default_retry_ms,
+		 "The time (milliseconds) between retry sends");
+
+/* The default timeout for maintenance mode message retries. */
+static unsigned long default_maintenance_retry_ms = 3000;
+module_param(default_maintenance_retry_ms, ulong, 0644);
+MODULE_PARM_DESC(default_maintenance_retry_ms,
+		 "The time (milliseconds) between retry sends in maintenance mode");
+
+/* The default maximum number of retries */
+static unsigned int default_max_retries = 4;
+module_param(default_max_retries, uint, 0644);
+MODULE_PARM_DESC(default_max_retries,
+		 "The maximum number of times to retry sending a message before giving up");
+
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME	1000
+
+/* How many jiffies does it take to get to the timeout time. */
+#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
+
+/*
+ * Request events from the queue every second (this is the number of
+ * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
+ * future, IPMI will add a way to know immediately if an event is in
+ * the queue and this silliness can go away.
+ */
+#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
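+
+/*
+ * With IPMI_TIMEOUT_TIME at 1000 this evaluates to 1, i.e. an event
+ * request is issued roughly every timer tick (about once a second).
+ */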
+
+/* How long should we cache dynamic device IDs? */
+#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)
+
+/*
+ * The main "user" data structure.
+ */
+struct ipmi_user {
+	struct list_head link;
+
+	/*
+	 * A pointer to myself, so srcu_dereference() can be used on it;
+	 * set to NULL when the user is destroyed.
+	 */
+	struct ipmi_user *self;
+	struct srcu_struct release_barrier;
+
+	struct kref refcount;
+
+	/* The upper layer that handles receive messages. */
+	const struct ipmi_user_hndl *handler;
+	void             *handler_data;
+
+	/* The interface this user is bound to. */
+	struct ipmi_smi *intf;
+
+	/* Does this interface receive IPMI events? */
+	bool gets_events;
+};
+
+static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
+	__acquires(user->release_barrier)
+{
+	struct ipmi_user *ruser;
+
+	*index = srcu_read_lock(&user->release_barrier);
+	ruser = srcu_dereference(user->self, &user->release_barrier);
+	if (!ruser)
+		srcu_read_unlock(&user->release_barrier, *index);
+	return ruser;
+}
+
+static void release_ipmi_user(struct ipmi_user *user, int index)
+{
+	srcu_read_unlock(&user->release_barrier, index);
+}
+
+struct cmd_rcvr {
+	struct list_head link;
+
+	struct ipmi_user *user;
+	unsigned char netfn;
+	unsigned char cmd;
+	unsigned int  chans;
+
+	/*
+	 * This is used to form a linked list during mass deletion.
+	 * Since this is in an RCU list, we cannot use the link above
+	 * or change any data until the RCU period completes.  So we
+	 * use this next variable during mass deletion so we can have
+	 * a list and don't have to wait and restart the search on
+	 * every individual deletion of a command.
+	 */
+	struct cmd_rcvr *next;
+};
+
+struct seq_table {
+	unsigned int         inuse : 1;
+	unsigned int         broadcast : 1;
+
+	unsigned long        timeout;
+	unsigned long        orig_timeout;
+	unsigned int         retries_left;
+
+	/*
+	 * To verify on an incoming send message response that this is
+	 * the message that the response is for, we keep a sequence id
+	 * and increment it every time we send a message.
+	 */
+	long                 seqid;
+
+	/*
+	 * This is held so we can properly respond to the message on a
+	 * timeout, and it is used to hold the temporary data for
+	 * retransmission, too.
+	 */
+	struct ipmi_recv_msg *recv_msg;
+};
+
+/*
+ * Store the information in a msgid (long) to allow us to find a
+ * sequence table entry from the msgid.
+ */
+#define STORE_SEQ_IN_MSGID(seq, seqid) \
+	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
+
+#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
+	do {								\
+		seq = (((msgid) >> 26) & 0x3f);				\
+		seqid = ((msgid) & 0x3ffffff);				\
+	} while (0)
+
+#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
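+
+/*
+ * Worked example (illustrative only): with seq = 5 and seqid = 0x12345,
+ * STORE_SEQ_IN_MSGID() packs them as (5 << 26) | 0x12345 = 0x14012345;
+ * GET_SEQ_FROM_MSGID() then recovers seq = 5 and seqid = 0x12345.
+ */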
+
+#define IPMI_MAX_CHANNELS       16
+struct ipmi_channel {
+	unsigned char medium;
+	unsigned char protocol;
+};
+
+struct ipmi_channel_set {
+	struct ipmi_channel c[IPMI_MAX_CHANNELS];
+};
+
+struct ipmi_my_addrinfo {
+	/*
+	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
+	 * but may be changed by the user.
+	 */
+	unsigned char address;
+
+	/*
+	 * My LUN.  This should generally stay the SMS LUN, but just in
+	 * case...
+	 */
+	unsigned char lun;
+};
+
+/*
+ * Note that the product id, manufacturer id, guid, and device id are
+ * immutable in this structure, so dyn_mutex is not required for
+ * accessing those.  If those change on a BMC, a new BMC is allocated.
+ */
+struct bmc_device {
+	struct platform_device pdev;
+	struct list_head       intfs; /* Interfaces on this BMC. */
+	struct ipmi_device_id  id;
+	struct ipmi_device_id  fetch_id;
+	int                    dyn_id_set;
+	unsigned long          dyn_id_expiry;
+	struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
+	guid_t                 guid;
+	guid_t                 fetch_guid;
+	int                    dyn_guid_set;
+	struct kref	       usecount;
+	struct work_struct     remove_work;
+};
+#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
+
+static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
+			     struct ipmi_device_id *id,
+			     bool *guid_set, guid_t *guid);
+
+/*
+ * Various statistics for IPMI, these index stats[] in the ipmi_smi
+ * structure.
+ */
+enum ipmi_stat_indexes {
+	/* Commands we got from the user that were invalid. */
+	IPMI_STAT_sent_invalid_commands = 0,
+
+	/* Commands we sent to the MC. */
+	IPMI_STAT_sent_local_commands,
+
+	/* Responses from the MC that were delivered to a user. */
+	IPMI_STAT_handled_local_responses,
+
+	/* Responses from the MC that were not delivered to a user. */
+	IPMI_STAT_unhandled_local_responses,
+
+	/* Commands we sent out to the IPMB bus. */
+	IPMI_STAT_sent_ipmb_commands,
+
+	/* Commands sent on the IPMB that had errors on the SEND CMD */
+	IPMI_STAT_sent_ipmb_command_errs,
+
+	/* Each retransmit increments this count. */
+	IPMI_STAT_retransmitted_ipmb_commands,
+
+	/*
+	 * When a message times out (runs out of retransmits) this is
+	 * incremented.
+	 */
+	IPMI_STAT_timed_out_ipmb_commands,
+
+	/*
+	 * This is like above, but for broadcasts.  Broadcasts are
+	 * *not* included in the above count (they are expected to
+	 * time out).
+	 */
+	IPMI_STAT_timed_out_ipmb_broadcasts,
+
+	/* Responses I have sent to the IPMB bus. */
+	IPMI_STAT_sent_ipmb_responses,
+
+	/* The response was delivered to the user. */
+	IPMI_STAT_handled_ipmb_responses,
+
+	/* The response had invalid data in it. */
+	IPMI_STAT_invalid_ipmb_responses,
+
+	/* The response didn't have anyone waiting for it. */
+	IPMI_STAT_unhandled_ipmb_responses,
+
+	/* Commands we sent out to the IPMB bus. */
+	IPMI_STAT_sent_lan_commands,
+
+	/* Commands sent on the IPMB that had errors on the SEND CMD */
+	IPMI_STAT_sent_lan_command_errs,
+
+	/* Each retransmit increments this count. */
+	IPMI_STAT_retransmitted_lan_commands,
+
+	/*
+	 * When a message times out (runs out of retransmits) this is
+	 * incremented.
+	 */
+	IPMI_STAT_timed_out_lan_commands,
+
+	/* Responses I have sent to the IPMB bus. */
+	IPMI_STAT_sent_lan_responses,
+
+	/* The response was delivered to the user. */
+	IPMI_STAT_handled_lan_responses,
+
+	/* The response had invalid data in it. */
+	IPMI_STAT_invalid_lan_responses,
+
+	/* The response didn't have anyone waiting for it. */
+	IPMI_STAT_unhandled_lan_responses,
+
+	/* The command was delivered to the user. */
+	IPMI_STAT_handled_commands,
+
+	/* The command had invalid data in it. */
+	IPMI_STAT_invalid_commands,
+
+	/* The command didn't have anyone waiting for it. */
+	IPMI_STAT_unhandled_commands,
+
+	/* Invalid data in an event. */
+	IPMI_STAT_invalid_events,
+
+	/* Events that were received with the proper format. */
+	IPMI_STAT_events,
+
+	/* Retransmissions on IPMB that failed. */
+	IPMI_STAT_dropped_rexmit_ipmb_commands,
+
+	/* Retransmissions on LAN that failed. */
+	IPMI_STAT_dropped_rexmit_lan_commands,
+
+	/* This *must* remain last, add new values above this. */
+	IPMI_NUM_STATS
+};
+
+
+#define IPMI_IPMB_NUM_SEQ	64
+struct ipmi_smi {
+	/* What interface number are we? */
+	int intf_num;
+
+	struct kref refcount;
+
+	/* Set when the interface is being unregistered. */
+	bool in_shutdown;
+
+	/* Used for a list of interfaces. */
+	struct list_head link;
+
+	/*
+	 * The list of upper layers that are using me.  seq_lock write
+	 * protects this.  Read protection is with srcu.
+	 */
+	struct list_head users;
+	struct srcu_struct users_srcu;
+
+	/* Used for wake ups at startup. */
+	wait_queue_head_t waitq;
+
+	/*
+	 * Prevents the interface from being unregistered when the
+	 * interface is used by being looked up through the BMC
+	 * structure.
+	 */
+	struct mutex bmc_reg_mutex;
+
+	struct bmc_device tmp_bmc;
+	struct bmc_device *bmc;
+	bool bmc_registered;
+	struct list_head bmc_link;
+	char *my_dev_name;
+	bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
+	struct work_struct bmc_reg_work;
+
+	const struct ipmi_smi_handlers *handlers;
+	void                     *send_info;
+
+	/* Driver-model device for the system interface. */
+	struct device          *si_dev;
+
+	/*
+	 * A table of sequence numbers for this interface.  We use the
+	 * sequence numbers for IPMB messages that go out of the
+	 * interface to match them up with their responses.  A routine
+	 * is called periodically to time the items in this list.
+	 */
+	spinlock_t       seq_lock;
+	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
+	int curr_seq;
+
+	/*
+	 * Messages queued for delivery.  If delivery fails (out of memory,
+	 * for instance), they will stay in here to be processed later by a
+	 * periodic timer interrupt.  The tasklet is for handling received
+	 * messages directly from the handler.
+	 */
+	spinlock_t       waiting_rcv_msgs_lock;
+	struct list_head waiting_rcv_msgs;
+	atomic_t	 watchdog_pretimeouts_to_deliver;
+	struct tasklet_struct recv_tasklet;
+
+	spinlock_t             xmit_msgs_lock;
+	struct list_head       xmit_msgs;
+	struct ipmi_smi_msg    *curr_msg;
+	struct list_head       hp_xmit_msgs;
+
+	/*
+	 * The list of command receivers that are registered for commands
+	 * on this interface.
+	 */
+	struct mutex     cmd_rcvrs_mutex;
+	struct list_head cmd_rcvrs;
+
+	/*
+	 * Events that were queued because no one was there to receive
+	 * them.
+	 */
+	spinlock_t       events_lock; /* For dealing with event stuff. */
+	struct list_head waiting_events;
+	unsigned int     waiting_events_count; /* How many events in queue? */
+	char             delivering_events;
+	char             event_msg_printed;
+	atomic_t         event_waiters;
+	unsigned int     ticks_to_req_ev;
+	int              last_needs_timer;
+
+	/*
+	 * The event receiver for my BMC, only really used at panic
+	 * shutdown as a place to store this.
+	 */
+	unsigned char event_receiver;
+	unsigned char event_receiver_lun;
+	unsigned char local_sel_device;
+	unsigned char local_event_generator;
+
+	/* For handling of maintenance mode. */
+	int maintenance_mode;
+	bool maintenance_mode_enable;
+	int auto_maintenance_timeout;
+	spinlock_t maintenance_mode_lock; /* Used in a timer... */
+
+	/*
+	 * If we are doing maintenance on something on IPMB, extend
+	 * the timeout time to avoid timeouts writing firmware and
+	 * such.
+	 */
+	int ipmb_maintenance_mode_timeout;
+
+	/*
+	 * A cheap hack, if this is non-null and a message to an
+	 * interface comes in with a NULL user, call this routine with
+	 * it.  Note that the message will still be freed by the
+	 * caller.  This only works on the system interface.
+	 *
+	 * Protected by bmc_reg_mutex.
+	 */
+	void (*null_user_handler)(struct ipmi_smi *intf,
+				  struct ipmi_recv_msg *msg);
+
+	/*
+	 * When we are scanning the channels for an SMI, this will
+	 * tell which channel we are scanning.
+	 */
+	int curr_channel;
+
+	/* Channel information */
+	struct ipmi_channel_set *channel_list;
+	unsigned int curr_working_cset; /* First index into the following. */
+	struct ipmi_channel_set wchannels[2];
+	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
+	bool channels_ready;
+
+	atomic_t stats[IPMI_NUM_STATS];
+
+	/*
+	 * run_to_completion duplicates a flag in the smb_info, smi_info
+	 * and ipmi_serial_info structures.  It is used to decrease the
+	 * number of parameters passed by "low" level IPMI code.
+	 */
+	int run_to_completion;
+};
+#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
+
+static void __get_guid(struct ipmi_smi *intf);
+static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
+static int __ipmi_bmc_register(struct ipmi_smi *intf,
+			       struct ipmi_device_id *id,
+			       bool guid_set, guid_t *guid, int intf_num);
+static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
+
+
+/*
+ * The driver model view of the IPMI messaging driver.
+ */
+static struct platform_driver ipmidriver = {
+	.driver = {
+		.name = "ipmi",
+		.bus = &platform_bus_type
+	}
+};
+/*
+ * This mutex keeps us from adding the same BMC twice.
+ */
+static DEFINE_MUTEX(ipmidriver_mutex);
+
+static LIST_HEAD(ipmi_interfaces);
+static DEFINE_MUTEX(ipmi_interfaces_mutex);
+DEFINE_STATIC_SRCU(ipmi_interfaces_srcu);
+
+/*
+ * List of watchers that want to know when smi's are added and deleted.
+ */
+static LIST_HEAD(smi_watchers);
+static DEFINE_MUTEX(smi_watchers_mutex);
+
+#define ipmi_inc_stat(intf, stat) \
+	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
+#define ipmi_get_stat(intf, stat) \
+	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
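+
+/*
+ * For example (illustrative only), ipmi_inc_stat(intf, sent_ipmb_commands)
+ * expands to atomic_inc(&intf->stats[IPMI_STAT_sent_ipmb_commands]).
+ */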
+
+static const char * const addr_src_to_str[] = {
+	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
+	"device-tree", "platform"
+};
+
+const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
+{
+	if (src >= SI_LAST)
+		src = 0; /* Invalid */
+	return addr_src_to_str[src];
+}
+EXPORT_SYMBOL(ipmi_addr_src_to_str);
+
+static int is_lan_addr(struct ipmi_addr *addr)
+{
+	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
+}
+
+static int is_ipmb_addr(struct ipmi_addr *addr)
+{
+	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
+}
+
+static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
+{
+	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
+}
+
+static void free_recv_msg_list(struct list_head *q)
+{
+	struct ipmi_recv_msg *msg, *msg2;
+
+	list_for_each_entry_safe(msg, msg2, q, link) {
+		list_del(&msg->link);
+		ipmi_free_recv_msg(msg);
+	}
+}
+
+static void free_smi_msg_list(struct list_head *q)
+{
+	struct ipmi_smi_msg *msg, *msg2;
+
+	list_for_each_entry_safe(msg, msg2, q, link) {
+		list_del(&msg->link);
+		ipmi_free_smi_msg(msg);
+	}
+}
+
+static void clean_up_interface_data(struct ipmi_smi *intf)
+{
+	int              i;
+	struct cmd_rcvr  *rcvr, *rcvr2;
+	struct list_head list;
+
+	tasklet_kill(&intf->recv_tasklet);
+
+	free_smi_msg_list(&intf->waiting_rcv_msgs);
+	free_recv_msg_list(&intf->waiting_events);
+
+	/*
+	 * Wholesale remove all the entries from the list in the
+	 * interface and wait for RCU to know that none are in use.
+	 */
+	mutex_lock(&intf->cmd_rcvrs_mutex);
+	INIT_LIST_HEAD(&list);
+	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
+	mutex_unlock(&intf->cmd_rcvrs_mutex);
+
+	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
+		kfree(rcvr);
+
+	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+		if ((intf->seq_table[i].inuse)
+					&& (intf->seq_table[i].recv_msg))
+			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
+	}
+}
+
+static void intf_free(struct kref *ref)
+{
+	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
+
+	clean_up_interface_data(intf);
+	kfree(intf);
+}
+
+struct watcher_entry {
+	int              intf_num;
+	struct ipmi_smi  *intf;
+	struct list_head link;
+};
+
+int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
+{
+	struct ipmi_smi *intf;
+	int index;
+
+	mutex_lock(&smi_watchers_mutex);
+
+	list_add(&watcher->link, &smi_watchers);
+
+	index = srcu_read_lock(&ipmi_interfaces_srcu);
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		int intf_num = READ_ONCE(intf->intf_num);
+
+		if (intf_num == -1)
+			continue;
+		watcher->new_smi(intf_num, intf->si_dev);
+	}
+	srcu_read_unlock(&ipmi_interfaces_srcu, index);
+
+	mutex_unlock(&smi_watchers_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_smi_watcher_register);
+
+int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
+{
+	mutex_lock(&smi_watchers_mutex);
+	list_del(&watcher->link);
+	mutex_unlock(&smi_watchers_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
+
+/*
+ * Takes smi_watchers_mutex itself, so it must not be called with
+ * that mutex already held.
+ */
+static void
+call_smi_watchers(int i, struct device *dev)
+{
+	struct ipmi_smi_watcher *w;
+
+	mutex_lock(&smi_watchers_mutex);
+	list_for_each_entry(w, &smi_watchers, link) {
+		if (try_module_get(w->owner)) {
+			w->new_smi(i, dev);
+			module_put(w->owner);
+		}
+	}
+	mutex_unlock(&smi_watchers_mutex);
+}
+
+static int
+ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
+{
+	if (addr1->addr_type != addr2->addr_type)
+		return 0;
+
+	if (addr1->channel != addr2->channel)
+		return 0;
+
+	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+		struct ipmi_system_interface_addr *smi_addr1
+		    = (struct ipmi_system_interface_addr *) addr1;
+		struct ipmi_system_interface_addr *smi_addr2
+		    = (struct ipmi_system_interface_addr *) addr2;
+		return (smi_addr1->lun == smi_addr2->lun);
+	}
+
+	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
+		struct ipmi_ipmb_addr *ipmb_addr1
+		    = (struct ipmi_ipmb_addr *) addr1;
+		struct ipmi_ipmb_addr *ipmb_addr2
+		    = (struct ipmi_ipmb_addr *) addr2;
+
+		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
+			&& (ipmb_addr1->lun == ipmb_addr2->lun));
+	}
+
+	if (is_lan_addr(addr1)) {
+		struct ipmi_lan_addr *lan_addr1
+			= (struct ipmi_lan_addr *) addr1;
+		struct ipmi_lan_addr *lan_addr2
+		    = (struct ipmi_lan_addr *) addr2;
+
+		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
+			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
+			&& (lan_addr1->session_handle
+			    == lan_addr2->session_handle)
+			&& (lan_addr1->lun == lan_addr2->lun));
+	}
+
+	return 1;
+}
+
+int ipmi_validate_addr(struct ipmi_addr *addr, int len)
+{
+	if (len < sizeof(struct ipmi_system_interface_addr))
+		return -EINVAL;
+
+	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+		if (addr->channel != IPMI_BMC_CHANNEL)
+			return -EINVAL;
+		return 0;
+	}
+
+	if ((addr->channel == IPMI_BMC_CHANNEL)
+	    || (addr->channel >= IPMI_MAX_CHANNELS)
+	    || (addr->channel < 0))
+		return -EINVAL;
+
+	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
+		if (len < sizeof(struct ipmi_ipmb_addr))
+			return -EINVAL;
+		return 0;
+	}
+
+	if (is_lan_addr(addr)) {
+		if (len < sizeof(struct ipmi_lan_addr))
+			return -EINVAL;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(ipmi_validate_addr);
+
+unsigned int ipmi_addr_length(int addr_type)
+{
+	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+		return sizeof(struct ipmi_system_interface_addr);
+
+	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
+			|| (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
+		return sizeof(struct ipmi_ipmb_addr);
+
+	if (addr_type == IPMI_LAN_ADDR_TYPE)
+		return sizeof(struct ipmi_lan_addr);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_addr_length);
+
+static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
+{
+	int rv = 0;
+
+	if (!msg->user) {
+		/* Special handling for NULL users. */
+		if (intf->null_user_handler) {
+			intf->null_user_handler(intf, msg);
+		} else {
+			/* No handler, so give up. */
+			rv = -EINVAL;
+		}
+		ipmi_free_recv_msg(msg);
+	} else if (!oops_in_progress) {
+		/*
+		 * If we are running in the panic context, calling the
+		 * receive handler doesn't have much meaning and has a
+		 * deadlock risk, so simply skip it in that case.
+		 */
+		int index;
+		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
+
+		if (user) {
+			user->handler->ipmi_recv_hndl(msg, user->handler_data);
+			release_ipmi_user(msg->user, index);
+		} else {
+			/* User went away, give up. */
+			ipmi_free_recv_msg(msg);
+			rv = -EINVAL;
+		}
+	}
+
+	return rv;
+}
+
+static void deliver_local_response(struct ipmi_smi *intf,
+				   struct ipmi_recv_msg *msg)
+{
+	if (deliver_response(intf, msg))
+		ipmi_inc_stat(intf, unhandled_local_responses);
+	else
+		ipmi_inc_stat(intf, handled_local_responses);
+}
+
+static void deliver_err_response(struct ipmi_smi *intf,
+				 struct ipmi_recv_msg *msg, int err)
+{
+	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+	msg->msg_data[0] = err;
+	msg->msg.netfn |= 1; /* Convert to a response. */
+	msg->msg.data_len = 1;
+	msg->msg.data = msg->msg_data;
+	deliver_local_response(intf, msg);
+}
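+
+/*
+ * For example, when an App request (netfn 0x06) times out, the user
+ * sees it come back with the response bit set (netfn 0x07) and a
+ * single data byte holding the error's completion code.
+ */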
+
+/*
+ * Find the next sequence number not being used and add the given
+ * message with the given timeout to the sequence table.  This must be
+ * called with the interface's seq_lock held.
+ */
+static int intf_next_seq(struct ipmi_smi      *intf,
+			 struct ipmi_recv_msg *recv_msg,
+			 unsigned long        timeout,
+			 int                  retries,
+			 int                  broadcast,
+			 unsigned char        *seq,
+			 long                 *seqid)
+{
+	int          rv = 0;
+	unsigned int i;
+
+	if (timeout == 0)
+		timeout = default_retry_ms;
+	if (retries < 0)
+		retries = default_max_retries;
+
+	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
+					i = (i+1)%IPMI_IPMB_NUM_SEQ) {
+		if (!intf->seq_table[i].inuse)
+			break;
+	}
+
+	if (!intf->seq_table[i].inuse) {
+		intf->seq_table[i].recv_msg = recv_msg;
+
+		/*
+		 * Start with the maximum timeout, when the send response
+		 * comes in we will start the real timer.
+		 */
+		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
+		intf->seq_table[i].orig_timeout = timeout;
+		intf->seq_table[i].retries_left = retries;
+		intf->seq_table[i].broadcast = broadcast;
+		intf->seq_table[i].inuse = 1;
+		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
+		*seq = i;
+		*seqid = intf->seq_table[i].seqid;
+		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+		need_waiter(intf);
+	} else {
+		rv = -EAGAIN;
+	}
+
+	return rv;
+}
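+
+/*
+ * Example of the scan above: if curr_seq is 5 and entries 5 and 6 are
+ * in use but 7 is free, the loop settles on i = 7; that entry is
+ * claimed, *seq becomes 7, and curr_seq advances to 8 (modulo
+ * IPMI_IPMB_NUM_SEQ) so the next search starts past it.  Only if
+ * every entry is busy does this return -EAGAIN.
+ */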
+
+/*
+ * Return the receive message for the given sequence number and
+ * release the sequence number so it can be reused.  Some other data
+ * is passed in to be sure the message matches up correctly (to help
+ * guard against messages coming in after their timeout and the
+ * sequence number being reused).
+ */
+static int intf_find_seq(struct ipmi_smi      *intf,
+			 unsigned char        seq,
+			 short                channel,
+			 unsigned char        cmd,
+			 unsigned char        netfn,
+			 struct ipmi_addr     *addr,
+			 struct ipmi_recv_msg **recv_msg)
+{
+	int           rv = -ENODEV;
+	unsigned long flags;
+
+	if (seq >= IPMI_IPMB_NUM_SEQ)
+		return -EINVAL;
+
+	spin_lock_irqsave(&intf->seq_lock, flags);
+	if (intf->seq_table[seq].inuse) {
+		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
+
+		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
+				&& (msg->msg.netfn == netfn)
+				&& (ipmi_addr_equal(addr, &msg->addr))) {
+			*recv_msg = msg;
+			intf->seq_table[seq].inuse = 0;
+			rv = 0;
+		}
+	}
+	spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+	return rv;
+}
+
+
+/* Start the timer for a specific sequence table entry. */
+static int intf_start_seq_timer(struct ipmi_smi *intf,
+				long       msgid)
+{
+	int           rv = -ENODEV;
+	unsigned long flags;
+	unsigned char seq;
+	unsigned long seqid;
+
+
+	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
+
+	spin_lock_irqsave(&intf->seq_lock, flags);
+	/*
+	 * We do this verification because the user can be deleted
+	 * while a message is outstanding.
+	 */
+	if ((intf->seq_table[seq].inuse)
+				&& (intf->seq_table[seq].seqid == seqid)) {
+		struct seq_table *ent = &intf->seq_table[seq];
+		ent->timeout = ent->orig_timeout;
+		rv = 0;
+	}
+	spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+	return rv;
+}
+
+/* Got an error for the send message for a specific sequence number. */
+static int intf_err_seq(struct ipmi_smi *intf,
+			long         msgid,
+			unsigned int err)
+{
+	int                  rv = -ENODEV;
+	unsigned long        flags;
+	unsigned char        seq;
+	unsigned long        seqid;
+	struct ipmi_recv_msg *msg = NULL;
+
+
+	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
+
+	spin_lock_irqsave(&intf->seq_lock, flags);
+	/*
+	 * We do this verification because the user can be deleted
+	 * while a message is outstanding.
+	 */
+	if ((intf->seq_table[seq].inuse)
+				&& (intf->seq_table[seq].seqid == seqid)) {
+		struct seq_table *ent = &intf->seq_table[seq];
+
+		ent->inuse = 0;
+		msg = ent->recv_msg;
+		rv = 0;
+	}
+	spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+	if (msg)
+		deliver_err_response(intf, msg, err);
+
+	return rv;
+}
+
+
+int ipmi_create_user(unsigned int          if_num,
+		     const struct ipmi_user_hndl *handler,
+		     void                  *handler_data,
+		     struct ipmi_user      **user)
+{
+	unsigned long flags;
+	struct ipmi_user *new_user;
+	int           rv = 0, index;
+	struct ipmi_smi *intf;
+
+	/*
+	 * There is no module usecount here, because it's not
+	 * required.  Since this can only be used by and called from
+	 * other modules, they will implicitly use this module, and
+	 * thus this can't be removed unless the other modules are
+	 * removed.
+	 */
+
+	if (handler == NULL)
+		return -EINVAL;
+
+	/*
+	 * Make sure the driver is actually initialized, this handles
+	 * problems with initialization order.
+	 */
+	if (!initialized) {
+		rv = ipmi_init_msghandler();
+		if (rv)
+			return rv;
+
+		/*
+		 * The init code doesn't return an error if it was turned
+		 * off, but it won't initialize either.  Check for that.
+		 */
+		if (!initialized)
+			return -ENODEV;
+	}
+
+	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
+	if (!new_user)
+		return -ENOMEM;
+
+	index = srcu_read_lock(&ipmi_interfaces_srcu);
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (intf->intf_num == if_num)
+			goto found;
+	}
+	/* Not found, return an error */
+	rv = -EINVAL;
+	goto out_kfree;
+
+ found:
+	rv = init_srcu_struct(&new_user->release_barrier);
+	if (rv)
+		goto out_kfree;
+
+	/* Note that each existing user holds a refcount to the interface. */
+	kref_get(&intf->refcount);
+
+	kref_init(&new_user->refcount);
+	new_user->handler = handler;
+	new_user->handler_data = handler_data;
+	new_user->intf = intf;
+	new_user->gets_events = false;
+
+	rcu_assign_pointer(new_user->self, new_user);
+	spin_lock_irqsave(&intf->seq_lock, flags);
+	list_add_rcu(&new_user->link, &intf->users);
+	spin_unlock_irqrestore(&intf->seq_lock, flags);
+	if (handler->ipmi_watchdog_pretimeout) {
+		/* User wants pretimeouts, so make sure to watch for them. */
+		if (atomic_inc_return(&intf->event_waiters) == 1)
+			need_waiter(intf);
+	}
+	srcu_read_unlock(&ipmi_interfaces_srcu, index);
+	*user = new_user;
+	return 0;
+
+out_kfree:
+	srcu_read_unlock(&ipmi_interfaces_srcu, index);
+	kfree(new_user);
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_create_user);
+
+int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
+{
+	int rv, index;
+	struct ipmi_smi *intf;
+
+	index = srcu_read_lock(&ipmi_interfaces_srcu);
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (intf->intf_num == if_num)
+			goto found;
+	}
+	srcu_read_unlock(&ipmi_interfaces_srcu, index);
+
+	/* Not found, return an error */
+	return -EINVAL;
+
+found:
+	if (!intf->handlers->get_smi_info)
+		rv = -ENOTTY;
+	else
+		rv = intf->handlers->get_smi_info(intf->send_info, data);
+	srcu_read_unlock(&ipmi_interfaces_srcu, index);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_get_smi_info);
+
+static void free_user(struct kref *ref)
+{
+	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+	kfree(user);
+}
+
+static void _ipmi_destroy_user(struct ipmi_user *user)
+{
+	struct ipmi_smi  *intf = user->intf;
+	int              i;
+	unsigned long    flags;
+	struct cmd_rcvr  *rcvr;
+	struct cmd_rcvr  *rcvrs = NULL;
+
+	if (!acquire_ipmi_user(user, &i)) {
+		/*
+		 * The user has already been cleaned up, just make sure
+		 * nothing is using it and return.
+		 */
+		synchronize_srcu(&user->release_barrier);
+		return;
+	}
+
+	rcu_assign_pointer(user->self, NULL);
+	release_ipmi_user(user, i);
+
+	synchronize_srcu(&user->release_barrier);
+
+	if (user->handler->shutdown)
+		user->handler->shutdown(user->handler_data);
+
+	if (user->handler->ipmi_watchdog_pretimeout)
+		atomic_dec(&intf->event_waiters);
+
+	if (user->gets_events)
+		atomic_dec(&intf->event_waiters);
+
+	/* Remove the user from the interface's sequence table. */
+	spin_lock_irqsave(&intf->seq_lock, flags);
+	list_del_rcu(&user->link);
+
+	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+		if (intf->seq_table[i].inuse
+		    && (intf->seq_table[i].recv_msg->user == user)) {
+			intf->seq_table[i].inuse = 0;
+			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
+		}
+	}
+	spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+	/*
+	 * Remove the user from the command receiver's table.  First
+	 * we build a list of everything (not using the standard link,
+	 * since other things may be using it till we do
+	 * synchronize_rcu()) then free everything in that list.
+	 */
+	mutex_lock(&intf->cmd_rcvrs_mutex);
+	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+		if (rcvr->user == user) {
+			list_del_rcu(&rcvr->link);
+			rcvr->next = rcvrs;
+			rcvrs = rcvr;
+		}
+	}
+	mutex_unlock(&intf->cmd_rcvrs_mutex);
+	synchronize_rcu();
+	while (rcvrs) {
+		rcvr = rcvrs;
+		rcvrs = rcvr->next;
+		kfree(rcvr);
+	}
+
+	kref_put(&intf->refcount, intf_free);
+}
+
+int ipmi_destroy_user(struct ipmi_user *user)
+{
+	_ipmi_destroy_user(user);
+
+	cleanup_srcu_struct(&user->release_barrier);
+	kref_put(&user->refcount, free_user);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_destroy_user);
+
+int ipmi_get_version(struct ipmi_user *user,
+		     unsigned char *major,
+		     unsigned char *minor)
+{
+	struct ipmi_device_id id;
+	int rv, index;
+
+	user = acquire_ipmi_user(user, &index);
+	if (!user)
+		return -ENODEV;
+
+	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
+	if (!rv) {
+		*major = ipmi_version_major(&id);
+		*minor = ipmi_version_minor(&id);
+	}
+	release_ipmi_user(user, index);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_get_version);
+
+int ipmi_set_my_address(struct ipmi_user *user,
+			unsigned int  channel,
+			unsigned char address)
+{
+	int index, rv = 0;
+
+	user = acquire_ipmi_user(user, &index);
+	if (!user)
+		return -ENODEV;
+
+	if (channel >= IPMI_MAX_CHANNELS)
+		rv = -EINVAL;
+	else
+		user->intf->addrinfo[channel].address = address;
+	release_ipmi_user(user, index);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_set_my_address);
+
+int ipmi_get_my_address(struct ipmi_user *user,
+			unsigned int  channel,
+			unsigned char *address)
+{
+	int index, rv = 0;
+
+	user = acquire_ipmi_user(user, &index);
+	if (!user)
+		return -ENODEV;
+
+	if (channel >= IPMI_MAX_CHANNELS)
+		rv = -EINVAL;
+	else
+		*address = user->intf->addrinfo[channel].address;
+	release_ipmi_user(user, index);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_get_my_address);
+
+int ipmi_set_my_LUN(struct ipmi_user *user,
+		    unsigned int  channel,
+		    unsigned char LUN)
+{
+	int index, rv = 0;
+
+	user = acquire_ipmi_user(user, &index);
+	if (!user)
+		return -ENODEV;
+
+	if (channel >= IPMI_MAX_CHANNELS)
+		rv = -EINVAL;
+	else
+		user->intf->addrinfo[channel].lun = LUN & 0x3;
+	release_ipmi_user(user, index);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_set_my_LUN);
+
+int ipmi_get_my_LUN(struct ipmi_user *user,
+		    unsigned int  channel,
+		    unsigned char *address)
+{
+	int index, rv = 0;
+
+	user = acquire_ipmi_user(user, &index);
+	if (!user)
+		return -ENODEV;
+
+	if (channel >= IPMI_MAX_CHANNELS)
+		rv = -EINVAL;
+	else
+		*address = user->intf->addrinfo[channel].lun;
+	release_ipmi_user(user, index);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_get_my_LUN);
+
+int ipmi_get_maintenance_mode(struct ipmi_user *user)
+{
+	int mode, index;
+	unsigned long flags;
+
+	user = acquire_ipmi_user(user, &index);
+	if (!user)
+		return -ENODEV;
+
+	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
+	mode = user->intf->maintenance_mode;
+	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
+	release_ipmi_user(user, index);
+
+	return mode;
+}
+EXPORT_SYMBOL(ipmi_get_maintenance_mode);
+
+static void maintenance_mode_update(struct ipmi_smi *intf)
+{
+	if (intf->handlers->set_maintenance_mode)
+		intf->handlers->set_maintenance_mode(
+			intf->send_info, intf->maintenance_mode_enable);
+}
+
+int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
+{
+	int rv = 0, index;
+	unsigned long flags;
+	struct ipmi_smi *intf = user->intf;
+
+	user = acquire_ipmi_user(user, &index);
+	if (!user)
+		return -ENODEV;
+
+	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+	if (intf->maintenance_mode != mode) {
+		switch (mode) {
+		case IPMI_MAINTENANCE_MODE_AUTO:
+			intf->maintenance_mode_enable
+				= (intf->auto_maintenance_timeout > 0);
+			break;
+
+		case IPMI_MAINTENANCE_MODE_OFF:
+			intf->maintenance_mode_enable = false;
+			break;
+
+		case IPMI_MAINTENANCE_MODE_ON:
+			intf->maintenance_mode_enable = true;
+			break;
+
+		default:
+			rv = -EINVAL;
+			goto out_unlock;
+		}
+		intf->maintenance_mode = mode;
+
+		maintenance_mode_update(intf);
+	}
+ out_unlock:
+	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
+	release_ipmi_user(user, index);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_set_maintenance_mode);
+
+int ipmi_set_gets_events(struct ipmi_user *user, bool val)
+{
+	unsigned long        flags;
+	struct ipmi_smi      *intf = user->intf;
+	struct ipmi_recv_msg *msg, *msg2;
+	struct list_head     msgs;
+	int index;
+
+	user = acquire_ipmi_user(user, &index);
+	if (!user)
+		return -ENODEV;
+
+	INIT_LIST_HEAD(&msgs);
+
+	spin_lock_irqsave(&intf->events_lock, flags);
+	if (user->gets_events == val)
+		goto out;
+
+	user->gets_events = val;
+
+	if (val) {
+		if (atomic_inc_return(&intf->event_waiters) == 1)
+			need_waiter(intf);
+	} else {
+		atomic_dec(&intf->event_waiters);
+	}
+
+	if (intf->delivering_events)
+		/*
+		 * Another thread is delivering events for this, so
+		 * let it handle any new events.
+		 */
+		goto out;
+
+	/* Deliver any queued events. */
+	while (user->gets_events && !list_empty(&intf->waiting_events)) {
+		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
+			list_move_tail(&msg->link, &msgs);
+		intf->waiting_events_count = 0;
+		if (intf->event_msg_printed) {
+			dev_warn(intf->si_dev,
+				 PFX "Event queue no longer full\n");
+			intf->event_msg_printed = 0;
+		}
+
+		intf->delivering_events = 1;
+		spin_unlock_irqrestore(&intf->events_lock, flags);
+
+		list_for_each_entry_safe(msg, msg2, &msgs, link) {
+			msg->user = user;
+			kref_get(&user->refcount);
+			deliver_local_response(intf, msg);
+		}
+
+		spin_lock_irqsave(&intf->events_lock, flags);
+		intf->delivering_events = 0;
+	}
+
+ out:
+	spin_unlock_irqrestore(&intf->events_lock, flags);
+	release_ipmi_user(user, index);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_set_gets_events);
+
+static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
+				      unsigned char netfn,
+				      unsigned char cmd,
+				      unsigned char chan)
+{
+	struct cmd_rcvr *rcvr;
+
+	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
+					&& (rcvr->chans & (1 << chan)))
+			return rcvr;
+	}
+	return NULL;
+}
+
+static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
+				 unsigned char netfn,
+				 unsigned char cmd,
+				 unsigned int  chans)
+{
+	struct cmd_rcvr *rcvr;
+
+	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
+					&& (rcvr->chans & chans))
+			return 0;
+	}
+	return 1;
+}
+
+int ipmi_register_for_cmd(struct ipmi_user *user,
+			  unsigned char netfn,
+			  unsigned char cmd,
+			  unsigned int  chans)
+{
+	struct ipmi_smi *intf = user->intf;
+	struct cmd_rcvr *rcvr;
+	int rv = 0, index;
+
+	user = acquire_ipmi_user(user, &index);
+	if (!user)
+		return -ENODEV;
+
+	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
+	if (!rcvr) {
+		rv = -ENOMEM;
+		goto out_release;
+	}
+	rcvr->cmd = cmd;
+	rcvr->netfn = netfn;
+	rcvr->chans = chans;
+	rcvr->user = user;
+
+	mutex_lock(&intf->cmd_rcvrs_mutex);
+	/* Make sure the command/netfn is not already registered. */
+	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
+		rv = -EBUSY;
+		goto out_unlock;
+	}
+
+	if (atomic_inc_return(&intf->event_waiters) == 1)
+		need_waiter(intf);
+
+	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
+
+out_unlock:
+	mutex_unlock(&intf->cmd_rcvrs_mutex);
+	if (rv)
+		kfree(rcvr);
+out_release:
+	release_ipmi_user(user, index);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_register_for_cmd);
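+
+/*
+ * A minimal usage sketch (the netfn/cmd values here are hypothetical):
+ * to receive command 0x01 of netfn 0x2e on channels 0 and 1 only, a
+ * user would call
+ *
+ *	ipmi_register_for_cmd(user, 0x2e, 0x01, (1 << 0) | (1 << 1));
+ *
+ * since chans is a bitmask with one bit per channel (see
+ * find_cmd_rcvr() above).
+ */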
+
+int ipmi_unregister_for_cmd(struct ipmi_user *user,
+			    unsigned char netfn,
+			    unsigned char cmd,
+			    unsigned int  chans)
+{
+	struct ipmi_smi *intf = user->intf;
+	struct cmd_rcvr *rcvr;
+	struct cmd_rcvr *rcvrs = NULL;
+	int i, rv = -ENOENT, index;
+
+	user = acquire_ipmi_user(user, &index);
+	if (!user)
+		return -ENODEV;
+
+	mutex_lock(&intf->cmd_rcvrs_mutex);
+	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
+		if (((1 << i) & chans) == 0)
+			continue;
+		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
+		if (rcvr == NULL)
+			continue;
+		if (rcvr->user == user) {
+			rv = 0;
+			rcvr->chans &= ~chans;
+			if (rcvr->chans == 0) {
+				list_del_rcu(&rcvr->link);
+				rcvr->next = rcvrs;
+				rcvrs = rcvr;
+			}
+		}
+	}
+	mutex_unlock(&intf->cmd_rcvrs_mutex);
+	synchronize_rcu();
+	release_ipmi_user(user, index);
+	while (rcvrs) {
+		atomic_dec(&intf->event_waiters);
+		rcvr = rcvrs;
+		rcvrs = rcvr->next;
+		kfree(rcvr);
+	}
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_unregister_for_cmd);
+
+static unsigned char
+ipmb_checksum(unsigned char *data, int size)
+{
+	unsigned char csum = 0;
+
+	for (; size > 0; size--, data++)
+		csum += *data;
+
+	return -csum;
+}
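+
+/*
+ * This is the IPMB 2's-complement checksum: it is chosen so that the
+ * covered bytes plus the checksum sum to zero (mod 256).  For example,
+ * for the two bytes 0x20 0x06 the sum is 0x26, so the checksum is
+ * -0x26 = 0xda, and 0x20 + 0x06 + 0xda == 0x100 == 0 (mod 256).
+ */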
+
+static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
+				   struct kernel_ipmi_msg *msg,
+				   struct ipmi_ipmb_addr *ipmb_addr,
+				   long                  msgid,
+				   unsigned char         ipmb_seq,
+				   int                   broadcast,
+				   unsigned char         source_address,
+				   unsigned char         source_lun)
+{
+	int i = broadcast;
+
+	/* Format the IPMB header data. */
+	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
+	smi_msg->data[2] = ipmb_addr->channel;
+	if (broadcast)
+		smi_msg->data[3] = 0;
+	smi_msg->data[i+3] = ipmb_addr->slave_addr;
+	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
+	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
+	smi_msg->data[i+6] = source_address;
+	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
+	smi_msg->data[i+8] = msg->cmd;
+
+	/* Now tack on the data to the message. */
+	if (msg->data_len > 0)
+		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
+	smi_msg->data_size = msg->data_len + 9;
+
+	/* Now calculate the checksum and tack it on. */
+	smi_msg->data[i+smi_msg->data_size]
+		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
+
+	/*
+	 * Add on the checksum size and the offset from the
+	 * broadcast.
+	 */
+	smi_msg->data_size += 1 + i;
+
+	smi_msg->msgid = msgid;
+}
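+
+/*
+ * The resulting Send Message buffer, where i is 1 for a broadcast and
+ * 0 otherwise:
+ *
+ *	data[0]		IPMI_NETFN_APP_REQUEST << 2
+ *	data[1]		IPMI_SEND_MSG_CMD
+ *	data[2]		channel
+ *	data[3]		0 (broadcast only)
+ *	data[i+3]	target slave address
+ *	data[i+4]	netfn << 2 | target LUN
+ *	data[i+5]	checksum over data[i+3..i+4]
+ *	data[i+6]	source address
+ *	data[i+7]	ipmb_seq << 2 | source LUN
+ *	data[i+8]	command
+ *	data[i+9..]	message data
+ *	last byte	checksum over data[i+6] onward
+ */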
+
+static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
+				  struct kernel_ipmi_msg *msg,
+				  struct ipmi_lan_addr  *lan_addr,
+				  long                  msgid,
+				  unsigned char         ipmb_seq,
+				  unsigned char         source_lun)
+{
+	/* Format the IPMB header data. */
+	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
+	smi_msg->data[2] = lan_addr->channel;
+	smi_msg->data[3] = lan_addr->session_handle;
+	smi_msg->data[4] = lan_addr->remote_SWID;
+	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
+	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
+	smi_msg->data[7] = lan_addr->local_SWID;
+	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
+	smi_msg->data[9] = msg->cmd;
+
+	/* Now tack on the data to the message. */
+	if (msg->data_len > 0)
+		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
+	smi_msg->data_size = msg->data_len + 10;
+
+	/* Now calculate the checksum and tack it on. */
+	smi_msg->data[smi_msg->data_size]
+		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
+
+	/* Add on the checksum size. */
+	smi_msg->data_size += 1;
+
+	smi_msg->msgid = msgid;
+}
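+
+/*
+ * The resulting Send Message buffer for the LAN case:
+ *
+ *	data[0]		IPMI_NETFN_APP_REQUEST << 2
+ *	data[1]		IPMI_SEND_MSG_CMD
+ *	data[2]		channel
+ *	data[3]		session handle
+ *	data[4]		remote SWID
+ *	data[5]		netfn << 2 | target LUN
+ *	data[6]		checksum over data[4..5]
+ *	data[7]		local SWID
+ *	data[8]		ipmb_seq << 2 | source LUN
+ *	data[9]		command
+ *	data[10..]	message data
+ *	last byte	checksum over data[7] onward
+ */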
+
+static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
+					     struct ipmi_smi_msg *smi_msg,
+					     int priority)
+{
+	if (intf->curr_msg) {
+		if (priority > 0)
+			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
+		else
+			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
+		smi_msg = NULL;
+	} else {
+		intf->curr_msg = smi_msg;
+	}
+
+	return smi_msg;
+}
+
+
+static void smi_send(struct ipmi_smi *intf,
+		     const struct ipmi_smi_handlers *handlers,
+		     struct ipmi_smi_msg *smi_msg, int priority)
+{
+	int run_to_completion = intf->run_to_completion;
+
+	if (run_to_completion) {
+		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+	} else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+	}
+
+	if (smi_msg)
+		handlers->sender(intf->send_info, smi_msg);
+}
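+
+/*
+ * When run_to_completion is set (typically at panic time, when only a
+ * single context is running), the transmit lists are touched without
+ * taking xmit_msgs_lock; otherwise the lock protects them as above.
+ */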
+
+static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
+{
+	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
+		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
+		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
+		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
+}
+
+static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
+			      struct ipmi_addr       *addr,
+			      long                   msgid,
+			      struct kernel_ipmi_msg *msg,
+			      struct ipmi_smi_msg    *smi_msg,
+			      struct ipmi_recv_msg   *recv_msg,
+			      int                    retries,
+			      unsigned int           retry_time_ms)
+{
+	struct ipmi_system_interface_addr *smi_addr;
+
+	if (msg->netfn & 1)
+		/* Responses are not allowed to the SMI. */
+		return -EINVAL;
+
+	smi_addr = (struct ipmi_system_interface_addr *) addr;
+	if (smi_addr->lun > 3) {
+		ipmi_inc_stat(intf, sent_invalid_commands);
+		return -EINVAL;
+	}
+
+	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
+
+	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
+	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
+		|| (msg->cmd == IPMI_GET_MSG_CMD)
+		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
+		/*
+		 * We don't let the user do these, since we manage
+		 * the sequence numbers.
+		 */
+		ipmi_inc_stat(intf, sent_invalid_commands);
+		return -EINVAL;
+	}
+
+	if (is_maintenance_mode_cmd(msg)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+		intf->auto_maintenance_timeout
+			= maintenance_mode_timeout_ms;
+		if (!intf->maintenance_mode
+		    && !intf->maintenance_mode_enable) {
+			intf->maintenance_mode_enable = true;
+			maintenance_mode_update(intf);
+		}
+		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+				       flags);
+	}
+
+	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
+		ipmi_inc_stat(intf, sent_invalid_commands);
+		return -EMSGSIZE;
+	}
+
+	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
+	smi_msg->data[1] = msg->cmd;
+	smi_msg->msgid = msgid;
+	smi_msg->user_data = recv_msg;
+	if (msg->data_len > 0)
+		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
+	smi_msg->data_size = msg->data_len + 2;
+	ipmi_inc_stat(intf, sent_local_commands);
+
+	return 0;
+}
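+
+/*
+ * So a system-interface request is just a two-byte header followed by
+ * the payload; e.g. a Get Device ID request to LUN 0 is formatted as:
+ *
+ *	data[0] = IPMI_NETFN_APP_REQUEST << 2 | 0	(= 0x18)
+ *	data[1] = IPMI_GET_DEVICE_ID_CMD		(= 0x01)
+ */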
+
+static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
+			   struct ipmi_addr       *addr,
+			   long                   msgid,
+			   struct kernel_ipmi_msg *msg,
+			   struct ipmi_smi_msg    *smi_msg,
+			   struct ipmi_recv_msg   *recv_msg,
+			   unsigned char          source_address,
+			   unsigned char          source_lun,
+			   int                    retries,
+			   unsigned int           retry_time_ms)
+{
+	struct ipmi_ipmb_addr *ipmb_addr;
+	unsigned char ipmb_seq;
+	long seqid;
+	int broadcast = 0;
+	struct ipmi_channel *chans;
+	int rv = 0;
+
+	if (addr->channel >= IPMI_MAX_CHANNELS) {
+		ipmi_inc_stat(intf, sent_invalid_commands);
+		return -EINVAL;
+	}
+
+	chans = READ_ONCE(intf->channel_list)->c;
+
+	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
+		ipmi_inc_stat(intf, sent_invalid_commands);
+		return -EINVAL;
+	}
+
+		/*
+		 * Broadcasts add a zero at the beginning of the
+		 * message, but are otherwise the same as an IPMB
+		 * address.
+		 */
+		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+		broadcast = 1;
+		retries = 0; /* Don't retry broadcasts. */
+	}
+
+	/*
+	 * 9 for the header and 1 for the checksum, plus
+	 * possibly one for the broadcast.
+	 */
+	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
+		ipmi_inc_stat(intf, sent_invalid_commands);
+		return -EMSGSIZE;
+	}
+
+	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
+	if (ipmb_addr->lun > 3) {
+		ipmi_inc_stat(intf, sent_invalid_commands);
+		return -EINVAL;
+	}
+
+	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
+
+	if (recv_msg->msg.netfn & 0x1) {
+		/*
+		 * It's a response, so use the user's sequence
+		 * from msgid.
+		 */
+		ipmi_inc_stat(intf, sent_ipmb_responses);
+		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
+				msgid, broadcast,
+				source_address, source_lun);
+
+		/*
+		 * Save the receive message so we can use it
+		 * to deliver the response.
+		 */
+		smi_msg->user_data = recv_msg;
+	} else {
+		/* It's a command, so get a sequence for it. */
+		unsigned long flags;
+
+		spin_lock_irqsave(&intf->seq_lock, flags);
+
+		if (is_maintenance_mode_cmd(msg))
+			intf->ipmb_maintenance_mode_timeout =
+				maintenance_mode_timeout_ms;
+
+		if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
+			/* Different default in maintenance mode */
+			retry_time_ms = default_maintenance_retry_ms;
+
+		/*
+		 * Allocate a sequence number, applying the default
+		 * timeout and retry count if the caller didn't
+		 * specify them.
+		 */
+		rv = intf_next_seq(intf,
+				   recv_msg,
+				   retry_time_ms,
+				   retries,
+				   broadcast,
+				   &ipmb_seq,
+				   &seqid);
+		if (rv)
+			/*
+			 * We have probably used up all the
+			 * sequence numbers, so abort.
+			 */
+			goto out_err;
+
+		ipmi_inc_stat(intf, sent_ipmb_commands);
+
+		/*
+		 * Store the sequence number in the message,
+		 * so that when the send message response
+		 * comes back we can start the timer.
+		 */
+		format_ipmb_msg(smi_msg, msg, ipmb_addr,
+				STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
+				ipmb_seq, broadcast,
+				source_address, source_lun);
+
+		/*
+		 * Copy the message into the recv message data, so we
+		 * can retransmit it later if necessary.
+		 */
+		memcpy(recv_msg->msg_data, smi_msg->data,
+		       smi_msg->data_size);
+		recv_msg->msg.data = recv_msg->msg_data;
+		recv_msg->msg.data_len = smi_msg->data_size;
+
+		/*
+		 * We don't unlock until here, because we need
+		 * to copy the completed message into the
+		 * recv_msg before we release the lock.
+		 * Otherwise, race conditions may bite us.  I
+		 * know that's pretty paranoid, but I prefer
+		 * to be correct.
+		 */
+out_err:
+		spin_unlock_irqrestore(&intf->seq_lock, flags);
+	}
+
+	return rv;
+}
+
+static int i_ipmi_req_lan(struct ipmi_smi        *intf,
+			  struct ipmi_addr       *addr,
+			  long                   msgid,
+			  struct kernel_ipmi_msg *msg,
+			  struct ipmi_smi_msg    *smi_msg,
+			  struct ipmi_recv_msg   *recv_msg,
+			  unsigned char          source_lun,
+			  int                    retries,
+			  unsigned int           retry_time_ms)
+{
+	struct ipmi_lan_addr  *lan_addr;
+	unsigned char ipmb_seq;
+	long seqid;
+	struct ipmi_channel *chans;
+	int rv = 0;
+
+	if (addr->channel >= IPMI_MAX_CHANNELS) {
+		ipmi_inc_stat(intf, sent_invalid_commands);
+		return -EINVAL;
+	}
+
+	chans = READ_ONCE(intf->channel_list)->c;
+
+	if ((chans[addr->channel].medium
+				!= IPMI_CHANNEL_MEDIUM_8023LAN)
+			&& (chans[addr->channel].medium
+			    != IPMI_CHANNEL_MEDIUM_ASYNC)) {
+		ipmi_inc_stat(intf, sent_invalid_commands);
+		return -EINVAL;
+	}
+
+	/* 11 for the header and 1 for the checksum. */
+	if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
+		ipmi_inc_stat(intf, sent_invalid_commands);
+		return -EMSGSIZE;
+	}
+
+	lan_addr = (struct ipmi_lan_addr *) addr;
+	if (lan_addr->lun > 3) {
+		ipmi_inc_stat(intf, sent_invalid_commands);
+		return -EINVAL;
+	}
+
+	memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
+
+	if (recv_msg->msg.netfn & 0x1) {
+		/*
+		 * It's a response, so use the user's sequence
+		 * from msgid.
+		 */
+		ipmi_inc_stat(intf, sent_lan_responses);
+		format_lan_msg(smi_msg, msg, lan_addr, msgid,
+			       msgid, source_lun);
+
+		/*
+		 * Save the receive message so we can use it
+		 * to deliver the response.
+		 */
+		smi_msg->user_data = recv_msg;
+	} else {
+		/* It's a command, so get a sequence for it. */
+		unsigned long flags;
+
+		spin_lock_irqsave(&intf->seq_lock, flags);
+
+		/*
+		 * Allocate a sequence number, applying the default
+		 * timeout and retry count if the caller didn't
+		 * specify them.
+		 */
+		rv = intf_next_seq(intf,
+				   recv_msg,
+				   retry_time_ms,
+				   retries,
+				   0,
+				   &ipmb_seq,
+				   &seqid);
+		if (rv)
+			/*
+			 * We have probably used up all the
+			 * sequence numbers, so abort.
+			 */
+			goto out_err;
+
+		ipmi_inc_stat(intf, sent_lan_commands);
+
+		/*
+		 * Store the sequence number in the message,
+		 * so that when the send message response
+		 * comes back we can start the timer.
+		 */
+		format_lan_msg(smi_msg, msg, lan_addr,
+			       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
+			       ipmb_seq, source_lun);
+
+		/*
+		 * Copy the message into the recv message data, so we
+		 * can retransmit it later if necessary.
+		 */
+		memcpy(recv_msg->msg_data, smi_msg->data,
+		       smi_msg->data_size);
+		recv_msg->msg.data = recv_msg->msg_data;
+		recv_msg->msg.data_len = smi_msg->data_size;
+
+		/*
+		 * We don't unlock until here, because we need
+		 * to copy the completed message into the
+		 * recv_msg before we release the lock.
+		 * Otherwise, race conditions may bite us.  I
+		 * know that's pretty paranoid, but I prefer
+		 * to be correct.
+		 */
+out_err:
+		spin_unlock_irqrestore(&intf->seq_lock, flags);
+	}
+
+	return rv;
+}
+
+/*
+ * Separate from ipmi_request so that the user does not have to be
+ * supplied in certain circumstances (mainly at panic time).  If
+ * messages are supplied, they will be freed, even if an error
+ * occurs.
+ */
+static int i_ipmi_request(struct ipmi_user     *user,
+			  struct ipmi_smi      *intf,
+			  struct ipmi_addr     *addr,
+			  long                 msgid,
+			  struct kernel_ipmi_msg *msg,
+			  void                 *user_msg_data,
+			  void                 *supplied_smi,
+			  struct ipmi_recv_msg *supplied_recv,
+			  int                  priority,
+			  unsigned char        source_address,
+			  unsigned char        source_lun,
+			  int                  retries,
+			  unsigned int         retry_time_ms)
+{
+	struct ipmi_smi_msg *smi_msg;
+	struct ipmi_recv_msg *recv_msg;
+	int rv = 0;
+
+	if (supplied_recv)
+		recv_msg = supplied_recv;
+	else {
+		recv_msg = ipmi_alloc_recv_msg();
+		if (recv_msg == NULL) {
+			rv = -ENOMEM;
+			goto out;
+		}
+	}
+	recv_msg->user_msg_data = user_msg_data;
+
+	if (supplied_smi)
+		smi_msg = (struct ipmi_smi_msg *) supplied_smi;
+	else {
+		smi_msg = ipmi_alloc_smi_msg();
+		if (smi_msg == NULL) {
+			ipmi_free_recv_msg(recv_msg);
+			rv = -ENOMEM;
+			goto out;
+		}
+	}
+
+	rcu_read_lock();
+	if (intf->in_shutdown) {
+		rv = -ENODEV;
+		goto out_err;
+	}
+
+	recv_msg->user = user;
+	if (user)
+		/* The put happens when the message is freed. */
+		kref_get(&user->refcount);
+	recv_msg->msgid = msgid;
+	/*
+	 * Store the message to send in the receive message so timeout
+	 * responses can get the proper response data.
+	 */
+	recv_msg->msg = *msg;
+
+	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+		rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
+					recv_msg, retries, retry_time_ms);
+	} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
+		rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
+				     source_address, source_lun,
+				     retries, retry_time_ms);
+	} else if (is_lan_addr(addr)) {
+		rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
+				    source_lun, retries, retry_time_ms);
+	} else {
+		/* Unknown address type. */
+		ipmi_inc_stat(intf, sent_invalid_commands);
+		rv = -EINVAL;
+	}
+
+	if (rv) {
+out_err:
+		ipmi_free_smi_msg(smi_msg);
+		ipmi_free_recv_msg(recv_msg);
+	} else {
+		ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size);
+
+		smi_send(intf, intf->handlers, smi_msg, priority);
+	}
+	rcu_read_unlock();
+
+out:
+	return rv;
+}
+
+static int check_addr(struct ipmi_smi  *intf,
+		      struct ipmi_addr *addr,
+		      unsigned char    *saddr,
+		      unsigned char    *lun)
+{
+	if (addr->channel >= IPMI_MAX_CHANNELS)
+		return -EINVAL;
+	*lun = intf->addrinfo[addr->channel].lun;
+	*saddr = intf->addrinfo[addr->channel].address;
+	return 0;
+}
+
+int ipmi_request_settime(struct ipmi_user *user,
+			 struct ipmi_addr *addr,
+			 long             msgid,
+			 struct kernel_ipmi_msg  *msg,
+			 void             *user_msg_data,
+			 int              priority,
+			 int              retries,
+			 unsigned int     retry_time_ms)
+{
+	unsigned char saddr = 0, lun = 0;
+	int rv, index;
+
+	if (!user)
+		return -EINVAL;
+
+	user = acquire_ipmi_user(user, &index);
+	if (!user)
+		return -ENODEV;
+
+	rv = check_addr(user->intf, addr, &saddr, &lun);
+	if (!rv)
+		rv = i_ipmi_request(user,
+				    user->intf,
+				    addr,
+				    msgid,
+				    msg,
+				    user_msg_data,
+				    NULL, NULL,
+				    priority,
+				    saddr,
+				    lun,
+				    retries,
+				    retry_time_ms);
+
+	release_ipmi_user(user, index);
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_request_settime);
+
+int ipmi_request_supply_msgs(struct ipmi_user     *user,
+			     struct ipmi_addr     *addr,
+			     long                 msgid,
+			     struct kernel_ipmi_msg *msg,
+			     void                 *user_msg_data,
+			     void                 *supplied_smi,
+			     struct ipmi_recv_msg *supplied_recv,
+			     int                  priority)
+{
+	unsigned char saddr = 0, lun = 0;
+	int rv, index;
+
+	if (!user)
+		return -EINVAL;
+
+	user = acquire_ipmi_user(user, &index);
+	if (!user)
+		return -ENODEV;
+
+	rv = check_addr(user->intf, addr, &saddr, &lun);
+	if (!rv)
+		rv = i_ipmi_request(user,
+				    user->intf,
+				    addr,
+				    msgid,
+				    msg,
+				    user_msg_data,
+				    supplied_smi,
+				    supplied_recv,
+				    priority,
+				    saddr,
+				    lun,
+				    -1, 0);
+
+	release_ipmi_user(user, index);
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_request_supply_msgs);
+
+static void bmc_device_id_handler(struct ipmi_smi *intf,
+				  struct ipmi_recv_msg *msg)
+{
+	int rv;
+
+	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+			|| (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
+			|| (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
+		dev_warn(intf->si_dev,
+			 PFX "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
+			msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
+		return;
+	}
+
+	rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
+			msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
+	if (rv) {
+		dev_warn(intf->si_dev,
+			 PFX "device id demangle failed: %d\n", rv);
+		intf->bmc->dyn_id_set = 0;
+	} else {
+		/*
+		 * Make sure the id data is available before setting
+		 * dyn_id_set.
+		 */
+		smp_wmb();
+		intf->bmc->dyn_id_set = 1;
+	}
+
+	wake_up(&intf->waitq);
+}
+
+static int
+send_get_device_id_cmd(struct ipmi_smi *intf)
+{
+	struct ipmi_system_interface_addr si;
+	struct kernel_ipmi_msg msg;
+
+	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	si.channel = IPMI_BMC_CHANNEL;
+	si.lun = 0;
+
+	msg.netfn = IPMI_NETFN_APP_REQUEST;
+	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+	msg.data = NULL;
+	msg.data_len = 0;
+
+	return i_ipmi_request(NULL,
+			      intf,
+			      (struct ipmi_addr *) &si,
+			      0,
+			      &msg,
+			      intf,
+			      NULL,
+			      NULL,
+			      0,
+			      intf->addrinfo[0].address,
+			      intf->addrinfo[0].lun,
+			      -1, 0);
+}
+
+static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
+{
+	int rv;
+
+	bmc->dyn_id_set = 2;
+
+	intf->null_user_handler = bmc_device_id_handler;
+
+	rv = send_get_device_id_cmd(intf);
+	if (rv)
+		return rv;
+
+	wait_event(intf->waitq, bmc->dyn_id_set != 2);
+
+	if (!bmc->dyn_id_set)
+		rv = -EIO; /* Something went wrong in the fetch. */
+
+	/* dyn_id_set makes the id data available. */
+	smp_rmb();
+
+	intf->null_user_handler = NULL;
+
+	return rv;
+}
+
+/*
+ * Fetch the device id for the bmc/interface.  You must pass in either
+ * bmc or intf; this code will get the other one.  If the data has
+ * been recently fetched, this will just use the cached data.  Otherwise
+ * it will run a new fetch.
+ *
+ * Except for the first time this is called (in ipmi_register_smi()),
+ * this will always return good data.
+ */
+static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
+			       struct ipmi_device_id *id,
+			       bool *guid_set, guid_t *guid, int intf_num)
+{
+	int rv = 0;
+	int prev_dyn_id_set, prev_guid_set;
+	bool intf_set = intf != NULL;
+
+	if (!intf) {
+		mutex_lock(&bmc->dyn_mutex);
+retry_bmc_lock:
+		if (list_empty(&bmc->intfs)) {
+			mutex_unlock(&bmc->dyn_mutex);
+			return -ENOENT;
+		}
+		intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
+					bmc_link);
+		kref_get(&intf->refcount);
+		mutex_unlock(&bmc->dyn_mutex);
+		mutex_lock(&intf->bmc_reg_mutex);
+		mutex_lock(&bmc->dyn_mutex);
+		if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
+					     bmc_link)) {
+			mutex_unlock(&intf->bmc_reg_mutex);
+			kref_put(&intf->refcount, intf_free);
+			goto retry_bmc_lock;
+		}
+	} else {
+		mutex_lock(&intf->bmc_reg_mutex);
+		bmc = intf->bmc;
+		mutex_lock(&bmc->dyn_mutex);
+		kref_get(&intf->refcount);
+	}
+
+	/* If we have a valid and current ID, just return that. */
+	if (intf->in_bmc_register ||
+	    (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
+		goto out_noprocessing;
+
+	prev_guid_set = bmc->dyn_guid_set;
+	__get_guid(intf);
+
+	prev_dyn_id_set = bmc->dyn_id_set;
+	rv = __get_device_id(intf, bmc);
+	if (rv)
+		goto out;
+
+	/*
+	 * The guid, device id, manufacturer id, and product id should
+	 * not change on a BMC.  If any of them does, we have to do
+	 * some dancing.
+	 */
+	if (!intf->bmc_registered
+	    || (!prev_guid_set && bmc->dyn_guid_set)
+	    || (!prev_dyn_id_set && bmc->dyn_id_set)
+	    || (prev_guid_set && bmc->dyn_guid_set
+		&& !guid_equal(&bmc->guid, &bmc->fetch_guid))
+	    || bmc->id.device_id != bmc->fetch_id.device_id
+	    || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
+	    || bmc->id.product_id != bmc->fetch_id.product_id) {
+		struct ipmi_device_id id = bmc->fetch_id;
+		int guid_set = bmc->dyn_guid_set;
+		guid_t guid;
+
+		guid = bmc->fetch_guid;
+		mutex_unlock(&bmc->dyn_mutex);
+
+		__ipmi_bmc_unregister(intf);
+		/* Fill in the temporary BMC for good measure. */
+		intf->bmc->id = id;
+		intf->bmc->dyn_guid_set = guid_set;
+		intf->bmc->guid = guid;
+		if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
+			need_waiter(intf); /* Retry later on an error. */
+		else
+			__scan_channels(intf, &id);
+
+
+		if (!intf_set) {
+			/*
+			 * We weren't given the interface on the
+			 * command line, so restart the operation on
+			 * the next interface for the BMC.
+			 */
+			mutex_unlock(&intf->bmc_reg_mutex);
+			mutex_lock(&bmc->dyn_mutex);
+			goto retry_bmc_lock;
+		}
+
+		/* We have a new BMC, set it up. */
+		bmc = intf->bmc;
+		mutex_lock(&bmc->dyn_mutex);
+		goto out_noprocessing;
+	} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
+		/* Version info changed, scan the channels again. */
+		__scan_channels(intf, &bmc->fetch_id);
+
+	bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
+
+out:
+	if (rv && prev_dyn_id_set) {
+		rv = 0; /* Ignore failures if we have previous data. */
+		bmc->dyn_id_set = prev_dyn_id_set;
+	}
+	if (!rv) {
+		bmc->id = bmc->fetch_id;
+		if (bmc->dyn_guid_set)
+			bmc->guid = bmc->fetch_guid;
+		else if (prev_guid_set)
+			/*
+			 * The guid used to be valid and it failed to fetch,
+			 * just use the cached value.
+			 */
+			bmc->dyn_guid_set = prev_guid_set;
+	}
+out_noprocessing:
+	if (!rv) {
+		if (id)
+			*id = bmc->id;
+
+		if (guid_set)
+			*guid_set = bmc->dyn_guid_set;
+
+		if (guid && bmc->dyn_guid_set)
+			*guid =  bmc->guid;
+	}
+
+	mutex_unlock(&bmc->dyn_mutex);
+	mutex_unlock(&intf->bmc_reg_mutex);
+
+	kref_put(&intf->refcount, intf_free);
+	return rv;
+}
+
+static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
+			     struct ipmi_device_id *id,
+			     bool *guid_set, guid_t *guid)
+{
+	return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
+}
+
+static ssize_t device_id_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+	struct ipmi_device_id id;
+	int rv;
+
+	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+	if (rv)
+		return rv;
+
+	return snprintf(buf, 10, "%u\n", id.device_id);
+}
+static DEVICE_ATTR_RO(device_id);
+
+static ssize_t provides_device_sdrs_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+	struct ipmi_device_id id;
+	int rv;
+
+	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+	if (rv)
+		return rv;
+
+	return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
+}
+static DEVICE_ATTR_RO(provides_device_sdrs);
+
+static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+	struct ipmi_device_id id;
+	int rv;
+
+	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+	if (rv)
+		return rv;
+
+	return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
+}
+static DEVICE_ATTR_RO(revision);
+
+static ssize_t firmware_revision_show(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+	struct ipmi_device_id id;
+	int rv;
+
+	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+	if (rv)
+		return rv;
+
+	return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
+			id.firmware_revision_2);
+}
+static DEVICE_ATTR_RO(firmware_revision);
+
+static ssize_t ipmi_version_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+	struct ipmi_device_id id;
+	int rv;
+
+	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+	if (rv)
+		return rv;
+
+	return snprintf(buf, 20, "%u.%u\n",
+			ipmi_version_major(&id),
+			ipmi_version_minor(&id));
+}
+static DEVICE_ATTR_RO(ipmi_version);
+
+static ssize_t add_dev_support_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+	struct ipmi_device_id id;
+	int rv;
+
+	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+	if (rv)
+		return rv;
+
+	return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
+}
+static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
+		   NULL);
+
+static ssize_t manufacturer_id_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+	struct ipmi_device_id id;
+	int rv;
+
+	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+	if (rv)
+		return rv;
+
+	return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
+}
+static DEVICE_ATTR_RO(manufacturer_id);
+
+static ssize_t product_id_show(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+	struct ipmi_device_id id;
+	int rv;
+
+	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+	if (rv)
+		return rv;
+
+	return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
+}
+static DEVICE_ATTR_RO(product_id);
+
+static ssize_t aux_firmware_rev_show(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+	struct ipmi_device_id id;
+	int rv;
+
+	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+	if (rv)
+		return rv;
+
+	return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
+			id.aux_firmware_revision[3],
+			id.aux_firmware_revision[2],
+			id.aux_firmware_revision[1],
+			id.aux_firmware_revision[0]);
+}
+static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
+
+static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+	bool guid_set;
+	guid_t guid;
+	int rv;
+
+	rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
+	if (rv)
+		return rv;
+	if (!guid_set)
+		return -ENOENT;
+
+	return snprintf(buf, 38, "%pUl\n", guid.b);
+}
+static DEVICE_ATTR_RO(guid);
+
+static struct attribute *bmc_dev_attrs[] = {
+	&dev_attr_device_id.attr,
+	&dev_attr_provides_device_sdrs.attr,
+	&dev_attr_revision.attr,
+	&dev_attr_firmware_revision.attr,
+	&dev_attr_ipmi_version.attr,
+	&dev_attr_additional_device_support.attr,
+	&dev_attr_manufacturer_id.attr,
+	&dev_attr_product_id.attr,
+	&dev_attr_aux_firmware_revision.attr,
+	&dev_attr_guid.attr,
+	NULL
+};
+
+static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
+				       struct attribute *attr, int idx)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct bmc_device *bmc = to_bmc_device(dev);
+	umode_t mode = attr->mode;
+	int rv;
+
+	if (attr == &dev_attr_aux_firmware_revision.attr) {
+		struct ipmi_device_id id;
+
+		rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+		return (!rv && id.aux_firmware_revision_set) ? mode : 0;
+	}
+	if (attr == &dev_attr_guid.attr) {
+		bool guid_set;
+
+		rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
+		return (!rv && guid_set) ? mode : 0;
+	}
+	return mode;
+}
+
+static const struct attribute_group bmc_dev_attr_group = {
+	.attrs		= bmc_dev_attrs,
+	.is_visible	= bmc_dev_attr_is_visible,
+};
+
+static const struct attribute_group *bmc_dev_attr_groups[] = {
+	&bmc_dev_attr_group,
+	NULL
+};
+
+static const struct device_type bmc_device_type = {
+	.groups		= bmc_dev_attr_groups,
+};
+
+static int __find_bmc_guid(struct device *dev, void *data)
+{
+	guid_t *guid = data;
+	struct bmc_device *bmc;
+	int rv;
+
+	if (dev->type != &bmc_device_type)
+		return 0;
+
+	bmc = to_bmc_device(dev);
+	rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
+	if (rv)
+		rv = kref_get_unless_zero(&bmc->usecount);
+	return rv;
+}
+
+/*
+ * Returns with the bmc's usecount incremented, if it is non-NULL.
+ */
+static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
+					     guid_t *guid)
+{
+	struct device *dev;
+	struct bmc_device *bmc = NULL;
+
+	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
+	if (dev) {
+		bmc = to_bmc_device(dev);
+		put_device(dev);
+	}
+	return bmc;
+}
+
+struct prod_dev_id {
+	unsigned int  product_id;
+	unsigned char device_id;
+};
+
+static int __find_bmc_prod_dev_id(struct device *dev, void *data)
+{
+	struct prod_dev_id *cid = data;
+	struct bmc_device *bmc;
+	int rv;
+
+	if (dev->type != &bmc_device_type)
+		return 0;
+
+	bmc = to_bmc_device(dev);
+	rv = (bmc->id.product_id == cid->product_id
+	      && bmc->id.device_id == cid->device_id);
+	if (rv)
+		rv = kref_get_unless_zero(&bmc->usecount);
+	return rv;
+}
+
+/*
+ * Returns with the bmc's usecount incremented, if it is non-NULL.
+ */
+static struct bmc_device *ipmi_find_bmc_prod_dev_id(
+	struct device_driver *drv,
+	unsigned int product_id, unsigned char device_id)
+{
+	struct prod_dev_id id = {
+		.product_id = product_id,
+		.device_id = device_id,
+	};
+	struct device *dev;
+	struct bmc_device *bmc = NULL;
+
+	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
+	if (dev) {
+		bmc = to_bmc_device(dev);
+		put_device(dev);
+	}
+	return bmc;
+}
+
+static DEFINE_IDA(ipmi_bmc_ida);
+
+static void
+release_bmc_device(struct device *dev)
+{
+	kfree(to_bmc_device(dev));
+}
+
+static void cleanup_bmc_work(struct work_struct *work)
+{
+	struct bmc_device *bmc = container_of(work, struct bmc_device,
+					      remove_work);
+	int id = bmc->pdev.id; /* Unregister overwrites id */
+
+	platform_device_unregister(&bmc->pdev);
+	ida_simple_remove(&ipmi_bmc_ida, id);
+}
+
+static void
+cleanup_bmc_device(struct kref *ref)
+{
+	struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
+
+	/*
+	 * Remove the platform device in a work queue to avoid issues
+	 * with removing the device attributes while reading a device
+	 * attribute.
+	 */
+	schedule_work(&bmc->remove_work);
+}
+
+/*
+ * Must be called with intf->bmc_reg_mutex held.
+ */
+static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
+{
+	struct bmc_device *bmc = intf->bmc;
+
+	if (!intf->bmc_registered)
+		return;
+
+	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+	sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
+	kfree(intf->my_dev_name);
+	intf->my_dev_name = NULL;
+
+	mutex_lock(&bmc->dyn_mutex);
+	list_del(&intf->bmc_link);
+	mutex_unlock(&bmc->dyn_mutex);
+	intf->bmc = &intf->tmp_bmc;
+	kref_put(&bmc->usecount, cleanup_bmc_device);
+	intf->bmc_registered = false;
+}
+
+static void ipmi_bmc_unregister(struct ipmi_smi *intf)
+{
+	mutex_lock(&intf->bmc_reg_mutex);
+	__ipmi_bmc_unregister(intf);
+	mutex_unlock(&intf->bmc_reg_mutex);
+}
+
+/*
+ * Must be called with intf->bmc_reg_mutex held.
+ */
+static int __ipmi_bmc_register(struct ipmi_smi *intf,
+			       struct ipmi_device_id *id,
+			       bool guid_set, guid_t *guid, int intf_num)
+{
+	int               rv;
+	struct bmc_device *bmc;
+	struct bmc_device *old_bmc;
+
+	/*
+	 * platform_device_register() can cause bmc_reg_mutex to
+	 * be claimed because of the is_visible functions of
+	 * the attributes.  Eliminate possible recursion and
+	 * release the lock.
+	 */
+	intf->in_bmc_register = true;
+	mutex_unlock(&intf->bmc_reg_mutex);
+
+	/*
+	 * Try to find an existing bmc_device struct that already
+	 * represents the BMC behind this interface.
+	 */
+	mutex_lock(&ipmidriver_mutex);
+	if (guid_set)
+		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
+	else
+		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
+						    id->product_id,
+						    id->device_id);
+
+	/*
+	 * If a matching bmc_device already exists, reuse it;
+	 * otherwise allocate and register a new BMC device.
+	 */
+	if (old_bmc) {
+		bmc = old_bmc;
+		/*
+		 * Note: old_bmc already has usecount incremented by
+		 * the BMC find functions.
+		 */
+		intf->bmc = old_bmc;
+		mutex_lock(&bmc->dyn_mutex);
+		list_add_tail(&intf->bmc_link, &bmc->intfs);
+		mutex_unlock(&bmc->dyn_mutex);
+
+		dev_info(intf->si_dev,
+			 "ipmi: interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+			 bmc->id.manufacturer_id,
+			 bmc->id.product_id,
+			 bmc->id.device_id);
+	} else {
+		bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
+		if (!bmc) {
+			rv = -ENOMEM;
+			goto out;
+		}
+		INIT_LIST_HEAD(&bmc->intfs);
+		mutex_init(&bmc->dyn_mutex);
+		INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
+
+		bmc->id = *id;
+		bmc->dyn_id_set = 1;
+		bmc->dyn_guid_set = guid_set;
+		bmc->guid = *guid;
+		bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
+
+		bmc->pdev.name = "ipmi_bmc";
+
+		rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
+		if (rv < 0)
+			goto out;
+		bmc->pdev.dev.driver = &ipmidriver.driver;
+		bmc->pdev.id = rv;
+		bmc->pdev.dev.release = release_bmc_device;
+		bmc->pdev.dev.type = &bmc_device_type;
+		kref_init(&bmc->usecount);
+
+		intf->bmc = bmc;
+		mutex_lock(&bmc->dyn_mutex);
+		list_add_tail(&intf->bmc_link, &bmc->intfs);
+		mutex_unlock(&bmc->dyn_mutex);
+
+		rv = platform_device_register(&bmc->pdev);
+		if (rv) {
+			dev_err(intf->si_dev,
+				PFX " Unable to register bmc device: %d\n",
+				rv);
+			goto out_list_del;
+		}
+
+		dev_info(intf->si_dev,
+			 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+			 bmc->id.manufacturer_id,
+			 bmc->id.product_id,
+			 bmc->id.device_id);
+	}
+
+	/*
+	 * create symlink from system interface device to bmc device
+	 * and back.
+	 */
+	rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
+	if (rv) {
+		dev_err(intf->si_dev,
+			PFX "Unable to create bmc symlink: %d\n", rv);
+		goto out_put_bmc;
+	}
+
+	if (intf_num == -1)
+		intf_num = intf->intf_num;
+	intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
+	if (!intf->my_dev_name) {
+		rv = -ENOMEM;
+		dev_err(intf->si_dev,
+			PFX "Unable to allocate link from BMC: %d\n", rv);
+		goto out_unlink1;
+	}
+
+	rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
+			       intf->my_dev_name);
+	if (rv) {
+		kfree(intf->my_dev_name);
+		intf->my_dev_name = NULL;
+		dev_err(intf->si_dev,
+			PFX "Unable to create symlink to bmc: %d\n", rv);
+		goto out_free_my_dev_name;
+	}
+
+	intf->bmc_registered = true;
+
+out:
+	mutex_unlock(&ipmidriver_mutex);
+	mutex_lock(&intf->bmc_reg_mutex);
+	intf->in_bmc_register = false;
+	return rv;
+
+out_free_my_dev_name:
+	kfree(intf->my_dev_name);
+	intf->my_dev_name = NULL;
+
+out_unlink1:
+	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+
+out_put_bmc:
+	mutex_lock(&bmc->dyn_mutex);
+	list_del(&intf->bmc_link);
+	mutex_unlock(&bmc->dyn_mutex);
+	intf->bmc = &intf->tmp_bmc;
+	kref_put(&bmc->usecount, cleanup_bmc_device);
+	goto out;
+
+out_list_del:
+	mutex_lock(&bmc->dyn_mutex);
+	list_del(&intf->bmc_link);
+	mutex_unlock(&bmc->dyn_mutex);
+	intf->bmc = &intf->tmp_bmc;
+	put_device(&bmc->pdev.dev);
+	goto out;
+}
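Note the locking shape of __ipmi_bmc_register(): it is entered with bmc_reg_mutex held, drops the mutex, and instead raises in_bmc_register so that attribute is_visible callbacks invoked from platform_device_register() check the flag rather than taking the mutex recursively. A hedged sketch of that reentrancy guard, with made-up names:

/* Reentrancy-guard sketch (hypothetical names, not the driver's API). */
mutex_lock(&obj->reg_mutex);
obj->in_register = true;	/* Callbacks test this flag...       */
mutex_unlock(&obj->reg_mutex);	/* ...instead of taking reg_mutex.   */

register_with_callbacks(obj);	/* May re-enter via is_visible().    */

mutex_lock(&obj->reg_mutex);	/* Caller expects the mutex held.    */
obj->in_register = false;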
+
+static int
+send_guid_cmd(struct ipmi_smi *intf, int chan)
+{
+	struct kernel_ipmi_msg            msg;
+	struct ipmi_system_interface_addr si;
+
+	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	si.channel = IPMI_BMC_CHANNEL;
+	si.lun = 0;
+
+	msg.netfn = IPMI_NETFN_APP_REQUEST;
+	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
+	msg.data = NULL;
+	msg.data_len = 0;
+	return i_ipmi_request(NULL,
+			      intf,
+			      (struct ipmi_addr *) &si,
+			      0,
+			      &msg,
+			      intf,
+			      NULL,
+			      NULL,
+			      0,
+			      intf->addrinfo[0].address,
+			      intf->addrinfo[0].lun,
+			      -1, 0);
+}
+
+static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
+{
+	struct bmc_device *bmc = intf->bmc;
+
+	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
+	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
+		/* Not for me */
+		return;
+
+	if (msg->msg.data[0] != 0) {
+		/* Error from getting the GUID, the BMC doesn't have one. */
+		bmc->dyn_guid_set = 0;
+		goto out;
+	}
+
+	if (msg->msg.data_len < 17) {
+		bmc->dyn_guid_set = 0;
+		dev_warn(intf->si_dev,
+			 PFX "The GUID response from the BMC was too short; it was %d bytes but should have been 17.  Assuming GUID is not available.\n",
+			 msg->msg.data_len);
+		goto out;
+	}
+
+	memcpy(bmc->fetch_guid.b, msg->msg.data + 1, 16);
+	/*
+	 * Make sure the guid data is available before setting
+	 * dyn_guid_set.
+	 */
+	smp_wmb();
+	bmc->dyn_guid_set = 1;
+ out:
+	wake_up(&intf->waitq);
+}
+
+static void __get_guid(struct ipmi_smi *intf)
+{
+	int rv;
+	struct bmc_device *bmc = intf->bmc;
+
+	bmc->dyn_guid_set = 2;
+	intf->null_user_handler = guid_handler;
+	rv = send_guid_cmd(intf, 0);
+	if (rv)
+		/* Send failed, no GUID available. */
+		bmc->dyn_guid_set = 0;
+
+	wait_event(intf->waitq, bmc->dyn_guid_set != 2);
+
+	/* dyn_guid_set makes the guid data available. */
+	smp_rmb();
+
+	intf->null_user_handler = NULL;
+}
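The smp_wmb() in guid_handler() and the smp_rmb() here form a classic publish/consume pair: the writer makes the GUID bytes visible before flipping dyn_guid_set, and the reader orders its check of the flag before any use of the bytes. Stripped of the IPMI details, the pairing looks like this (a sketch with hypothetical names):

/* Barrier-pairing sketch (hypothetical names). */
static u8 payload[16];
static int payload_ready;

static void producer(const u8 *src)
{
	memcpy(payload, src, sizeof(payload));
	smp_wmb();			/* Publish data before the flag. */
	WRITE_ONCE(payload_ready, 1);
}

static void consumer(u8 *dst)
{
	while (!READ_ONCE(payload_ready))
		cpu_relax();
	smp_rmb();			/* Pairs with the smp_wmb() above. */
	memcpy(dst, payload, sizeof(payload));
}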
+
+static int
+send_channel_info_cmd(struct ipmi_smi *intf, int chan)
+{
+	struct kernel_ipmi_msg            msg;
+	unsigned char                     data[1];
+	struct ipmi_system_interface_addr si;
+
+	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	si.channel = IPMI_BMC_CHANNEL;
+	si.lun = 0;
+
+	msg.netfn = IPMI_NETFN_APP_REQUEST;
+	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
+	msg.data = data;
+	msg.data_len = 1;
+	data[0] = chan;
+	return i_ipmi_request(NULL,
+			      intf,
+			      (struct ipmi_addr *) &si,
+			      0,
+			      &msg,
+			      intf,
+			      NULL,
+			      NULL,
+			      0,
+			      intf->addrinfo[0].address,
+			      intf->addrinfo[0].lun,
+			      -1, 0);
+}
+
+static void
+channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
+{
+	int rv = 0;
+	int ch;
+	unsigned int set = intf->curr_working_cset;
+	struct ipmi_channel *chans;
+
+	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
+	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
+		/* It's the one we want */
+		if (msg->msg.data[0] != 0) {
+			/* Got an error from the channel, just go on. */
+
+			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
+				/*
+				 * If the MC does not support this
+				 * command, that is legal.  We just
+				 * assume it has one IPMB at channel
+				 * zero.
+				 */
+				intf->wchannels[set].c[0].medium
+					= IPMI_CHANNEL_MEDIUM_IPMB;
+				intf->wchannels[set].c[0].protocol
+					= IPMI_CHANNEL_PROTOCOL_IPMB;
+
+				intf->channel_list = intf->wchannels + set;
+				intf->channels_ready = true;
+				wake_up(&intf->waitq);
+				goto out;
+			}
+			goto next_channel;
+		}
+		if (msg->msg.data_len < 4) {
+			/* Message not big enough, just go on. */
+			goto next_channel;
+		}
+		ch = intf->curr_channel;
+		chans = intf->wchannels[set].c;
+		chans[ch].medium = msg->msg.data[2] & 0x7f;
+		chans[ch].protocol = msg->msg.data[3] & 0x1f;
+
+ next_channel:
+		intf->curr_channel++;
+		if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
+			intf->channel_list = intf->wchannels + set;
+			intf->channels_ready = true;
+			wake_up(&intf->waitq);
+		} else {
+			intf->channel_list = intf->wchannels + set;
+			intf->channels_ready = true;
+			rv = send_channel_info_cmd(intf, intf->curr_channel);
+		}
+
+		if (rv) {
+			/* Got an error somehow, just give up. */
+			dev_warn(intf->si_dev,
+				 PFX "Error sending channel information for channel %d: %d\n",
+				 intf->curr_channel, rv);
+
+			intf->channel_list = intf->wchannels + set;
+			intf->channels_ready = true;
+			wake_up(&intf->waitq);
+		}
+	}
+ out:
+	return;
+}
+
+/*
+ * Must be holding intf->bmc_reg_mutex to call this.
+ */
+static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
+{
+	int rv;
+
+	if (ipmi_version_major(id) > 1
+			|| (ipmi_version_major(id) == 1
+			    && ipmi_version_minor(id) >= 5)) {
+		unsigned int set;
+
+		/*
+		 * Start scanning the channels to see what is
+		 * available.
+		 */
+		set = !intf->curr_working_cset;
+		intf->curr_working_cset = set;
+		memset(&intf->wchannels[set], 0,
+		       sizeof(struct ipmi_channel_set));
+
+		intf->null_user_handler = channel_handler;
+		intf->curr_channel = 0;
+		rv = send_channel_info_cmd(intf, 0);
+		if (rv) {
+			dev_warn(intf->si_dev,
+				 "Error sending channel information for channel 0, %d\n",
+				 rv);
+			return -EIO;
+		}
+
+		/* Wait for the channel info to be read. */
+		wait_event(intf->waitq, intf->channels_ready);
+		intf->null_user_handler = NULL;
+	} else {
+		unsigned int set = intf->curr_working_cset;
+
+		/* Assume a single IPMB channel at zero. */
+		intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
+		intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
+		intf->channel_list = intf->wchannels + set;
+		intf->channels_ready = true;
+	}
+
+	return 0;
+}
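The wchannels[]/curr_working_cset pair is a small double-buffering scheme: a rescan fills the idle copy while readers keep following channel_list, and the new copy is only published once it is complete. Roughly (a sketch with simplified types; the real code publishes under bmc_reg_mutex and gates readers on channels_ready):

/* Double-buffered publish sketch (simplified types). */
struct chan_set { int medium[IPMI_MAX_CHANNELS]; };

static struct chan_set wchannels[2];
static struct chan_set *channel_list;		/* What readers follow. */
static unsigned int curr_working_cset;

static void rescan_channels(void)
{
	unsigned int set = !curr_working_cset;	/* Fill the idle copy. */

	memset(&wchannels[set], 0, sizeof(wchannels[set]));
	/* ... populate wchannels[set] from Get Channel Info replies ... */
	curr_working_cset = set;
	smp_wmb();				/* Data before pointer. */
	WRITE_ONCE(channel_list, &wchannels[set]);
}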
+
+static void ipmi_poll(struct ipmi_smi *intf)
+{
+	if (intf->handlers->poll)
+		intf->handlers->poll(intf->send_info);
+	/* In case something came in */
+	handle_new_recv_msgs(intf);
+}
+
+void ipmi_poll_interface(struct ipmi_user *user)
+{
+	ipmi_poll(user->intf);
+}
+EXPORT_SYMBOL(ipmi_poll_interface);
+
+static void redo_bmc_reg(struct work_struct *work)
+{
+	struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
+					     bmc_reg_work);
+
+	if (!intf->in_shutdown)
+		bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
+
+	kref_put(&intf->refcount, intf_free);
+}
+
+int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
+		      void		       *send_info,
+		      struct device            *si_dev,
+		      unsigned char            slave_addr)
+{
+	int              i, j;
+	int              rv;
+	struct ipmi_smi *intf, *tintf;
+	struct list_head *link;
+	struct ipmi_device_id id;
+
+	/*
+	 * Make sure the driver is actually initialized; this handles
+	 * problems with initialization order.
+	 */
+	if (!initialized) {
+		rv = ipmi_init_msghandler();
+		if (rv)
+			return rv;
+		/*
+		 * The init code doesn't return an error if it was turned
+		 * off, but it won't have initialized anything.  Check for
+		 * that.
+		 */
+		if (!initialized)
+			return -ENODEV;
+	}
+
+	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
+	if (!intf)
+		return -ENOMEM;
+
+	rv = init_srcu_struct(&intf->users_srcu);
+	if (rv) {
+		kfree(intf);
+		return rv;
+	}
+
+	intf->bmc = &intf->tmp_bmc;
+	INIT_LIST_HEAD(&intf->bmc->intfs);
+	mutex_init(&intf->bmc->dyn_mutex);
+	INIT_LIST_HEAD(&intf->bmc_link);
+	mutex_init(&intf->bmc_reg_mutex);
+	intf->intf_num = -1; /* Mark it invalid for now. */
+	kref_init(&intf->refcount);
+	INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
+	intf->si_dev = si_dev;
+	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
+		intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
+		intf->addrinfo[j].lun = 2;
+	}
+	if (slave_addr != 0)
+		intf->addrinfo[0].address = slave_addr;
+	INIT_LIST_HEAD(&intf->users);
+	intf->handlers = handlers;
+	intf->send_info = send_info;
+	spin_lock_init(&intf->seq_lock);
+	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
+		intf->seq_table[j].inuse = 0;
+		intf->seq_table[j].seqid = 0;
+	}
+	intf->curr_seq = 0;
+	spin_lock_init(&intf->waiting_rcv_msgs_lock);
+	INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
+	tasklet_init(&intf->recv_tasklet,
+		     smi_recv_tasklet,
+		     (unsigned long) intf);
+	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
+	spin_lock_init(&intf->xmit_msgs_lock);
+	INIT_LIST_HEAD(&intf->xmit_msgs);
+	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
+	spin_lock_init(&intf->events_lock);
+	atomic_set(&intf->event_waiters, 0);
+	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+	INIT_LIST_HEAD(&intf->waiting_events);
+	intf->waiting_events_count = 0;
+	mutex_init(&intf->cmd_rcvrs_mutex);
+	spin_lock_init(&intf->maintenance_mode_lock);
+	INIT_LIST_HEAD(&intf->cmd_rcvrs);
+	init_waitqueue_head(&intf->waitq);
+	for (i = 0; i < IPMI_NUM_STATS; i++)
+		atomic_set(&intf->stats[i], 0);
+
+	mutex_lock(&ipmi_interfaces_mutex);
+	/* Look for a hole in the numbers. */
+	i = 0;
+	link = &ipmi_interfaces;
+	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
+		if (tintf->intf_num != i) {
+			link = &tintf->link;
+			break;
+		}
+		i++;
+	}
+	/* Add the new interface in numeric order. */
+	if (i == 0)
+		list_add_rcu(&intf->link, &ipmi_interfaces);
+	else
+		list_add_tail_rcu(&intf->link, link);
+
+	rv = handlers->start_processing(send_info, intf);
+	if (rv)
+		goto out_err;
+
+	rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
+	if (rv) {
+		dev_err(si_dev, "Unable to get the device id: %d\n", rv);
+		goto out_err_started;
+	}
+
+	mutex_lock(&intf->bmc_reg_mutex);
+	rv = __scan_channels(intf, &id);
+	mutex_unlock(&intf->bmc_reg_mutex);
+	if (rv)
+		goto out_err_bmc_reg;
+
+	/*
+	 * Keep memory order straight for RCU readers.  Make
+	 * sure everything else is committed to memory before
+	 * setting intf_num to mark the interface valid.
+	 */
+	smp_wmb();
+	intf->intf_num = i;
+	mutex_unlock(&ipmi_interfaces_mutex);
+
+	/* After this point the interface is legal to use. */
+	call_smi_watchers(i, intf->si_dev);
+
+	return 0;
+
+ out_err_bmc_reg:
+	ipmi_bmc_unregister(intf);
+ out_err_started:
+	if (intf->handlers->shutdown)
+		intf->handlers->shutdown(intf->send_info);
+ out_err:
+	list_del_rcu(&intf->link);
+	mutex_unlock(&ipmi_interfaces_mutex);
+	synchronize_srcu(&ipmi_interfaces_srcu);
+	cleanup_srcu_struct(&intf->users_srcu);
+	kref_put(&intf->refcount, intf_free);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_register_smi);
+
+static void deliver_smi_err_response(struct ipmi_smi *intf,
+				     struct ipmi_smi_msg *msg,
+				     unsigned char err)
+{
+	msg->rsp[0] = msg->data[0] | 4;
+	msg->rsp[1] = msg->data[1];
+	msg->rsp[2] = err;
+	msg->rsp_size = 3;
+	/* It's an error, so it will never requeue, no need to check return. */
+	handle_one_recv_msg(intf, msg);
+}
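The `msg->data[0] | 4` above relies on the IPMI wire encoding of the first byte: bits 7..2 carry the network function and bits 1..0 the LUN, and request netfns are even, so setting bit 2 (netfn bit 0) turns a request byte into the matching response byte. Small helpers make the intent explicit (illustrative only):

/* Illustrative decoding of the netfn/LUN byte. */
static inline unsigned char nl_netfn(unsigned char b) { return b >> 2; }
static inline unsigned char nl_lun(unsigned char b)   { return b & 3; }

/* Request netfns are even; OR-ing in (1 << 2) yields the response netfn. */
static inline unsigned char nl_to_response(unsigned char b)
{
	return b | (1 << 2);
}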
+
+static void cleanup_smi_msgs(struct ipmi_smi *intf)
+{
+	int              i;
+	struct seq_table *ent;
+	struct ipmi_smi_msg *msg;
+	struct list_head *entry;
+	struct list_head tmplist;
+
+	/* Clear out our transmit queues and hold the messages. */
+	INIT_LIST_HEAD(&tmplist);
+	list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
+	list_splice_tail(&intf->xmit_msgs, &tmplist);
+
+	/* Current message first, to preserve order */
+	while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
+		/* Wait for the message to clear out. */
+		schedule_timeout(1);
+	}
+
+	/* No need for locks, the interface is down. */
+
+	/*
+	 * Return errors for all pending messages in queue and in the
+	 * tables waiting for remote responses.
+	 */
+	while (!list_empty(&tmplist)) {
+		entry = tmplist.next;
+		list_del(entry);
+		msg = list_entry(entry, struct ipmi_smi_msg, link);
+		deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
+	}
+
+	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+		ent = &intf->seq_table[i];
+		if (!ent->inuse)
+			continue;
+		deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
+	}
+}
+
+void ipmi_unregister_smi(struct ipmi_smi *intf)
+{
+	struct ipmi_smi_watcher *w;
+	int intf_num = intf->intf_num, index;
+
+	mutex_lock(&ipmi_interfaces_mutex);
+	intf->intf_num = -1;
+	intf->in_shutdown = true;
+	list_del_rcu(&intf->link);
+	mutex_unlock(&ipmi_interfaces_mutex);
+	synchronize_srcu(&ipmi_interfaces_srcu);
+
+	/* At this point no users can be added to the interface. */
+
+	/*
+	 * Call all the watcher interfaces to tell them that
+	 * an interface is going away.
+	 */
+	mutex_lock(&smi_watchers_mutex);
+	list_for_each_entry(w, &smi_watchers, link)
+		w->smi_gone(intf_num);
+	mutex_unlock(&smi_watchers_mutex);
+
+	index = srcu_read_lock(&intf->users_srcu);
+	while (!list_empty(&intf->users)) {
+		struct ipmi_user *user =
+			container_of(list_next_rcu(&intf->users),
+				     struct ipmi_user, link);
+
+		_ipmi_destroy_user(user);
+	}
+	srcu_read_unlock(&intf->users_srcu, index);
+
+	if (intf->handlers->shutdown)
+		intf->handlers->shutdown(intf->send_info);
+
+	cleanup_smi_msgs(intf);
+
+	ipmi_bmc_unregister(intf);
+
+	cleanup_srcu_struct(&intf->users_srcu);
+	kref_put(&intf->refcount, intf_free);
+}
+EXPORT_SYMBOL(ipmi_unregister_smi);
+
+static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
+				   struct ipmi_smi_msg *msg)
+{
+	struct ipmi_ipmb_addr ipmb_addr;
+	struct ipmi_recv_msg  *recv_msg;
+
+	/*
+	 * This is 11, not 10, because the response must contain a
+	 * completion code.
+	 */
+	if (msg->rsp_size < 11) {
+		/* Message not big enough, just ignore it. */
+		ipmi_inc_stat(intf, invalid_ipmb_responses);
+		return 0;
+	}
+
+	if (msg->rsp[2] != 0) {
+		/* An error getting the response, just ignore it. */
+		return 0;
+	}
+
+	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
+	ipmb_addr.slave_addr = msg->rsp[6];
+	ipmb_addr.channel = msg->rsp[3] & 0x0f;
+	ipmb_addr.lun = msg->rsp[7] & 3;
+
+	/*
+	 * It's a response from a remote entity.  Look up the sequence
+	 * number and handle the response.
+	 */
+	if (intf_find_seq(intf,
+			  msg->rsp[7] >> 2,
+			  msg->rsp[3] & 0x0f,
+			  msg->rsp[8],
+			  (msg->rsp[4] >> 2) & (~1),
+			  (struct ipmi_addr *) &ipmb_addr,
+			  &recv_msg)) {
+		/*
+		 * We were unable to find the sequence number,
+		 * so just nuke the message.
+		 */
+		ipmi_inc_stat(intf, unhandled_ipmb_responses);
+		return 0;
+	}
+
+	memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
+	/*
+	 * The other fields matched, so no need to set them, except
+	 * for netfn, which needs to be the response that was
+	 * returned, not the request value.
+	 */
+	recv_msg->msg.netfn = msg->rsp[4] >> 2;
+	recv_msg->msg.data = recv_msg->msg_data;
+	recv_msg->msg.data_len = msg->rsp_size - 10;
+	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+	if (deliver_response(intf, recv_msg))
+		ipmi_inc_stat(intf, unhandled_ipmb_responses);
+	else
+		ipmi_inc_stat(intf, handled_ipmb_responses);
+
+	return 0;
+}
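The byte offsets used above imply the following layout for a Get Message response carrying an IPMB response (reconstructed from the indexing in this function, not quoted from the spec):

/*
 * rsp[0]  netfn/LUN (App response)    rsp[6]     responder slave addr
 * rsp[1]  cmd (Get Message)           rsp[7]     seq (bits 7..2) /
 * rsp[2]  completion code                        LUN (bits 1..0)
 * rsp[3]  channel in the low nibble   rsp[8]     cmd being answered
 * rsp[4]  payload netfn/LUN           rsp[9..]   response data
 * rsp[5]  checksum 1                  rsp[last]  data checksum
 */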
+
+static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
+				   struct ipmi_smi_msg *msg)
+{
+	struct cmd_rcvr          *rcvr;
+	int                      rv = 0;
+	unsigned char            netfn;
+	unsigned char            cmd;
+	unsigned char            chan;
+	struct ipmi_user         *user = NULL;
+	struct ipmi_ipmb_addr    *ipmb_addr;
+	struct ipmi_recv_msg     *recv_msg;
+
+	if (msg->rsp_size < 10) {
+		/* Message not big enough, just ignore it. */
+		ipmi_inc_stat(intf, invalid_commands);
+		return 0;
+	}
+
+	if (msg->rsp[2] != 0) {
+		/* An error getting the response, just ignore it. */
+		return 0;
+	}
+
+	netfn = msg->rsp[4] >> 2;
+	cmd = msg->rsp[8];
+	chan = msg->rsp[3] & 0xf;
+
+	rcu_read_lock();
+	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+	if (rcvr) {
+		user = rcvr->user;
+		kref_get(&user->refcount);
+	} else
+		user = NULL;
+	rcu_read_unlock();
+
+	if (user == NULL) {
+		/* We didn't find a user, deliver an error response. */
+		ipmi_inc_stat(intf, unhandled_commands);
+
+		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+		msg->data[1] = IPMI_SEND_MSG_CMD;
+		msg->data[2] = msg->rsp[3];
+		msg->data[3] = msg->rsp[6];
+		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
+		msg->data[5] = ipmb_checksum(&msg->data[3], 2);
+		msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
+		/* rqseq/lun */
+		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
+		msg->data[8] = msg->rsp[8]; /* cmd */
+		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
+		msg->data[10] = ipmb_checksum(&msg->data[6], 4);
+		msg->data_size = 11;
+
+		ipmi_debug_msg("Invalid command:", msg->data, msg->data_size);
+
+		rcu_read_lock();
+		if (!intf->in_shutdown) {
+			smi_send(intf, intf->handlers, msg, 0);
+			/*
+			 * We used the message, so return the value
+			 * that causes it to not be freed or
+			 * queued.
+			 */
+			rv = -1;
+		}
+		rcu_read_unlock();
+	} else {
+		recv_msg = ipmi_alloc_recv_msg();
+		if (!recv_msg) {
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling
+			 * later.
+			 */
+			rv = 1;
+			kref_put(&user->refcount, free_user);
+		} else {
+			/* Extract the source address from the data. */
+			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+			ipmb_addr->slave_addr = msg->rsp[6];
+			ipmb_addr->lun = msg->rsp[7] & 3;
+			ipmb_addr->channel = msg->rsp[3] & 0xf;
+
+			/*
+			 * Extract the rest of the message information
+			 * from the IPMB header.
+			 */
+			recv_msg->user = user;
+			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+			recv_msg->msgid = msg->rsp[7] >> 2;
+			recv_msg->msg.netfn = msg->rsp[4] >> 2;
+			recv_msg->msg.cmd = msg->rsp[8];
+			recv_msg->msg.data = recv_msg->msg_data;
+
+			/*
+			 * We chop off 10, not 9, bytes because the checksum
+			 * at the end also needs to be removed.
+			 */
+			recv_msg->msg.data_len = msg->rsp_size - 10;
+			memcpy(recv_msg->msg_data, &msg->rsp[9],
+			       msg->rsp_size - 10);
+			if (deliver_response(intf, recv_msg))
+				ipmi_inc_stat(intf, unhandled_commands);
+			else
+				ipmi_inc_stat(intf, handled_commands);
+		}
+	}
+
+	return rv;
+}
+
+static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
+				  struct ipmi_smi_msg *msg)
+{
+	struct ipmi_lan_addr  lan_addr;
+	struct ipmi_recv_msg  *recv_msg;
+
+	/*
+	 * This is 13, not 12, because the response must contain a
+	 * completion code.
+	 */
+	if (msg->rsp_size < 13) {
+		/* Message not big enough, just ignore it. */
+		ipmi_inc_stat(intf, invalid_lan_responses);
+		return 0;
+	}
+
+	if (msg->rsp[2] != 0) {
+		/* An error getting the response, just ignore it. */
+		return 0;
+	}
+
+	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
+	lan_addr.session_handle = msg->rsp[4];
+	lan_addr.remote_SWID = msg->rsp[8];
+	lan_addr.local_SWID = msg->rsp[5];
+	lan_addr.channel = msg->rsp[3] & 0x0f;
+	lan_addr.privilege = msg->rsp[3] >> 4;
+	lan_addr.lun = msg->rsp[9] & 3;
+
+	/*
+	 * It's a response from a remote entity.  Look up the sequence
+	 * number and handle the response.
+	 */
+	if (intf_find_seq(intf,
+			  msg->rsp[9] >> 2,
+			  msg->rsp[3] & 0x0f,
+			  msg->rsp[10],
+			  (msg->rsp[6] >> 2) & (~1),
+			  (struct ipmi_addr *) &lan_addr,
+			  &recv_msg)) {
+		/*
+		 * We were unable to find the sequence number,
+		 * so just nuke the message.
+		 */
+		ipmi_inc_stat(intf, unhandled_lan_responses);
+		return 0;
+	}
+
+	memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
+	/*
+	 * The other fields matched, so no need to set them, except
+	 * for netfn, which needs to be the response that was
+	 * returned, not the request value.
+	 */
+	recv_msg->msg.netfn = msg->rsp[6] >> 2;
+	recv_msg->msg.data = recv_msg->msg_data;
+	recv_msg->msg.data_len = msg->rsp_size - 12;
+	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+	if (deliver_response(intf, recv_msg))
+		ipmi_inc_stat(intf, unhandled_lan_responses);
+	else
+		ipmi_inc_stat(intf, handled_lan_responses);
+
+	return 0;
+}
+
+static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
+				  struct ipmi_smi_msg *msg)
+{
+	struct cmd_rcvr          *rcvr;
+	int                      rv = 0;
+	unsigned char            netfn;
+	unsigned char            cmd;
+	unsigned char            chan;
+	struct ipmi_user         *user = NULL;
+	struct ipmi_lan_addr     *lan_addr;
+	struct ipmi_recv_msg     *recv_msg;
+
+	if (msg->rsp_size < 12) {
+		/* Message not big enough, just ignore it. */
+		ipmi_inc_stat(intf, invalid_commands);
+		return 0;
+	}
+
+	if (msg->rsp[2] != 0) {
+		/* An error getting the response, just ignore it. */
+		return 0;
+	}
+
+	netfn = msg->rsp[6] >> 2;
+	cmd = msg->rsp[10];
+	chan = msg->rsp[3] & 0xf;
+
+	rcu_read_lock();
+	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+	if (rcvr) {
+		user = rcvr->user;
+		kref_get(&user->refcount);
+	} else
+		user = NULL;
+	rcu_read_unlock();
+
+	if (user == NULL) {
+		/* We didn't find a user, just give up. */
+		ipmi_inc_stat(intf, unhandled_commands);
+
+		/*
+		 * Don't do anything with these messages, just allow
+		 * them to be freed.
+		 */
+		rv = 0;
+	} else {
+		recv_msg = ipmi_alloc_recv_msg();
+		if (!recv_msg) {
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling later.
+			 */
+			rv = 1;
+			kref_put(&user->refcount, free_user);
+		} else {
+			/* Extract the source address from the data. */
+			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+			lan_addr->session_handle = msg->rsp[4];
+			lan_addr->remote_SWID = msg->rsp[8];
+			lan_addr->local_SWID = msg->rsp[5];
+			lan_addr->lun = msg->rsp[9] & 3;
+			lan_addr->channel = msg->rsp[3] & 0xf;
+			lan_addr->privilege = msg->rsp[3] >> 4;
+
+			/*
+			 * Extract the rest of the message information
+			 * from the LAN message header.
+			 */
+			recv_msg->user = user;
+			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+			recv_msg->msgid = msg->rsp[9] >> 2;
+			recv_msg->msg.netfn = msg->rsp[6] >> 2;
+			recv_msg->msg.cmd = msg->rsp[10];
+			recv_msg->msg.data = recv_msg->msg_data;
+
+			/*
+			 * We chop off 12, not 11, bytes because the checksum
+			 * at the end also needs to be removed.
+			 */
+			recv_msg->msg.data_len = msg->rsp_size - 12;
+			memcpy(recv_msg->msg_data, &msg->rsp[11],
+			       msg->rsp_size - 12);
+			if (deliver_response(intf, recv_msg))
+				ipmi_inc_stat(intf, unhandled_commands);
+			else
+				ipmi_inc_stat(intf, handled_commands);
+		}
+	}
+
+	return rv;
+}
+
+/*
+ * This routine will handle "Get Message" command responses with
+ * channels that use an OEM Medium. The message format belongs to
+ * the OEM.  See IPMI 2.0 specification, Chapter 6 and
+ * Chapter 22, sections 22.6 and 22.24 for more details.
+ */
+static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
+				  struct ipmi_smi_msg *msg)
+{
+	struct cmd_rcvr       *rcvr;
+	int                   rv = 0;
+	unsigned char         netfn;
+	unsigned char         cmd;
+	unsigned char         chan;
+	struct ipmi_user *user = NULL;
+	struct ipmi_system_interface_addr *smi_addr;
+	struct ipmi_recv_msg  *recv_msg;
+
+	/*
+	 * We expect the OEM SW to perform error checking,
+	 * so we just do some basic sanity checks here.
+	 */
+	if (msg->rsp_size < 4) {
+		/* Message not big enough, just ignore it. */
+		ipmi_inc_stat(intf, invalid_commands);
+		return 0;
+	}
+
+	if (msg->rsp[2] != 0) {
+		/* An error getting the response, just ignore it. */
+		return 0;
+	}
+
+	/*
+	 * This is an OEM message, so the OEM needs to know how
+	 * to handle it.  We do no interpretation.
+	 */
+	netfn = msg->rsp[0] >> 2;
+	cmd = msg->rsp[1];
+	chan = msg->rsp[3] & 0xf;
+
+	rcu_read_lock();
+	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+	if (rcvr) {
+		user = rcvr->user;
+		kref_get(&user->refcount);
+	} else
+		user = NULL;
+	rcu_read_unlock();
+
+	if (user == NULL) {
+		/* We didn't find a user, just give up. */
+		ipmi_inc_stat(intf, unhandled_commands);
+
+		/*
+		 * Don't do anything with these messages, just allow
+		 * them to be freed.
+		 */
+
+		rv = 0;
+	} else {
+		recv_msg = ipmi_alloc_recv_msg();
+		if (!recv_msg) {
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling
+			 * later.
+			 */
+			rv = 1;
+			kref_put(&user->refcount, free_user);
+		} else {
+			/*
+			 * OEM Messages are expected to be delivered via
+			 * the system interface to SMS software.  We might
+			 * need to visit this again depending on OEM
+			 * requirements.
+			 */
+			smi_addr = ((struct ipmi_system_interface_addr *)
+				    &recv_msg->addr);
+			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+			smi_addr->channel = IPMI_BMC_CHANNEL;
+			smi_addr->lun = msg->rsp[0] & 3;
+
+			recv_msg->user = user;
+			recv_msg->user_msg_data = NULL;
+			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+			recv_msg->msg.netfn = msg->rsp[0] >> 2;
+			recv_msg->msg.cmd = msg->rsp[1];
+			recv_msg->msg.data = recv_msg->msg_data;
+
+			/*
+			 * The message starts at byte 4, which follows the
+			 * Channel byte in the "Get Message" response.
+			 */
+			recv_msg->msg.data_len = msg->rsp_size - 4;
+			memcpy(recv_msg->msg_data, &msg->rsp[4],
+			       msg->rsp_size - 4);
+			if (deliver_response(intf, recv_msg))
+				ipmi_inc_stat(intf, unhandled_commands);
+			else
+				ipmi_inc_stat(intf, handled_commands);
+		}
+	}
+
+	return rv;
+}
+
+static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
+				     struct ipmi_smi_msg  *msg)
+{
+	struct ipmi_system_interface_addr *smi_addr;
+
+	recv_msg->msgid = 0;
+	smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
+	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr->channel = IPMI_BMC_CHANNEL;
+	smi_addr->lun = msg->rsp[0] & 3;
+	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
+	recv_msg->msg.netfn = msg->rsp[0] >> 2;
+	recv_msg->msg.cmd = msg->rsp[1];
+	memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
+	recv_msg->msg.data = recv_msg->msg_data;
+	recv_msg->msg.data_len = msg->rsp_size - 3;
+}
+
+static int handle_read_event_rsp(struct ipmi_smi *intf,
+				 struct ipmi_smi_msg *msg)
+{
+	struct ipmi_recv_msg *recv_msg, *recv_msg2;
+	struct list_head     msgs;
+	struct ipmi_user     *user;
+	int rv = 0, deliver_count = 0, index;
+	unsigned long        flags;
+
+	if (msg->rsp_size < 19) {
+		/* Message is too small to be an IPMB event. */
+		ipmi_inc_stat(intf, invalid_events);
+		return 0;
+	}
+
+	if (msg->rsp[2] != 0) {
+		/* An error getting the event, just ignore it. */
+		return 0;
+	}
+
+	INIT_LIST_HEAD(&msgs);
+
+	spin_lock_irqsave(&intf->events_lock, flags);
+
+	ipmi_inc_stat(intf, events);
+
+	/*
+	 * Allocate and fill in one message for every user that is
+	 * getting events.
+	 */
+	index = srcu_read_lock(&intf->users_srcu);
+	list_for_each_entry_rcu(user, &intf->users, link) {
+		if (!user->gets_events)
+			continue;
+
+		recv_msg = ipmi_alloc_recv_msg();
+		if (!recv_msg) {
+			srcu_read_unlock(&intf->users_srcu, index);
+			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
+						 link) {
+				list_del(&recv_msg->link);
+				ipmi_free_recv_msg(recv_msg);
+			}
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling
+			 * later.
+			 */
+			rv = 1;
+			goto out;
+		}
+
+		deliver_count++;
+
+		copy_event_into_recv_msg(recv_msg, msg);
+		recv_msg->user = user;
+		kref_get(&user->refcount);
+		list_add_tail(&recv_msg->link, &msgs);
+	}
+	srcu_read_unlock(&intf->users_srcu, index);
+
+	if (deliver_count) {
+		/* Now deliver all the messages. */
+		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
+			list_del(&recv_msg->link);
+			deliver_local_response(intf, recv_msg);
+		}
+	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
+		/*
+		 * No one is waiting for the message; put it in the queue
+		 * unless there are already too many things in it.
+		 */
+		recv_msg = ipmi_alloc_recv_msg();
+		if (!recv_msg) {
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling
+			 * later.
+			 */
+			rv = 1;
+			goto out;
+		}
+
+		copy_event_into_recv_msg(recv_msg, msg);
+		list_add_tail(&recv_msg->link, &intf->waiting_events);
+		intf->waiting_events_count++;
+	} else if (!intf->event_msg_printed) {
+		/*
+		 * There are too many things in the queue; discard this
+		 * message.
+		 */
+		dev_warn(intf->si_dev,
+			 PFX "Event queue full, discarding incoming events\n");
+		intf->event_msg_printed = 1;
+	}
+
+ out:
+	spin_unlock_irqrestore(&intf->events_lock, flags);
+
+	return rv;
+}
+
+static int handle_bmc_rsp(struct ipmi_smi *intf,
+			  struct ipmi_smi_msg *msg)
+{
+	struct ipmi_recv_msg *recv_msg;
+	struct ipmi_system_interface_addr *smi_addr;
+
+	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
+	if (recv_msg == NULL) {
+		dev_warn(intf->si_dev,
+			 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
+		return 0;
+	}
+
+	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+	recv_msg->msgid = msg->msgid;
+	smi_addr = ((struct ipmi_system_interface_addr *)
+		    &recv_msg->addr);
+	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr->channel = IPMI_BMC_CHANNEL;
+	smi_addr->lun = msg->rsp[0] & 3;
+	recv_msg->msg.netfn = msg->rsp[0] >> 2;
+	recv_msg->msg.cmd = msg->rsp[1];
+	memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
+	recv_msg->msg.data = recv_msg->msg_data;
+	recv_msg->msg.data_len = msg->rsp_size - 2;
+	deliver_local_response(intf, recv_msg);
+
+	return 0;
+}
+
+/*
+ * Handle a received message.  Return 1 if the message should be requeued,
+ * 0 if the message should be freed, or -1 if the message should not
+ * be freed or requeued.
+ */
+static int handle_one_recv_msg(struct ipmi_smi *intf,
+			       struct ipmi_smi_msg *msg)
+{
+	int requeue;
+	int chan;
+
+	ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
+	if (msg->rsp_size < 2) {
+		/* Message is too small to be correct. */
+		dev_warn(intf->si_dev,
+			 PFX "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
+			 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+
+		/* Generate an error response for the message. */
+		msg->rsp[0] = msg->data[0] | (1 << 2);
+		msg->rsp[1] = msg->data[1];
+		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+		msg->rsp_size = 3;
+	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
+		   || (msg->rsp[1] != msg->data[1])) {
+		/*
+		 * The NetFN and Command in the response is not even
+		 * marginally correct.
+		 */
+		dev_warn(intf->si_dev,
+			 PFX "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
+			 (msg->data[0] >> 2) | 1, msg->data[1],
+			 msg->rsp[0] >> 2, msg->rsp[1]);
+
+		/* Generate an error response for the message. */
+		msg->rsp[0] = msg->data[0] | (1 << 2);
+		msg->rsp[1] = msg->data[1];
+		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+		msg->rsp_size = 3;
+	}
+
+	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
+	    && (msg->user_data != NULL)) {
+		/*
+		 * It's a response to a response we sent.  For this we
+		 * deliver a send message response to the user.
+		 */
+		struct ipmi_recv_msg *recv_msg = msg->user_data;
+
+		requeue = 0;
+		if (msg->rsp_size < 2)
+			/* Message is too small to be correct. */
+			goto out;
+
+		chan = msg->data[2] & 0x0f;
+		if (chan >= IPMI_MAX_CHANNELS)
+			/* Invalid channel number */
+			goto out;
+
+		if (!recv_msg)
+			goto out;
+
+		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
+		recv_msg->msg.data = recv_msg->msg_data;
+		recv_msg->msg.data_len = 1;
+		recv_msg->msg_data[0] = msg->rsp[2];
+		deliver_local_response(intf, recv_msg);
+	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
+		struct ipmi_channel   *chans;
+
+		/* It's from the receive queue. */
+		chan = msg->rsp[3] & 0xf;
+		if (chan >= IPMI_MAX_CHANNELS) {
+			/* Invalid channel number */
+			requeue = 0;
+			goto out;
+		}
+
+		/*
+		 * We need to make sure the channels have been initialized.
+		 * The channel_handler routine will set the "curr_channel"
+		 * equal to or greater than IPMI_MAX_CHANNELS when all the
+		 * channels for this interface have been initialized.
+		 */
+		if (!intf->channels_ready) {
+			requeue = 0; /* Throw the message away */
+			goto out;
+		}
+
+		chans = READ_ONCE(intf->channel_list)->c;
+
+		switch (chans[chan].medium) {
+		case IPMI_CHANNEL_MEDIUM_IPMB:
+			if (msg->rsp[4] & 0x04) {
+				/*
+				 * It's a response, so find the
+				 * requesting message and send it up.
+				 */
+				requeue = handle_ipmb_get_msg_rsp(intf, msg);
+			} else {
+				/*
+				 * It's a command to the SMS from some other
+				 * entity.  Handle that.
+				 */
+				requeue = handle_ipmb_get_msg_cmd(intf, msg);
+			}
+			break;
+
+		case IPMI_CHANNEL_MEDIUM_8023LAN:
+		case IPMI_CHANNEL_MEDIUM_ASYNC:
+			if (msg->rsp[6] & 0x04) {
+				/*
+				 * It's a response, so find the
+				 * requesting message and send it up.
+				 */
+				requeue = handle_lan_get_msg_rsp(intf, msg);
+			} else {
+				/*
+				 * It's a command to the SMS from some other
+				 * entity.  Handle that.
+				 */
+				requeue = handle_lan_get_msg_cmd(intf, msg);
+			}
+			break;
+
+		default:
+			/*
+			 * Check for OEM channels.  Clients had better
+			 * register for these commands.
+			 */
+			if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
+			    && (chans[chan].medium
+				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
+				requeue = handle_oem_get_msg_cmd(intf, msg);
+			} else {
+				/*
+				 * We don't handle the channel type, so just
+				 * free the message.
+				 */
+				requeue = 0;
+			}
+		}
+
+	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
+		/* It's an asynchronous event. */
+		requeue = handle_read_event_rsp(intf, msg);
+	} else {
+		/* It's a response from the local BMC. */
+		requeue = handle_bmc_rsp(intf, msg);
+	}
+
+ out:
+	return requeue;
+}
+
+/*
+ * If there are messages in the queue or pretimeouts, handle them.
+ */
+static void handle_new_recv_msgs(struct ipmi_smi *intf)
+{
+	struct ipmi_smi_msg  *smi_msg;
+	unsigned long        flags = 0;
+	int                  rv;
+	int                  run_to_completion = intf->run_to_completion;
+
+	/* See if any waiting messages need to be processed. */
+	if (!run_to_completion)
+		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+	while (!list_empty(&intf->waiting_rcv_msgs)) {
+		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
+				     struct ipmi_smi_msg, link);
+		list_del(&smi_msg->link);
+		if (!run_to_completion)
+			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+					       flags);
+		rv = handle_one_recv_msg(intf, smi_msg);
+		if (!run_to_completion)
+			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+		if (rv > 0) {
+			/*
+			 * To preserve message order, quit if we
+			 * can't handle a message.  Add the message
+			 * back at the head, this is safe because this
+			 * tasklet is the only thing that pulls the
+			 * messages.
+			 */
+			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
+			break;
+		} else {
+			if (rv == 0)
+				/* Message handled */
+				ipmi_free_smi_msg(smi_msg);
+			/* If rv < 0, fatal error, del but don't free. */
+		}
+	}
+	if (!run_to_completion)
+		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
+
+	/*
+	 * If the pretimeout count is non-zero, decrement one from it and
+	 * deliver pretimeouts to all the users.
+	 */
+	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+		struct ipmi_user *user;
+		int index;
+
+		index = srcu_read_lock(&intf->users_srcu);
+		list_for_each_entry_rcu(user, &intf->users, link) {
+			if (user->handler->ipmi_watchdog_pretimeout)
+				user->handler->ipmi_watchdog_pretimeout(
+					user->handler_data);
+		}
+		srcu_read_unlock(&intf->users_srcu, index);
+	}
+}
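The pretimeout delivery above uses atomic_add_unless(&v, -1, 0) as a token consume: it decrements the counter only if it is non-zero and returns true exactly when it made the change, so concurrent tasklet runs cannot deliver more pretimeouts than were posted. The bare pattern, as a sketch:

/* Token-consume sketch using atomic_add_unless(). */
static atomic_t tokens = ATOMIC_INIT(0);

static void post_token(void)
{
	atomic_set(&tokens, 1);		/* At most one pending, as above. */
}

static void maybe_consume(void)
{
	if (atomic_add_unless(&tokens, -1, 0)) {
		/* Exactly one consumer wins per posted token. */
	}
}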
+
+static void smi_recv_tasklet(unsigned long val)
+{
+	unsigned long flags = 0; /* keep us warning-free. */
+	struct ipmi_smi *intf = (struct ipmi_smi *) val;
+	int run_to_completion = intf->run_to_completion;
+	struct ipmi_smi_msg *newmsg = NULL;
+
+	/*
+	 * Start the next message if available.
+	 *
+	 * Do this here, not in the actual receiver, because starting it
+	 * there could deadlock: the lower layer is allowed to hold locks
+	 * while calling message delivery.
+	 */
+
+	rcu_read_lock();
+
+	if (!run_to_completion)
+		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+	if (intf->curr_msg == NULL && !intf->in_shutdown) {
+		struct list_head *entry = NULL;
+
+		/* Pick the high priority queue first. */
+		if (!list_empty(&intf->hp_xmit_msgs))
+			entry = intf->hp_xmit_msgs.next;
+		else if (!list_empty(&intf->xmit_msgs))
+			entry = intf->xmit_msgs.next;
+
+		if (entry) {
+			list_del(entry);
+			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
+			intf->curr_msg = newmsg;
+		}
+	}
+	if (!run_to_completion)
+		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+	if (newmsg)
+		intf->handlers->sender(intf->send_info, newmsg);
+
+	rcu_read_unlock();
+
+	handle_new_recv_msgs(intf);
+}
+
+/* Handle a new message from the lower layer. */
+void ipmi_smi_msg_received(struct ipmi_smi *intf,
+			   struct ipmi_smi_msg *msg)
+{
+	unsigned long flags = 0; /* keep us warning-free. */
+	int run_to_completion = intf->run_to_completion;
+
+	if ((msg->data_size >= 2)
+	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
+	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
+	    && (msg->user_data == NULL)) {
+
+		if (intf->in_shutdown)
+			goto free_msg;
+
+		/*
+		 * This is the local response to a command send; start
+		 * the timer for these.  The user_data will not be
+		 * NULL if this is a response send, and we will let
+		 * response sends just go through.
+		 */
+
+		/*
+		 * Check for errors, if we get certain errors (ones
+		 * that mean basically we can try again later), we
+		 * ignore them and start the timer.  Otherwise we
+		 * report the error immediately.
+		 */
+		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
+		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
+		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
+		    && (msg->rsp[2] != IPMI_BUS_ERR)
+		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
+			int ch = msg->rsp[3] & 0xf;
+			struct ipmi_channel *chans;
+
+			/* Got an error sending the message, handle it. */
+
+			chans = READ_ONCE(intf->channel_list)->c;
+			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
+			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
+				ipmi_inc_stat(intf, sent_lan_command_errs);
+			else
+				ipmi_inc_stat(intf, sent_ipmb_command_errs);
+			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
+		} else
+			/* The message was sent, start the timer. */
+			intf_start_seq_timer(intf, msg->msgid);
+
+free_msg:
+		ipmi_free_smi_msg(msg);
+	} else {
+		/*
+		 * To preserve message order, we keep a queue and deliver from
+		 * a tasklet.
+		 */
+		if (!run_to_completion)
+			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+		list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
+		if (!run_to_completion)
+			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+					       flags);
+	}
+
+	if (!run_to_completion)
+		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+	/*
+	 * We can get an asynchronous event or receive message in addition
+	 * to commands we send.
+	 */
+	if (msg == intf->curr_msg)
+		intf->curr_msg = NULL;
+	if (!run_to_completion)
+		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+
+	if (run_to_completion)
+		smi_recv_tasklet((unsigned long) intf);
+	else
+		tasklet_schedule(&intf->recv_tasklet);
+}
+EXPORT_SYMBOL(ipmi_smi_msg_received);
+
+void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
+{
+	if (intf->in_shutdown)
+		return;
+
+	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
+	tasklet_schedule(&intf->recv_tasklet);
+}
+EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
+
+static struct ipmi_smi_msg *
+smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
+		  unsigned char seq, long seqid)
+{
+	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
+	if (!smi_msg)
+		/*
+		 * If we can't allocate the message, then just return;
+		 * we get 4 retries, so this should be OK.
+		 */
+		return NULL;
+
+	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
+	smi_msg->data_size = recv_msg->msg.data_len;
+	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
+
+	ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size);
+
+	return smi_msg;
+}
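STORE_SEQ_IN_MSGID() is defined earlier in this file (outside the hunk shown); it packs the sequence-table slot and its per-slot generation counter into the opaque msgid so a late response can be matched back to the right slot even after the slot has been reused. A hedged sketch of that style of packing, with made-up field widths:

/* Hypothetical msgid packing; the real macro and widths live earlier
 * in ipmi_msghandler.c. */
#define SEQ_SHIFT	26
#define SEQID_MASK	((1L << SEQ_SHIFT) - 1)

static long pack_msgid(unsigned char seq, long seqid)
{
	return ((long)seq << SEQ_SHIFT) | (seqid & SEQID_MASK);
}

static void unpack_msgid(long msgid, unsigned char *seq, long *seqid)
{
	*seq = (msgid >> SEQ_SHIFT) & 0x3f;
	*seqid = msgid & SEQID_MASK;
}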
+
+static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
+			      struct list_head *timeouts,
+			      unsigned long timeout_period,
+			      int slot, unsigned long *flags,
+			      unsigned int *waiting_msgs)
+{
+	struct ipmi_recv_msg *msg;
+
+	if (intf->in_shutdown)
+		return;
+
+	if (!ent->inuse)
+		return;
+
+	if (timeout_period < ent->timeout) {
+		ent->timeout -= timeout_period;
+		(*waiting_msgs)++;
+		return;
+	}
+
+	if (ent->retries_left == 0) {
+		/* The message has used all its retries. */
+		ent->inuse = 0;
+		msg = ent->recv_msg;
+		list_add_tail(&msg->link, timeouts);
+		if (ent->broadcast)
+			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
+		else if (is_lan_addr(&ent->recv_msg->addr))
+			ipmi_inc_stat(intf, timed_out_lan_commands);
+		else
+			ipmi_inc_stat(intf, timed_out_ipmb_commands);
+	} else {
+		struct ipmi_smi_msg *smi_msg;
+		/* More retries, send again. */
+
+		(*waiting_msgs)++;
+
+		/*
+		 * Start with the max timer, set to normal timer after
+		 * the message is sent.
+		 */
+		ent->timeout = MAX_MSG_TIMEOUT;
+		ent->retries_left--;
+		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
+					    ent->seqid);
+		if (!smi_msg) {
+			if (is_lan_addr(&ent->recv_msg->addr))
+				ipmi_inc_stat(intf,
+					      dropped_rexmit_lan_commands);
+			else
+				ipmi_inc_stat(intf,
+					      dropped_rexmit_ipmb_commands);
+			return;
+		}
+
+		spin_unlock_irqrestore(&intf->seq_lock, *flags);
+
+		/*
+		 * Send the new message.  We send with a zero
+		 * priority.  It timed out, I doubt time is that
+		 * critical now, and high priority messages are really
+		 * only for messages to the local MC, which don't get
+		 * resent.
+		 */
+		if (intf->handlers) {
+			if (is_lan_addr(&ent->recv_msg->addr))
+				ipmi_inc_stat(intf,
+					      retransmitted_lan_commands);
+			else
+				ipmi_inc_stat(intf,
+					      retransmitted_ipmb_commands);
+
+			smi_send(intf, intf->handlers, smi_msg, 0);
+		} else
+			ipmi_free_smi_msg(smi_msg);
+
+		spin_lock_irqsave(&intf->seq_lock, *flags);
+	}
+}
+
+static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
+					 unsigned long timeout_period)
+{
+	struct list_head     timeouts;
+	struct ipmi_recv_msg *msg, *msg2;
+	unsigned long        flags;
+	int                  i;
+	unsigned int         waiting_msgs = 0;
+
+	if (!intf->bmc_registered) {
+		kref_get(&intf->refcount);
+		if (!schedule_work(&intf->bmc_reg_work)) {
+			kref_put(&intf->refcount, intf_free);
+			waiting_msgs++;
+		}
+	}
+
+	/*
+	 * Go through the seq table and find any messages that
+	 * have timed out, putting them in the timeouts
+	 * list.
+	 */
+	INIT_LIST_HEAD(&timeouts);
+	spin_lock_irqsave(&intf->seq_lock, flags);
+	if (intf->ipmb_maintenance_mode_timeout) {
+		if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
+			intf->ipmb_maintenance_mode_timeout = 0;
+		else
+			intf->ipmb_maintenance_mode_timeout -= timeout_period;
+	}
+	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+		check_msg_timeout(intf, &intf->seq_table[i],
+				  &timeouts, timeout_period, i,
+				  &flags, &waiting_msgs);
+	spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+	list_for_each_entry_safe(msg, msg2, &timeouts, link)
+		deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
+
+	/*
+	 * Maintenance mode handling.  Check the timeout
+	 * optimistically before we claim the lock.  It may
+	 * mean a timeout gets missed occasionally, but that
+	 * only means the timeout gets extended by one period
+	 * in that case.  No big deal, and it avoids the lock
+	 * most of the time.
+	 */
+	if (intf->auto_maintenance_timeout > 0) {
+		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+		if (intf->auto_maintenance_timeout > 0) {
+			intf->auto_maintenance_timeout
+				-= timeout_period;
+			if (!intf->maintenance_mode
+			    && (intf->auto_maintenance_timeout <= 0)) {
+				intf->maintenance_mode_enable = false;
+				maintenance_mode_update(intf);
+			}
+		}
+		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+				       flags);
+	}
+
+	tasklet_schedule(&intf->recv_tasklet);
+
+	return waiting_msgs;
+}
+
+static void ipmi_request_event(struct ipmi_smi *intf)
+{
+	/* No event requests when in maintenance mode. */
+	if (intf->maintenance_mode_enable)
+		return;
+
+	if (!intf->in_shutdown)
+		intf->handlers->request_events(intf->send_info);
+}
+
+static struct timer_list ipmi_timer;
+
+static atomic_t stop_operation;
+
+static void ipmi_timeout(struct timer_list *unused)
+{
+	struct ipmi_smi *intf;
+	int nt = 0, index;
+
+	if (atomic_read(&stop_operation))
+		return;
+
+	index = srcu_read_lock(&ipmi_interfaces_srcu);
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		int lnt = 0;
+
+		if (atomic_read(&intf->event_waiters)) {
+			intf->ticks_to_req_ev--;
+			if (intf->ticks_to_req_ev == 0) {
+				ipmi_request_event(intf);
+				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+			}
+			lnt++;
+		}
+
+		lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
+
+		lnt = !!lnt;
+		if (lnt != intf->last_needs_timer &&
+					intf->handlers->set_need_watch)
+			intf->handlers->set_need_watch(intf->send_info, lnt);
+		intf->last_needs_timer = lnt;
+
+		nt += lnt;
+	}
+	srcu_read_unlock(&ipmi_interfaces_srcu, index);
+
+	if (nt)
+		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+}
+
+static void need_waiter(struct ipmi_smi *intf)
+{
+	/* Racy, but worst case we start the timer twice. */
+	if (!timer_pending(&ipmi_timer))
+		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+}
+
+static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
+static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
+
+static void free_smi_msg(struct ipmi_smi_msg *msg)
+{
+	atomic_dec(&smi_msg_inuse_count);
+	kfree(msg);
+}
+
+struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
+{
+	struct ipmi_smi_msg *rv;
+	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
+	if (rv) {
+		rv->done = free_smi_msg;
+		rv->user_data = NULL;
+		atomic_inc(&smi_msg_inuse_count);
+	}
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_alloc_smi_msg);
+
+static void free_recv_msg(struct ipmi_recv_msg *msg)
+{
+	atomic_dec(&recv_msg_inuse_count);
+	kfree(msg);
+}
+
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
+{
+	struct ipmi_recv_msg *rv;
+
+	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
+	if (rv) {
+		rv->user = NULL;
+		rv->done = free_recv_msg;
+		atomic_inc(&recv_msg_inuse_count);
+	}
+	return rv;
+}
+
+void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
+{
+	if (msg->user)
+		kref_put(&msg->user->refcount, free_user);
+	msg->done(msg);
+}
+EXPORT_SYMBOL(ipmi_free_recv_msg);
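ipmi_free_recv_msg() shows the ownership rule for messages in this file: storage is released through the done callback rather than a direct kfree(), which is what lets the panic path below use on-stack messages whose done handlers merely decrement a counter. Consumers therefore always free through the helper, as in this sketch:

/* Release sketch: always free through the helper, never kfree() directly. */
static void consume(struct ipmi_recv_msg *msg)
{
	/* ... use msg ... */
	ipmi_free_recv_msg(msg);	/* Drops the user ref, then msg->done():
					 * free_recv_msg() for heap messages,
					 * dummy_recv_done_handler() in a panic. */
}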
+
+static atomic_t panic_done_count = ATOMIC_INIT(0);
+
+static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
+{
+	atomic_dec(&panic_done_count);
+}
+
+static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
+{
+	atomic_dec(&panic_done_count);
+}
+
+/*
+ * Inside a panic, send a message and wait for a response.
+ */
+static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
+					struct ipmi_addr *addr,
+					struct kernel_ipmi_msg *msg)
+{
+	struct ipmi_smi_msg  smi_msg;
+	struct ipmi_recv_msg recv_msg;
+	int rv;
+
+	smi_msg.done = dummy_smi_done_handler;
+	recv_msg.done = dummy_recv_done_handler;
+	atomic_add(2, &panic_done_count);
+	rv = i_ipmi_request(NULL,
+			    intf,
+			    addr,
+			    0,
+			    msg,
+			    intf,
+			    &smi_msg,
+			    &recv_msg,
+			    0,
+			    intf->addrinfo[0].address,
+			    intf->addrinfo[0].lun,
+			    0, 1); /* Don't retry, and don't wait. */
+	if (rv)
+		atomic_sub(2, &panic_done_count);
+	else if (intf->handlers->flush_messages)
+		intf->handlers->flush_messages(intf->send_info);
+
+	while (atomic_read(&panic_done_count) != 0)
+		ipmi_poll(intf);
+}
+
+static void event_receiver_fetcher(struct ipmi_smi *intf,
+				   struct ipmi_recv_msg *msg)
+{
+	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
+	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
+	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
+		/* A get event receiver command, save it. */
+		intf->event_receiver = msg->msg.data[1];
+		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
+	}
+}
+
+static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
+{
+	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
+	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
+	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
+		/*
+		 * A get device id command, save if we are an event
+		 * receiver or generator.
+		 */
+		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
+		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
+	}
+}
+
+static void send_panic_events(struct ipmi_smi *intf, char *str)
+{
+	struct kernel_ipmi_msg msg;
+	unsigned char data[16];
+	struct ipmi_system_interface_addr *si;
+	struct ipmi_addr addr;
+	char *p = str;
+	struct ipmi_ipmb_addr *ipmb;
+	int j;
+
+	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
+		return;
+
+	si = (struct ipmi_system_interface_addr *) &addr;
+	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	si->channel = IPMI_BMC_CHANNEL;
+	si->lun = 0;
+
+	/* Fill in an event telling that we have failed. */
+	msg.netfn = 0x04; /* Sensor or Event. */
+	msg.cmd = 2; /* Platform event command. */
+	msg.data = data;
+	msg.data_len = 8;
+	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
+	data[1] = 0x03; /* This is for IPMI 1.0. */
+	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
+	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
+	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
+
+	/*
+	 * Put a few breadcrumbs in.  Hopefully later we can add more things
+	 * to make the panic events more useful.
+	 */
+	if (str) {
+		data[3] = str[0];
+		data[6] = str[1];
+		data[7] = str[2];
+	}
+
+	/* Send the event announcing the panic. */
+	ipmi_panic_request_and_wait(intf, &addr, &msg);
+
+	/*
+	 * On this interface, dump a series of OEM events holding
+	 * the string.
+	 */
+	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
+		return;
+
+	/*
+	 * intf_num is used as a marker to tell if the
+	 * interface is valid.  Thus we need a read barrier to
+	 * make sure data fetched before checking intf_num
+	 * won't be used.
+	 */
+	smp_rmb();
+
+	/*
+	 * First job here is to figure out where to send the
+	 * OEM events.  There's no way in IPMI to send OEM
+	 * events using an event send command, so we have to
+	 * find the SEL to put them in and stick them in
+	 * there.
+	 */
+
+	/* Get capabilities from the get device id. */
+	intf->local_sel_device = 0;
+	intf->local_event_generator = 0;
+	intf->event_receiver = 0;
+
+	/* Request the device info from the local MC. */
+	msg.netfn = IPMI_NETFN_APP_REQUEST;
+	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+	msg.data = NULL;
+	msg.data_len = 0;
+	intf->null_user_handler = device_id_fetcher;
+	ipmi_panic_request_and_wait(intf, &addr, &msg);
+
+	if (intf->local_event_generator) {
+		/* Request the event receiver from the local MC. */
+		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
+		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
+		msg.data = NULL;
+		msg.data_len = 0;
+		intf->null_user_handler = event_receiver_fetcher;
+		ipmi_panic_request_and_wait(intf, &addr, &msg);
+	}
+	intf->null_user_handler = NULL;
+
+	/*
+	 * Validate the event receiver.  The low bit must not
+	 * be 1 (it must be a valid IPMB address), it cannot
+	 * be zero, and it must not be my address.
+	 */
+	if (((intf->event_receiver & 1) == 0)
+	    && (intf->event_receiver != 0)
+	    && (intf->event_receiver != intf->addrinfo[0].address)) {
+		/*
+		 * The event receiver is valid, send an IPMB
+		 * message.
+		 */
+		ipmb = (struct ipmi_ipmb_addr *) &addr;
+		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
+		ipmb->channel = 0; /* FIXME - is this right? */
+		ipmb->lun = intf->event_receiver_lun;
+		ipmb->slave_addr = intf->event_receiver;
+	} else if (intf->local_sel_device) {
+		/*
+		 * The event receiver was not valid (or was
+		 * me), but I am an SEL device, just dump it
+		 * in my SEL.
+		 */
+		si = (struct ipmi_system_interface_addr *) &addr;
+		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+		si->channel = IPMI_BMC_CHANNEL;
+		si->lun = 0;
+	} else
+		return; /* Nowhere to send the event. */
+
+	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
+	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
+	msg.data = data;
+	msg.data_len = 16;
+
+	j = 0;
+	while (*p) {
+		int size = strlen(p);
+
+		if (size > 11)
+			size = 11;
+		data[0] = 0;
+		data[1] = 0;
+		data[2] = 0xf0; /* OEM event without timestamp. */
+		data[3] = intf->addrinfo[0].address;
+		data[4] = j++; /* sequence # */
+		/*
+		 * Always give 11 bytes, so strncpy will fill
+		 * it with zeroes for me.
+		 */
+		strncpy(data+5, p, 11);
+		p += size;
+
+		ipmi_panic_request_and_wait(intf, &addr, &msg);
+	}
+}
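Each Add SEL Entry record built in the loop above carries at most 11 bytes of the panic string (data[5..15]), so a string of length n becomes ceil(n/11) records, sequence-numbered in data[4]. A standalone illustration of the chunking (assuming a made-up panic string):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *p = "Kernel panic - not syncing: example";
	int j = 0;

	while (*p) {
		int size = strlen(p);

		if (size > 11)
			size = 11;
		printf("record %2d: \"%.11s\"\n", j++, p);
		p += size;
	}
	return 0;
}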
+
+static int has_panicked;
+
+static int panic_event(struct notifier_block *this,
+		       unsigned long         event,
+		       void                  *ptr)
+{
+	struct ipmi_smi *intf;
+	struct ipmi_user *user;
+
+	if (has_panicked)
+		return NOTIFY_DONE;
+	has_panicked = 1;
+
+	/* For every registered interface, set it to run to completion. */
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (!intf->handlers || intf->intf_num == -1)
+			/* Interface is not ready. */
+			continue;
+
+		if (!intf->handlers->poll)
+			continue;
+
+		/*
+		 * If we were interrupted while locking xmit_msgs_lock or
+		 * waiting_rcv_msgs_lock, the corresponding list may be
+		 * corrupted.  In this case, drop the items on the list
+		 * for safety.
+		 */
+		if (!spin_trylock(&intf->xmit_msgs_lock)) {
+			INIT_LIST_HEAD(&intf->xmit_msgs);
+			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
+		} else
+			spin_unlock(&intf->xmit_msgs_lock);
+
+		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
+			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
+		else
+			spin_unlock(&intf->waiting_rcv_msgs_lock);
+
+		intf->run_to_completion = 1;
+		if (intf->handlers->set_run_to_completion)
+			intf->handlers->set_run_to_completion(intf->send_info,
+							      1);
+
+		list_for_each_entry_rcu(user, &intf->users, link) {
+			if (user->handler->ipmi_panic_handler)
+				user->handler->ipmi_panic_handler(
+					user->handler_data);
+		}
+
+		send_panic_events(intf, ptr);
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_block = {
+	.notifier_call	= panic_event,
+	.next		= NULL,
+	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
+};
+
+static int ipmi_init_msghandler(void)
+{
+	int rv;
+
+	if (initialized)
+		return 0;
+
+	rv = driver_register(&ipmidriver.driver);
+	if (rv) {
+		pr_err(PFX "Could not register IPMI driver\n");
+		return rv;
+	}
+
+	pr_info("ipmi message handler version " IPMI_DRIVER_VERSION "\n");
+
+	timer_setup(&ipmi_timer, ipmi_timeout, 0);
+	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
+	initialized = 1;
+
+	return 0;
+}
+
+static int __init ipmi_init_msghandler_mod(void)
+{
+	ipmi_init_msghandler();
+	return 0;
+}
+
+static void __exit cleanup_ipmi(void)
+{
+	int count;
+
+	if (!initialized)
+		return;
+
+	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
+
+	/*
+	 * This can't be called if any interfaces exist, so no worry
+	 * about shutting down the interfaces.
+	 */
+
+	/*
+	 * Tell the timer to stop, then wait for it to stop.  This
+	 * avoids problems with race conditions removing the timer
+	 * here.
+	 */
+	atomic_inc(&stop_operation);
+	del_timer_sync(&ipmi_timer);
+
+	driver_unregister(&ipmidriver.driver);
+
+	initialized = 0;
+
+	/* Check for buffer leaks. */
+	count = atomic_read(&smi_msg_inuse_count);
+	if (count != 0)
+		pr_warn(PFX "SMI message count %d at exit\n", count);
+	count = atomic_read(&recv_msg_inuse_count);
+	if (count != 0)
+		pr_warn(PFX "recv message count %d at exit\n", count);
+}
+module_exit(cleanup_ipmi);
+
+module_init(ipmi_init_msghandler_mod);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
+		   " interface.");
+MODULE_VERSION(IPMI_DRIVER_VERSION);
+MODULE_SOFTDEP("post: ipmi_devintf");
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
new file mode 100644
index 0000000..e965003
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PowerNV OPAL IPMI driver
+ *
+ * Copyright 2014 IBM Corp.
+ */
+
+#define pr_fmt(fmt)        "ipmi-powernv: " fmt
+
+#include <linux/ipmi_smi.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/opal.h>
+
+
+struct ipmi_smi_powernv {
+	u64			interface_id;
+	ipmi_smi_t		intf;
+	unsigned int		irq;
+
+	/*
+	 * We assume that there can only be one outstanding request, so
+	 * keep the pending message in cur_msg.  We protect cur_msg (and
+	 * consequently opal_msg, which is in use when cur_msg is set)
+	 * from concurrent updates in the send & recv calls with msg_lock.
+	 */
+	spinlock_t		msg_lock;
+	struct ipmi_smi_msg	*cur_msg;
+	struct opal_ipmi_msg	*opal_msg;
+};
+
+static int ipmi_powernv_start_processing(void *send_info, ipmi_smi_t intf)
+{
+	struct ipmi_smi_powernv *smi = send_info;
+
+	smi->intf = intf;
+	return 0;
+}
+
+static void send_error_reply(struct ipmi_smi_powernv *smi,
+		struct ipmi_smi_msg *msg, u8 completion_code)
+{
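+	/*
+	 * data[0] holds (netfn << 2) | lun; setting bit 2 turns the
+	 * request netfn into the corresponding response netfn.
+	 */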
+	msg->rsp[0] = msg->data[0] | 0x4;
+	msg->rsp[1] = msg->data[1];
+	msg->rsp[2] = completion_code;
+	msg->rsp_size = 3;
+	ipmi_smi_msg_received(smi->intf, msg);
+}
+
+static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
+{
+	struct ipmi_smi_powernv *smi = send_info;
+	struct opal_ipmi_msg *opal_msg;
+	unsigned long flags;
+	int comp, rc;
+	size_t size;
+
+	/* ensure data_len will fit in the opal_ipmi_msg buffer... */
+	if (msg->data_size > IPMI_MAX_MSG_LENGTH) {
+		comp = IPMI_REQ_LEN_EXCEEDED_ERR;
+		goto err;
+	}
+
+	/* ... and that we at least have netfn and cmd bytes */
+	if (msg->data_size < 2) {
+		comp = IPMI_REQ_LEN_INVALID_ERR;
+		goto err;
+	}
+
+	spin_lock_irqsave(&smi->msg_lock, flags);
+
+	if (smi->cur_msg) {
+		comp = IPMI_NODE_BUSY_ERR;
+		goto err_unlock;
+	}
+
+	/* format our data for the OPAL API */
+	opal_msg = smi->opal_msg;
+	opal_msg->version = OPAL_IPMI_MSG_FORMAT_VERSION_1;
+	opal_msg->netfn = msg->data[0];
+	opal_msg->cmd = msg->data[1];
+	if (msg->data_size > 2)
+		memcpy(opal_msg->data, msg->data + 2, msg->data_size - 2);
+
+	/* data_size already includes the netfn and cmd bytes */
+	size = sizeof(*opal_msg) + msg->data_size - 2;
+
+	pr_devel("%s: opal_ipmi_send(0x%llx, %p, %ld)\n", __func__,
+			smi->interface_id, opal_msg, size);
+	rc = opal_ipmi_send(smi->interface_id, opal_msg, size);
+	pr_devel("%s:  -> %d\n", __func__, rc);
+
+	if (!rc) {
+		smi->cur_msg = msg;
+		spin_unlock_irqrestore(&smi->msg_lock, flags);
+		return;
+	}
+
+	comp = IPMI_ERR_UNSPECIFIED;
+err_unlock:
+	spin_unlock_irqrestore(&smi->msg_lock, flags);
+err:
+	send_error_reply(smi, msg, comp);
+}
+
+static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
+{
+	struct opal_ipmi_msg *opal_msg;
+	struct ipmi_smi_msg *msg;
+	unsigned long flags;
+	uint64_t size;
+	int rc;
+
+	pr_devel("%s: opal_ipmi_recv(%llx, msg, sz)\n", __func__,
+			smi->interface_id);
+
+	spin_lock_irqsave(&smi->msg_lock, flags);
+
+	if (!smi->cur_msg) {
+		spin_unlock_irqrestore(&smi->msg_lock, flags);
+		pr_warn("no current message?\n");
+		return 0;
+	}
+
+	msg = smi->cur_msg;
+	opal_msg = smi->opal_msg;
+
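+	/*
+	 * Tell OPAL how large our receive buffer is; the size is passed
+	 * by reference as a big-endian value and updated to the actual
+	 * message size on return.
+	 */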
+	size = cpu_to_be64(sizeof(*opal_msg) + IPMI_MAX_MSG_LENGTH);
+
+	rc = opal_ipmi_recv(smi->interface_id,
+			opal_msg,
+			&size);
+	size = be64_to_cpu(size);
+	pr_devel("%s:   -> %d (size %lld)\n", __func__,
+			rc, rc == 0 ? size : 0);
+	if (rc) {
+		/* If this came via a poll and the response wasn't ready */
+		if (rc == OPAL_EMPTY) {
+			spin_unlock_irqrestore(&smi->msg_lock, flags);
+			return 0;
+		}
+
+		smi->cur_msg = NULL;
+		spin_unlock_irqrestore(&smi->msg_lock, flags);
+		send_error_reply(smi, msg, IPMI_ERR_UNSPECIFIED);
+		return 0;
+	}
+
+	if (size < sizeof(*opal_msg)) {
+		spin_unlock_irqrestore(&smi->msg_lock, flags);
+		pr_warn("unexpected IPMI message size %lld\n", size);
+		return 0;
+	}
+
+	if (opal_msg->version != OPAL_IPMI_MSG_FORMAT_VERSION_1) {
+		spin_unlock_irqrestore(&smi->msg_lock, flags);
+		pr_warn("unexpected IPMI message format (version %d)\n",
+				opal_msg->version);
+		return 0;
+	}
+
+	msg->rsp[0] = opal_msg->netfn;
+	msg->rsp[1] = opal_msg->cmd;
+	if (size > sizeof(*opal_msg))
+		memcpy(&msg->rsp[2], opal_msg->data, size - sizeof(*opal_msg));
+	msg->rsp_size = 2 + size - sizeof(*opal_msg);
+
+	smi->cur_msg = NULL;
+	spin_unlock_irqrestore(&smi->msg_lock, flags);
+	ipmi_smi_msg_received(smi->intf, msg);
+	return 0;
+}
+
+static void ipmi_powernv_request_events(void *send_info)
+{
+}
+
+static void ipmi_powernv_set_run_to_completion(void *send_info,
+		bool run_to_completion)
+{
+}
+
+static void ipmi_powernv_poll(void *send_info)
+{
+	struct ipmi_smi_powernv *smi = send_info;
+
+	ipmi_powernv_recv(smi);
+}
+
+static const struct ipmi_smi_handlers ipmi_powernv_smi_handlers = {
+	.owner			= THIS_MODULE,
+	.start_processing	= ipmi_powernv_start_processing,
+	.sender			= ipmi_powernv_send,
+	.request_events		= ipmi_powernv_request_events,
+	.set_run_to_completion	= ipmi_powernv_set_run_to_completion,
+	.poll			= ipmi_powernv_poll,
+};
+
+static irqreturn_t ipmi_opal_event(int irq, void *data)
+{
+	struct ipmi_smi_powernv *smi = data;
+
+	ipmi_powernv_recv(smi);
+	return IRQ_HANDLED;
+}
+
+static int ipmi_powernv_probe(struct platform_device *pdev)
+{
+	struct ipmi_smi_powernv *ipmi;
+	struct device *dev;
+	u32 prop;
+	int rc;
+
+	if (!pdev || !pdev->dev.of_node)
+		return -ENODEV;
+
+	dev = &pdev->dev;
+
+	ipmi = devm_kzalloc(dev, sizeof(*ipmi), GFP_KERNEL);
+	if (!ipmi)
+		return -ENOMEM;
+
+	spin_lock_init(&ipmi->msg_lock);
+
+	rc = of_property_read_u32(dev->of_node, "ibm,ipmi-interface-id",
+			&prop);
+	if (rc) {
+		dev_warn(dev, "No interface ID property\n");
+		goto err_free;
+	}
+	ipmi->interface_id = prop;
+
+	rc = of_property_read_u32(dev->of_node, "interrupts", &prop);
+	if (rc) {
+		dev_warn(dev, "No interrupts property\n");
+		goto err_free;
+	}
+
+	ipmi->irq = irq_of_parse_and_map(dev->of_node, 0);
+	if (!ipmi->irq) {
+		dev_info(dev, "Unable to map irq from device tree\n");
+		ipmi->irq = opal_event_request(prop);
+	}
+
+	rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
+			 "opal-ipmi", ipmi);
+	if (rc) {
+		dev_warn(dev, "Unable to request irq\n");
+		goto err_dispose;
+	}
+
+	ipmi->opal_msg = devm_kmalloc(dev,
+			sizeof(*ipmi->opal_msg) + IPMI_MAX_MSG_LENGTH,
+			GFP_KERNEL);
+	if (!ipmi->opal_msg) {
+		rc = -ENOMEM;
+		goto err_unregister;
+	}
+
+	rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi, dev, 0);
+	if (rc) {
+		dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc);
+		goto err_free_msg;
+	}
+
+	dev_set_drvdata(dev, ipmi);
+	return 0;
+
+err_free_msg:
+	devm_kfree(dev, ipmi->opal_msg);
+err_unregister:
+	free_irq(ipmi->irq, ipmi);
+err_dispose:
+	irq_dispose_mapping(ipmi->irq);
+err_free:
+	devm_kfree(dev, ipmi);
+	return rc;
+}
+
+static int ipmi_powernv_remove(struct platform_device *pdev)
+{
+	struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev);
+
+	ipmi_unregister_smi(smi->intf);
+	free_irq(smi->irq, smi);
+	irq_dispose_mapping(smi->irq);
+
+	return 0;
+}
+
+static const struct of_device_id ipmi_powernv_match[] = {
+	{ .compatible = "ibm,opal-ipmi" },
+	{ },
+};
+
+
+static struct platform_driver powernv_ipmi_driver = {
+	.driver = {
+		.name		= "ipmi-powernv",
+		.of_match_table	= ipmi_powernv_match,
+	},
+	.probe	= ipmi_powernv_probe,
+	.remove	= ipmi_powernv_remove,
+};
+
+
+module_platform_driver(powernv_ipmi_driver);
+
+MODULE_DEVICE_TABLE(of, ipmi_powernv_match);
+MODULE_DESCRIPTION("powernv IPMI driver");
+MODULE_AUTHOR("Jeremy Kerr <jk@ozlabs.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
new file mode 100644
index 0000000..f6e1941
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -0,0 +1,749 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_poweroff.c
+ *
+ * MontaVista IPMI Poweroff extension to sys_reboot
+ *
+ * Author: MontaVista Software, Inc.
+ *         Steven Dake <sdake@mvista.com>
+ *         Corey Minyard <cminyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002,2004 MontaVista Software Inc.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/pm.h>
+#include <linux/kdev_t.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+
+#define PFX "IPMI poweroff: "
+
+static void ipmi_po_smi_gone(int if_num);
+static void ipmi_po_new_smi(int if_num, struct device *device);
+
+/*
+ * Definitions for controlling power off (if the system supports it).  It
+ * conveniently matches the IPMI chassis control values.
+ */
+#define IPMI_CHASSIS_POWER_DOWN		0	/* power down, the default. */
+#define IPMI_CHASSIS_POWER_CYCLE	0x02	/* power cycle */
+
+/* If nonzero, power cycle instead of power down. */
+static int poweroff_powercycle;
+
+/* Which interface to use, -1 means the first we see. */
+static int ifnum_to_use = -1;
+
+/* Our local state. */
+static int ready;
+static struct ipmi_user *ipmi_user;
+static int ipmi_ifnum;
+static void (*specific_poweroff_func)(struct ipmi_user *user);
+
+/* Holds the old poweroff function so we can restore it on removal. */
+static void (*old_poweroff_func)(void);
+
+static int set_param_ifnum(const char *val, const struct kernel_param *kp)
+{
+	int rv = param_set_int(val, kp);
+	if (rv)
+		return rv;
+	if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum))
+		return 0;
+
+	ipmi_po_smi_gone(ipmi_ifnum);
+	ipmi_po_new_smi(ifnum_to_use, NULL);
+	return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_ifnum, param_get_int,
+		  &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the poweroff "
+		 "operation.  Setting to -1 defaults to the first registered "
+		 "interface");
+
+/* parameter definition to allow user to flag power cycle */
+module_param(poweroff_powercycle, int, 0644);
+MODULE_PARM_DESC(poweroff_powercycle,
+		 " Set to non-zero to enable power cycle instead of power"
+		 " down. Power cycle is contingent on hardware support,"
+		 " otherwise it defaults back to power down.");
+
+/* Stuff from the get device id command. */
+static unsigned int mfg_id;
+static unsigned int prod_id;
+static unsigned char capabilities;
+static unsigned char ipmi_version;
+
+/*
+ * We use our own messages for this operation; we don't let the system
+ * allocate them, since we may be in a panic situation.  The whole
+ * thing is single-threaded, anyway, so multiple messages are not
+ * required.
+ */
+static atomic_t dummy_count = ATOMIC_INIT(0);
+static void dummy_smi_free(struct ipmi_smi_msg *msg)
+{
+	atomic_dec(&dummy_count);
+}
+static void dummy_recv_free(struct ipmi_recv_msg *msg)
+{
+	atomic_dec(&dummy_count);
+}
+static struct ipmi_smi_msg halt_smi_msg = {
+	.done = dummy_smi_free
+};
+static struct ipmi_recv_msg halt_recv_msg = {
+	.done = dummy_recv_free
+};
+
+
+/*
+ * Code to send a message and wait for the response.
+ */
+
+static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data)
+{
+	struct completion *comp = recv_msg->user_msg_data;
+
+	if (comp)
+		complete(comp);
+}
+
+static const struct ipmi_user_hndl ipmi_poweroff_handler = {
+	.ipmi_recv_hndl = receive_handler
+};
+
+
+static int ipmi_request_wait_for_response(struct ipmi_user       *user,
+					  struct ipmi_addr       *addr,
+					  struct kernel_ipmi_msg *send_msg)
+{
+	int               rv;
+	struct completion comp;
+
+	init_completion(&comp);
+
+	rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, &comp,
+				      &halt_smi_msg, &halt_recv_msg, 0);
+	if (rv)
+		return rv;
+
+	wait_for_completion(&comp);
+
+	return halt_recv_msg.msg.data[0];
+}
+
+/* Wait for message to complete, spinning. */
+static int ipmi_request_in_rc_mode(struct ipmi_user       *user,
+				   struct ipmi_addr       *addr,
+				   struct kernel_ipmi_msg *send_msg)
+{
+	int rv;
+
+	atomic_set(&dummy_count, 2);
+	rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
+				      &halt_smi_msg, &halt_recv_msg, 0);
+	if (rv) {
+		atomic_set(&dummy_count, 0);
+		return rv;
+	}
+
+	/*
+	 * Spin until our message is done.
+	 */
+	while (atomic_read(&dummy_count) > 0) {
+		ipmi_poll_interface(user);
+		cpu_relax();
+	}
+
+	return halt_recv_msg.msg.data[0];
+}
+
+/*
+ * ATCA Support
+ */
+
+#define IPMI_NETFN_ATCA			0x2c
+#define IPMI_ATCA_SET_POWER_CMD		0x11
+#define IPMI_ATCA_GET_ADDR_INFO_CMD	0x01
+#define IPMI_PICMG_ID			0
+
+#define IPMI_NETFN_OEM				0x2e
+#define IPMI_ATCA_PPS_GRACEFUL_RESTART		0x11
+#define IPMI_ATCA_PPS_IANA			"\x00\x40\x0A"
+#define IPMI_MOTOROLA_MANUFACTURER_ID		0x0000A1
+#define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID	0x0051
+
+static void (*atca_oem_poweroff_hook)(struct ipmi_user *user);
+
+static void pps_poweroff_atca(struct ipmi_user *user)
+{
+	struct ipmi_system_interface_addr smi_addr;
+	struct kernel_ipmi_msg            send_msg;
+	int                               rv;
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
+
+	printk(KERN_INFO PFX "PPS powerdown hook used");
+
+	send_msg.netfn = IPMI_NETFN_OEM;
+	send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
+	send_msg.data = IPMI_ATCA_PPS_IANA;
+	send_msg.data_len = 3;
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+		printk(KERN_ERR PFX "Unable to send ATCA ,"
+		       " IPMI error 0x%x\n", rv);
+	}
+	return;
+}
+
+static int ipmi_atca_detect(struct ipmi_user *user)
+{
+	struct ipmi_system_interface_addr smi_addr;
+	struct kernel_ipmi_msg            send_msg;
+	int                               rv;
+	unsigned char                     data[1];
+
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
+
+	/*
+	 * Use get address info to check and see if we are ATCA
+	 */
+	send_msg.netfn = IPMI_NETFN_ATCA;
+	send_msg.cmd = IPMI_ATCA_GET_ADDR_INFO_CMD;
+	data[0] = IPMI_PICMG_ID;
+	send_msg.data = data;
+	send_msg.data_len = sizeof(data);
+	rv = ipmi_request_wait_for_response(user,
+					    (struct ipmi_addr *) &smi_addr,
+					    &send_msg);
+
+	printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n",
+	       mfg_id, prod_id);
+	if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
+	    && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
+		printk(KERN_INFO PFX
+		       "Installing Pigeon Point Systems Poweroff Hook\n");
+		atca_oem_poweroff_hook = pps_poweroff_atca;
+	}
+	return !rv;
+}
+
+static void ipmi_poweroff_atca(struct ipmi_user *user)
+{
+	struct ipmi_system_interface_addr smi_addr;
+	struct kernel_ipmi_msg            send_msg;
+	int                               rv;
+	unsigned char                     data[4];
+
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
+
+	printk(KERN_INFO PFX "Powering down via ATCA power command\n");
+
+	/*
+	 * Power down
+	 */
+	send_msg.netfn = IPMI_NETFN_ATCA;
+	send_msg.cmd = IPMI_ATCA_SET_POWER_CMD;
+	data[0] = IPMI_PICMG_ID;
+	data[1] = 0; /* FRU id */
+	data[2] = 0; /* Power Level */
+	data[3] = 0; /* Don't change saved presets */
+	send_msg.data = data;
+	send_msg.data_len = sizeof(data);
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	/*
+	 * At this point, the system may be shutting down, and most
+	 * serial drivers (if used) will have interrupts turned off;
+	 * it may be better to ignore an
+	 * IPMI_UNKNOWN_ERR_COMPLETION_CODE return code.
+	 */
+	if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+		printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
+		       " IPMI error 0x%x\n", rv);
+		goto out;
+	}
+
+	if (atca_oem_poweroff_hook)
+		atca_oem_poweroff_hook(user);
+ out:
+	return;
+}
+
+/*
+ * CPI1 Support
+ */
+
+#define IPMI_NETFN_OEM_1				0xf8
+#define OEM_GRP_CMD_SET_RESET_STATE		0x84
+#define OEM_GRP_CMD_SET_POWER_STATE		0x82
+#define IPMI_NETFN_OEM_8				0xf8
+#define OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL	0x80
+#define OEM_GRP_CMD_GET_SLOT_GA			0xa3
+#define IPMI_NETFN_SENSOR_EVT			0x10
+#define IPMI_CMD_GET_EVENT_RECEIVER		0x01
+
+#define IPMI_CPI1_PRODUCT_ID		0x000157
+#define IPMI_CPI1_MANUFACTURER_ID	0x0108
+
+static int ipmi_cpi1_detect(struct ipmi_user *user)
+{
+	return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID)
+		&& (prod_id == IPMI_CPI1_PRODUCT_ID));
+}
+
+static void ipmi_poweroff_cpi1(struct ipmi_user *user)
+{
+	struct ipmi_system_interface_addr smi_addr;
+	struct ipmi_ipmb_addr             ipmb_addr;
+	struct kernel_ipmi_msg            send_msg;
+	int                               rv;
+	unsigned char                     data[1];
+	int                               slot;
+	unsigned char                     hotswap_ipmb;
+	unsigned char                     aer_addr;
+	unsigned char                     aer_lun;
+
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
+
+	printk(KERN_INFO PFX "Powering down via CPI1 power command\n");
+
+	/*
+	 * Get IPMI ipmb address
+	 */
+	send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
+	send_msg.cmd = OEM_GRP_CMD_GET_SLOT_GA;
+	send_msg.data = NULL;
+	send_msg.data_len = 0;
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	if (rv)
+		goto out;
+	slot = halt_recv_msg.msg.data[1];
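+	/* Map the geographic slot number to the board's hotswap IPMB
+	 * address (CPI1 backplane addressing convention). */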
+	hotswap_ipmb = (slot > 9) ? (0xb0 + 2 * slot) : (0xae + 2 * slot);
+
+	/*
+	 * Get active event receiver
+	 */
+	send_msg.netfn = IPMI_NETFN_SENSOR_EVT >> 2;
+	send_msg.cmd = IPMI_CMD_GET_EVENT_RECEIVER;
+	send_msg.data = NULL;
+	send_msg.data_len = 0;
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	if (rv)
+		goto out;
+	aer_addr = halt_recv_msg.msg.data[1];
+	aer_lun = halt_recv_msg.msg.data[2];
+
+	/*
+	 * Setup IPMB address target instead of local target
+	 */
+	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
+	ipmb_addr.channel = 0;
+	ipmb_addr.slave_addr = aer_addr;
+	ipmb_addr.lun = aer_lun;
+
+	/*
+	 * Send request hotswap control to remove blade from dpv
+	 */
+	send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
+	send_msg.cmd = OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL;
+	send_msg.data = &hotswap_ipmb;
+	send_msg.data_len = 1;
+	ipmi_request_in_rc_mode(user,
+				(struct ipmi_addr *) &ipmb_addr,
+				&send_msg);
+
+	/*
+	 * Set reset asserted
+	 */
+	send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
+	send_msg.cmd = OEM_GRP_CMD_SET_RESET_STATE;
+	send_msg.data = data;
+	data[0] = 1; /* Reset asserted state */
+	send_msg.data_len = 1;
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	if (rv)
+		goto out;
+
+	/*
+	 * Power down
+	 */
+	send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
+	send_msg.cmd = OEM_GRP_CMD_SET_POWER_STATE;
+	send_msg.data = data;
+	data[0] = 1; /* Power down state */
+	send_msg.data_len = 1;
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	if (rv)
+		goto out;
+
+ out:
+	return;
+}
+
+/*
+ * ipmi_dell_chassis_detect()
+ * Dell systems with IPMI < 1.5 don't set the chassis capability bit
+ * but they can handle a chassis poweroff or powercycle command.
+ */
+
+#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
+static int ipmi_dell_chassis_detect(struct ipmi_user *user)
+{
+	const char ipmi_version_major = ipmi_version & 0xF;
+	const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
+	const char mfr[3] = DELL_IANA_MFR_ID;
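+	/* Note: comparing the 3-byte IANA ID against the first bytes of
+	 * mfg_id assumes a little-endian unsigned int layout. */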
+	if (!memcmp(mfr, &mfg_id, sizeof(mfr)) &&
+	    ipmi_version_major <= 1 &&
+	    ipmi_version_minor < 5)
+		return 1;
+	return 0;
+}
+
+/*
+ * ipmi_hp_chassis_detect()
+ * HP PA-RISC servers rp3410/rp3440, the C8000 workstation and the rx2600 and
+ * zx6000 machines support IPMI vers 1 and don't set the chassis capability bit
+ * but they can handle a chassis poweroff or powercycle command.
+ */
+
+#define HP_IANA_MFR_ID 0x0b
+#define HP_BMC_PROD_ID 0x8201
+static int ipmi_hp_chassis_detect(struct ipmi_user *user)
+{
+	if (mfg_id == HP_IANA_MFR_ID
+		&& prod_id == HP_BMC_PROD_ID
+		&& ipmi_version == 1)
+		return 1;
+	return 0;
+}
+
+/*
+ * Standard chassis support
+ */
+
+#define IPMI_NETFN_CHASSIS_REQUEST	0
+#define IPMI_CHASSIS_CONTROL_CMD	0x02
+
+static int ipmi_chassis_detect(struct ipmi_user *user)
+{
+	/* Chassis support, use it. */
+	return (capabilities & 0x80);
+}
+
+static void ipmi_poweroff_chassis(struct ipmi_user *user)
+{
+	struct ipmi_system_interface_addr smi_addr;
+	struct kernel_ipmi_msg            send_msg;
+	int                               rv;
+	unsigned char                     data[1];
+
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
+
+ powercyclefailed:
+	printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n",
+		(poweroff_powercycle ? "cycle" : "down"));
+
+	/*
+	 * Power down
+	 */
+	send_msg.netfn = IPMI_NETFN_CHASSIS_REQUEST;
+	send_msg.cmd = IPMI_CHASSIS_CONTROL_CMD;
+	if (poweroff_powercycle)
+		data[0] = IPMI_CHASSIS_POWER_CYCLE;
+	else
+		data[0] = IPMI_CHASSIS_POWER_DOWN;
+	send_msg.data = data;
+	send_msg.data_len = sizeof(data);
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	if (rv) {
+		if (poweroff_powercycle) {
+			/* power cycle failed, default to power down */
+			printk(KERN_ERR PFX "Unable to send chassis power"
+			       " cycle message, IPMI error 0x%x\n", rv);
+			poweroff_powercycle = 0;
+			goto powercyclefailed;
+		}
+
+		printk(KERN_ERR PFX "Unable to send chassis power"
+		       " down message, IPMI error 0x%x\n", rv);
+	}
+}
+
+
+/* Table of possible power off functions. */
+struct poweroff_function {
+	char *platform_type;
+	int  (*detect)(struct ipmi_user *user);
+	void (*poweroff_func)(struct ipmi_user *user);
+};
+
+static struct poweroff_function poweroff_functions[] = {
+	{ .platform_type	= "ATCA",
+	  .detect		= ipmi_atca_detect,
+	  .poweroff_func	= ipmi_poweroff_atca },
+	{ .platform_type	= "CPI1",
+	  .detect		= ipmi_cpi1_detect,
+	  .poweroff_func	= ipmi_poweroff_cpi1 },
+	{ .platform_type	= "chassis",
+	  .detect		= ipmi_dell_chassis_detect,
+	  .poweroff_func	= ipmi_poweroff_chassis },
+	{ .platform_type	= "chassis",
+	  .detect		= ipmi_hp_chassis_detect,
+	  .poweroff_func	= ipmi_poweroff_chassis },
+	/*
+	 * Chassis should generally be last, other things should
+	 * override it.
+	 */
+	{ .platform_type	= "chassis",
+	  .detect		= ipmi_chassis_detect,
+	  .poweroff_func	= ipmi_poweroff_chassis },
+};
+#define NUM_PO_FUNCS ARRAY_SIZE(poweroff_functions)
+
+
+/* Called on a powerdown request. */
+static void ipmi_poweroff_function(void)
+{
+	if (!ready)
+		return;
+
+	/* Use run-to-completion mode, since interrupts may be off. */
+	specific_poweroff_func(ipmi_user);
+}
+
+/*
+ * Wait for an IPMI interface to be installed; the first one installed
+ * will be grabbed by this code and used to perform the powerdown.
+ */
+static void ipmi_po_new_smi(int if_num, struct device *device)
+{
+	struct ipmi_system_interface_addr smi_addr;
+	struct kernel_ipmi_msg            send_msg;
+	int                               rv;
+	int                               i;
+
+	if (ready)
+		return;
+
+	if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num))
+		return;
+
+	rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL,
+			      &ipmi_user);
+	if (rv) {
+		printk(KERN_ERR PFX "could not create IPMI user, error %d\n",
+		       rv);
+		return;
+	}
+
+	ipmi_ifnum = if_num;
+
+	/*
+	 * Do a get device id and store some results, since this is
+	 * used by several functions.
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
+
+	send_msg.netfn = IPMI_NETFN_APP_REQUEST;
+	send_msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+	send_msg.data = NULL;
+	send_msg.data_len = 0;
+	rv = ipmi_request_wait_for_response(ipmi_user,
+					    (struct ipmi_addr *) &smi_addr,
+					    &send_msg);
+	if (rv) {
+		printk(KERN_ERR PFX "Unable to send IPMI get device id info,"
+		       " IPMI error 0x%x\n", rv);
+		goto out_err;
+	}
+
+	if (halt_recv_msg.msg.data_len < 12) {
+		printk(KERN_ERR PFX "(chassis) IPMI get device id info too,"
+		       " short, was %d bytes, needed %d bytes\n",
+		       halt_recv_msg.msg.data_len, 12);
+		goto out_err;
+	}
+
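+	/*
+	 * Per the IPMI Get Device ID response (data[0] is the completion
+	 * code): byte 5 is the IPMI version, byte 6 the additional
+	 * device support flags, bytes 7-9 the manufacturer ID (LSB
+	 * first), and bytes 10-11 the product ID.
+	 */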
+	mfg_id = (halt_recv_msg.msg.data[7]
+		  | (halt_recv_msg.msg.data[8] << 8)
+		  | (halt_recv_msg.msg.data[9] << 16));
+	prod_id = (halt_recv_msg.msg.data[10]
+		   | (halt_recv_msg.msg.data[11] << 8));
+	capabilities = halt_recv_msg.msg.data[6];
+	ipmi_version = halt_recv_msg.msg.data[5];
+
+
+	/* Scan for a poweroff method */
+	for (i = 0; i < NUM_PO_FUNCS; i++) {
+		if (poweroff_functions[i].detect(ipmi_user))
+			goto found;
+	}
+
+ out_err:
+	printk(KERN_ERR PFX "Unable to find a poweroff function that"
+	       " will work, giving up\n");
+	ipmi_destroy_user(ipmi_user);
+	return;
+
+ found:
+	printk(KERN_INFO PFX "Found a %s style poweroff function\n",
+	       poweroff_functions[i].platform_type);
+	specific_poweroff_func = poweroff_functions[i].poweroff_func;
+	old_poweroff_func = pm_power_off;
+	pm_power_off = ipmi_poweroff_function;
+	ready = 1;
+}
+
+static void ipmi_po_smi_gone(int if_num)
+{
+	if (!ready)
+		return;
+
+	if (ipmi_ifnum != if_num)
+		return;
+
+	ready = 0;
+	ipmi_destroy_user(ipmi_user);
+	pm_power_off = old_poweroff_func;
+}
+
+static struct ipmi_smi_watcher smi_watcher = {
+	.owner    = THIS_MODULE,
+	.new_smi  = ipmi_po_new_smi,
+	.smi_gone = ipmi_po_smi_gone
+};
+
+
+#ifdef CONFIG_PROC_FS
+#include <linux/sysctl.h>
+
+static struct ctl_table ipmi_table[] = {
+	{ .procname	= "poweroff_powercycle",
+	  .data		= &poweroff_powercycle,
+	  .maxlen	= sizeof(poweroff_powercycle),
+	  .mode		= 0644,
+	  .proc_handler	= proc_dointvec },
+	{ }
+};
+
+static struct ctl_table ipmi_dir_table[] = {
+	{ .procname	= "ipmi",
+	  .mode		= 0555,
+	  .child	= ipmi_table },
+	{ }
+};
+
+static struct ctl_table ipmi_root_table[] = {
+	{ .procname	= "dev",
+	  .mode		= 0555,
+	  .child	= ipmi_dir_table },
+	{ }
+};
+
+static struct ctl_table_header *ipmi_table_header;
+#endif /* CONFIG_PROC_FS */
+
+/*
+ * Startup and shutdown functions.
+ */
+static int __init ipmi_poweroff_init(void)
+{
+	int rv;
+
+	printk(KERN_INFO "Copyright (C) 2004 MontaVista Software -"
+	       " IPMI Powerdown via sys_reboot.\n");
+
+	if (poweroff_powercycle)
+		printk(KERN_INFO PFX "Power cycle is enabled.\n");
+
+#ifdef CONFIG_PROC_FS
+	ipmi_table_header = register_sysctl_table(ipmi_root_table);
+	if (!ipmi_table_header) {
+		printk(KERN_ERR PFX "Unable to register powercycle sysctl\n");
+		rv = -ENOMEM;
+		goto out_err;
+	}
+#endif
+
+	rv = ipmi_smi_watcher_register(&smi_watcher);
+
+#ifdef CONFIG_PROC_FS
+	if (rv) {
+		unregister_sysctl_table(ipmi_table_header);
+		printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
+		goto out_err;
+	}
+
+ out_err:
+#endif
+	return rv;
+}
+
+#ifdef MODULE
+static void __exit ipmi_poweroff_cleanup(void)
+{
+	int rv;
+
+#ifdef CONFIG_PROC_FS
+	unregister_sysctl_table(ipmi_table_header);
+#endif
+
+	ipmi_smi_watcher_unregister(&smi_watcher);
+
+	if (ready) {
+		rv = ipmi_destroy_user(ipmi_user);
+		if (rv)
+			printk(KERN_ERR PFX "could not cleanup the IPMI"
+			       " user: 0x%x\n", rv);
+		pm_power_off = old_poweroff_func;
+	}
+}
+module_exit(ipmi_poweroff_cleanup);
+#endif
+
+module_init(ipmi_poweroff_init);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("IPMI Poweroff extension to sys_reboot");
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
new file mode 100644
index 0000000..52f6152
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ipmi_si.h
+ *
+ * Interface from the device-specific interfaces (OF, DMI, ACPI, PCI,
+ * etc) to the base ipmi system interface code.
+ */
+
+#include <linux/interrupt.h>
+#include "ipmi_si_sm.h"
+
+#define IPMI_IO_ADDR_SPACE  0
+#define IPMI_MEM_ADDR_SPACE 1
+
+#define DEFAULT_REGSPACING	1
+#define DEFAULT_REGSIZE		1
+
+#define DEVICE_NAME "ipmi_si"
+
+int ipmi_si_add_smi(struct si_sm_io *io);
+irqreturn_t ipmi_si_irq_handler(int irq, void *data);
+void ipmi_irq_start_cleanup(struct si_sm_io *io);
+int ipmi_std_irq_setup(struct si_sm_io *io);
+void ipmi_irq_finish_setup(struct si_sm_io *io);
+int ipmi_si_remove_by_dev(struct device *dev);
+void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
+			    unsigned long addr);
+int ipmi_si_hardcode_find_bmc(void);
+void ipmi_si_platform_init(void);
+void ipmi_si_platform_shutdown(void);
+
+extern struct platform_driver ipmi_platform_driver;
+
+#ifdef CONFIG_PCI
+void ipmi_si_pci_init(void);
+void ipmi_si_pci_shutdown(void);
+#else
+static inline void ipmi_si_pci_init(void) { }
+static inline void ipmi_si_pci_shutdown(void) { }
+#endif
+#ifdef CONFIG_PARISC
+void ipmi_si_parisc_init(void);
+void ipmi_si_parisc_shutdown(void);
+#else
+static inline void ipmi_si_parisc_init(void) { }
+static inline void ipmi_si_parisc_shutdown(void) { }
+#endif
+
+int ipmi_si_port_setup(struct si_sm_io *io);
+int ipmi_si_mem_setup(struct si_sm_io *io);
diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c
new file mode 100644
index 0000000..10219f2
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_hardcode.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/moduleparam.h>
+#include "ipmi_si.h"
+
+#define PFX "ipmi_hardcode: "
+/*
+ * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
+ * a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_PARMS.
+ */
+
+#define SI_MAX_PARMS 4
+
+static char          *si_type[SI_MAX_PARMS];
+#define MAX_SI_TYPE_STR 30
+static char          si_type_str[MAX_SI_TYPE_STR];
+static unsigned long addrs[SI_MAX_PARMS];
+static unsigned int num_addrs;
+static unsigned int  ports[SI_MAX_PARMS];
+static unsigned int num_ports;
+static int           irqs[SI_MAX_PARMS];
+static unsigned int num_irqs;
+static int           regspacings[SI_MAX_PARMS];
+static unsigned int num_regspacings;
+static int           regsizes[SI_MAX_PARMS];
+static unsigned int num_regsizes;
+static int           regshifts[SI_MAX_PARMS];
+static unsigned int num_regshifts;
+static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
+static unsigned int num_slave_addrs;
+
+module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
+MODULE_PARM_DESC(type, "Defines the type of each interface, each"
+		 " interface separated by commas.  The types are 'kcs',"
+		 " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
+		 " the first interface to kcs and the second to bt");
+module_param_hw_array(addrs, ulong, iomem, &num_addrs, 0);
+MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
+		 " addresses separated by commas.  Only use if an interface"
+		 " is in memory.  Otherwise, set it to zero or leave"
+		 " it blank.");
+module_param_hw_array(ports, uint, ioport, &num_ports, 0);
+MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
+		 " addresses separated by commas.  Only use if an interface"
+		 " is a port.  Otherwise, set it to zero or leave"
+		 " it blank.");
+module_param_hw_array(irqs, int, irq, &num_irqs, 0);
+MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
+		 " addresses separated by commas.  Only use if an interface"
+		 " has an interrupt.  Otherwise, set it to zero or leave"
+		 " it blank.");
+module_param_hw_array(regspacings, int, other, &num_regspacings, 0);
+MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
+		 " and each successive register used by the interface.  For"
+		 " instance, if the start address is 0xca2 and the spacing"
+		 " is 2, then the second address is at 0xca4.  Defaults"
+		 " to 1.");
+module_param_hw_array(regsizes, int, other, &num_regsizes, 0);
+MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
+		 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
+		 " 16-bit, 32-bit, or 64-bit register.  Use this if you"
+		 " the 8-bit IPMI register has to be read from a larger"
+		 " register.");
+module_param_hw_array(regshifts, int, other, &num_regshifts, 0);
+MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
+		 " IPMI register, in bits.  For instance, if the data"
+		 " is read from a 32-bit word and the IPMI data is in"
+		 " bits 8-15, then the shift would be 8.");
+module_param_hw_array(slave_addrs, int, other, &num_slave_addrs, 0);
+MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
+		 " the controller.  Normally this is 0x20, but can be"
+		 " overridden by this parm.  This is an array indexed"
+		 " by interface number.");
+
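+/*
+ * For example (values are illustrative; 0xca2 is the usual KCS port):
+ *
+ *   modprobe ipmi_si type=kcs ports=0xca2
+ *
+ * sets up a single KCS interface at I/O port 0xca2.
+ */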
+int ipmi_si_hardcode_find_bmc(void)
+{
+	int ret = -ENODEV;
+	int             i;
+	struct si_sm_io io;
+	char *str;
+
+	/* Parse out the si_type string into its components. */
+	str = si_type_str;
+	if (*str != '\0') {
+		for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
+			si_type[i] = str;
+			str = strchr(str, ',');
+			if (str) {
+				*str = '\0';
+				str++;
+			} else {
+				break;
+			}
+		}
+	}
+
+	memset(&io, 0, sizeof(io));
+	for (i = 0; i < SI_MAX_PARMS; i++) {
+		if (!ports[i] && !addrs[i])
+			continue;
+
+		io.addr_source = SI_HARDCODED;
+		pr_info(PFX "probing via hardcoded address\n");
+
+		if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
+			io.si_type = SI_KCS;
+		} else if (strcmp(si_type[i], "smic") == 0) {
+			io.si_type = SI_SMIC;
+		} else if (strcmp(si_type[i], "bt") == 0) {
+			io.si_type = SI_BT;
+		} else {
+			pr_warn(PFX "Interface type specified for interface %d, was invalid: %s\n",
+				i, si_type[i]);
+			continue;
+		}
+
+		if (ports[i]) {
+			/* An I/O port */
+			io.addr_data = ports[i];
+			io.addr_type = IPMI_IO_ADDR_SPACE;
+		} else if (addrs[i]) {
+			/* A memory port */
+			io.addr_data = addrs[i];
+			io.addr_type = IPMI_MEM_ADDR_SPACE;
+		} else {
+			pr_warn(PFX "Interface type specified for interface %d, but port and address were not set or set to zero.\n",
+				i);
+			continue;
+		}
+
+		io.addr = NULL;
+		io.regspacing = regspacings[i];
+		if (!io.regspacing)
+			io.regspacing = DEFAULT_REGSPACING;
+		io.regsize = regsizes[i];
+		if (!io.regsize)
+			io.regsize = DEFAULT_REGSIZE;
+		io.regshift = regshifts[i];
+		io.irq = irqs[i];
+		if (io.irq)
+			io.irq_setup = ipmi_std_irq_setup;
+		io.slave_addr = slave_addrs[i];
+
+		ret = ipmi_si_add_smi(&io);
+	}
+	return ret;
+}
diff --git a/drivers/char/ipmi/ipmi_si_hotmod.c b/drivers/char/ipmi/ipmi_si_hotmod.c
new file mode 100644
index 0000000..a98ca42
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_hotmod.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_si_hotmod.c
+ *
+ * Handling for dynamically adding/removing IPMI devices through
+ * a module parameter (and thus sysfs).
+ */
+#include <linux/moduleparam.h>
+#include <linux/ipmi.h>
+#include "ipmi_si.h"
+
+#define PFX "ipmi_hotmod: "
+
+static int hotmod_handler(const char *val, const struct kernel_param *kp);
+
+module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
+MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
+		 " Documentation/IPMI.txt in the kernel sources for the"
+		 " gory details.");
+
+/*
+ * Parms come in as <op1>[:op2[:op3...]].  ops are:
+ *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
+ * Options are:
+ *   rsp=<regspacing>
+ *   rsi=<regsize>
+ *   rsh=<regshift>
+ *   irq=<irq>
+ *   ipmb=<ipmb addr>
+ */
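+/*
+ * For example (address and irq are illustrative):
+ *
+ *   echo "add,kcs,i/o,0xca2,irq=9" > /sys/module/ipmi_si/parameters/hotmod
+ */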
+enum hotmod_op { HM_ADD, HM_REMOVE };
+struct hotmod_vals {
+	const char *name;
+	const int  val;
+};
+
+static const struct hotmod_vals hotmod_ops[] = {
+	{ "add",	HM_ADD },
+	{ "remove",	HM_REMOVE },
+	{ NULL }
+};
+
+static const struct hotmod_vals hotmod_si[] = {
+	{ "kcs",	SI_KCS },
+	{ "smic",	SI_SMIC },
+	{ "bt",		SI_BT },
+	{ NULL }
+};
+
+static const struct hotmod_vals hotmod_as[] = {
+	{ "mem",	IPMI_MEM_ADDR_SPACE },
+	{ "i/o",	IPMI_IO_ADDR_SPACE },
+	{ NULL }
+};
+
+static int parse_str(const struct hotmod_vals *v, int *val, char *name,
+		     char **curr)
+{
+	char *s;
+	int  i;
+
+	s = strchr(*curr, ',');
+	if (!s) {
+		pr_warn(PFX "No hotmod %s given.\n", name);
+		return -EINVAL;
+	}
+	*s = '\0';
+	s++;
+	for (i = 0; v[i].name; i++) {
+		if (strcmp(*curr, v[i].name) == 0) {
+			*val = v[i].val;
+			*curr = s;
+			return 0;
+		}
+	}
+
+	pr_warn(PFX "Invalid hotmod %s '%s'\n", name, *curr);
+	return -EINVAL;
+}
+
+static int check_hotmod_int_op(const char *curr, const char *option,
+			       const char *name, int *val)
+{
+	char *n;
+
+	if (strcmp(curr, name) == 0) {
+		if (!option) {
+			pr_warn(PFX "No option given for '%s'\n", curr);
+			return -EINVAL;
+		}
+		*val = simple_strtoul(option, &n, 0);
+		if ((*n != '\0') || (*option == '\0')) {
+			pr_warn(PFX "Bad option given for '%s'\n", curr);
+			return -EINVAL;
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static int hotmod_handler(const char *val, const struct kernel_param *kp)
+{
+	char *str = kstrdup(val, GFP_KERNEL);
+	int  rv;
+	char *next, *curr, *s, *n, *o;
+	enum hotmod_op op;
+	enum si_type si_type;
+	int  addr_space;
+	unsigned long addr;
+	int regspacing;
+	int regsize;
+	int regshift;
+	int irq;
+	int ipmb;
+	int ival;
+	int len;
+
+	if (!str)
+		return -ENOMEM;
+
+	/* Kill any trailing spaces, as we can get a "\n" from echo. */
+	len = strlen(str);
+	ival = len - 1;
+	while ((ival >= 0) && isspace(str[ival])) {
+		str[ival] = '\0';
+		ival--;
+	}
+
+	for (curr = str; curr; curr = next) {
+		regspacing = 1;
+		regsize = 1;
+		regshift = 0;
+		irq = 0;
+		ipmb = 0; /* Choose the default if not specified */
+
+		next = strchr(curr, ':');
+		if (next) {
+			*next = '\0';
+			next++;
+		}
+
+		rv = parse_str(hotmod_ops, &ival, "operation", &curr);
+		if (rv)
+			goto out;
+		op = ival;
+
+		rv = parse_str(hotmod_si, &ival, "interface type", &curr);
+		if (rv)
+			goto out;
+		si_type = ival;
+
+		rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
+		if (rv)
+			goto out;
+
+		s = strchr(curr, ',');
+		if (s) {
+			*s = '\0';
+			s++;
+		}
+		addr = simple_strtoul(curr, &n, 0);
+		if ((*n != '\0') || (*curr == '\0')) {
+			pr_warn(PFX "Invalid hotmod address '%s'\n", curr);
+			rv = -EINVAL;
+			goto out;
+		}
+
+		while (s) {
+			curr = s;
+			s = strchr(curr, ',');
+			if (s) {
+				*s = '\0';
+				s++;
+			}
+			o = strchr(curr, '=');
+			if (o) {
+				*o = '\0';
+				o++;
+			}
+			rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
+			if (rv < 0)
+				goto out;
+			else if (rv)
+				continue;
+			rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
+			if (rv < 0)
+				goto out;
+			else if (rv)
+				continue;
+			rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
+			if (rv < 0)
+				goto out;
+			else if (rv)
+				continue;
+			rv = check_hotmod_int_op(curr, o, "irq", &irq);
+			if (rv < 0)
+				goto out;
+			else if (rv)
+				continue;
+			rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
+			if (rv < 0)
+				goto out;
+			else if (rv)
+				continue;
+
+			rv = -EINVAL;
+			pr_warn(PFX "Invalid hotmod option '%s'\n", curr);
+			goto out;
+		}
+
+		if (op == HM_ADD) {
+			struct si_sm_io io;
+
+			memset(&io, 0, sizeof(io));
+			io.addr_source = SI_HOTMOD;
+			io.si_type = si_type;
+			io.addr_data = addr;
+			io.addr_type = addr_space;
+
+			io.addr = NULL;
+			io.regspacing = regspacing;
+			if (!io.regspacing)
+				io.regspacing = DEFAULT_REGSPACING;
+			io.regsize = regsize;
+			if (!io.regsize)
+				io.regsize = DEFAULT_REGSIZE;
+			io.regshift = regshift;
+			io.irq = irq;
+			if (io.irq)
+				io.irq_setup = ipmi_std_irq_setup;
+			io.slave_addr = ipmb;
+
+			rv = ipmi_si_add_smi(&io);
+			if (rv)
+				goto out;
+		} else {
+			ipmi_si_remove_by_data(addr_space, si_type, addr);
+		}
+	}
+	rv = len;
+out:
+	kfree(str);
+	return rv;
+}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
new file mode 100644
index 0000000..5faa917
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -0,0 +1,2309 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_si.c
+ *
+ * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
+ * BT).
+ *
+ * Author: MontaVista Software, Inc.
+ *         Corey Minyard <minyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
+ */
+
+/*
+ * This file holds the "policy" for the interface to the SMI state
+ * machine.  It does the configuration, handles timers and interrupts,
+ * and drives the real SMI state machine.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <asm/irq.h>
+#include <linux/interrupt.h>
+#include <linux/rcupdate.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include "ipmi_si.h"
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+#define PFX "ipmi_si: "
+
+/* Measure times between events in the driver. */
+#undef DEBUG_TIMING
+
+/* Call every 10 ms. */
+#define SI_TIMEOUT_TIME_USEC	10000
+#define SI_USEC_PER_JIFFY	(1000000/HZ)
+#define SI_TIMEOUT_JIFFIES	(SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
+#define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
+				      short timeout */
+
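+/*
+ * States of the SMI state machine.  SI_NORMAL means the interface is
+ * idle or running a user-requested transaction; the other states track
+ * driver-internal commands used to fetch and clear flags, drain events
+ * and messages, and check or set the global enables.
+ */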
+enum si_intf_state {
+	SI_NORMAL,
+	SI_GETTING_FLAGS,
+	SI_GETTING_EVENTS,
+	SI_CLEARING_FLAGS,
+	SI_GETTING_MESSAGES,
+	SI_CHECKING_ENABLES,
+	SI_SETTING_ENABLES
+	/* FIXME - add watchdog stuff. */
+};
+
+/* Some BT-specific defines we need here. */
+#define IPMI_BT_INTMASK_REG		2
+#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT	2
+#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT	1
+
+static const char * const si_to_str[] = { "invalid", "kcs", "smic", "bt" };
+
+static int initialized;
+
+/*
+ * Indexes into stats[] in smi_info below.
+ */
+enum si_stat_indexes {
+	/*
+	 * Number of times the driver requested a timer while an operation
+	 * was in progress.
+	 */
+	SI_STAT_short_timeouts = 0,
+
+	/*
+	 * Number of times the driver requested a timer while nothing was in
+	 * progress.
+	 */
+	SI_STAT_long_timeouts,
+
+	/* Number of times the interface was idle while being polled. */
+	SI_STAT_idles,
+
+	/* Number of interrupts the driver handled. */
+	SI_STAT_interrupts,
+
+	/* Number of times the driver got an ATTN from the hardware. */
+	SI_STAT_attentions,
+
+	/* Number of times the driver requested flags from the hardware. */
+	SI_STAT_flag_fetches,
+
+	/* Number of times the hardware didn't follow the state machine. */
+	SI_STAT_hosed_count,
+
+	/* Number of completed messages. */
+	SI_STAT_complete_transactions,
+
+	/* Number of IPMI events received from the hardware. */
+	SI_STAT_events,
+
+	/* Number of watchdog pretimeouts. */
+	SI_STAT_watchdog_pretimeouts,
+
+	/* Number of asynchronous messages received. */
+	SI_STAT_incoming_messages,
+
+
+	/* This *must* remain last, add new values above this. */
+	SI_NUM_STATS
+};
+
+struct smi_info {
+	int                    si_num;
+	struct ipmi_smi        *intf;
+	struct si_sm_data      *si_sm;
+	const struct si_sm_handlers *handlers;
+	spinlock_t             si_lock;
+	struct ipmi_smi_msg    *waiting_msg;
+	struct ipmi_smi_msg    *curr_msg;
+	enum si_intf_state     si_state;
+
+	/*
+	 * Used to handle the various types of I/O that can occur with
+	 * IPMI
+	 */
+	struct si_sm_io io;
+
+	/*
+	 * Per-OEM handler, called from handle_flags().  Returns 1
+	 * when handle_flags() needs to be re-run or 0 indicating it
+	 * set si_state itself.
+	 */
+	int (*oem_data_avail_handler)(struct smi_info *smi_info);
+
+	/*
+	 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
+	 * is set to hold the flags until we are done handling everything
+	 * from the flags.
+	 */
+#define RECEIVE_MSG_AVAIL	0x01
+#define EVENT_MSG_BUFFER_FULL	0x02
+#define WDT_PRE_TIMEOUT_INT	0x08
+#define OEM0_DATA_AVAIL     0x20
+#define OEM1_DATA_AVAIL     0x40
+#define OEM2_DATA_AVAIL     0x80
+#define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
+			     OEM1_DATA_AVAIL | \
+			     OEM2_DATA_AVAIL)
+	unsigned char       msg_flags;
+
+	/* Does the BMC have an event buffer? */
+	bool		    has_event_buffer;
+
+	/*
+	 * If set to true, this will request events the next time the
+	 * state machine is idle.
+	 */
+	atomic_t            req_events;
+
+	/*
+	 * If true, run the state machine to completion on every send
+	 * call.  Generally used after a panic to make sure stuff goes
+	 * out.
+	 */
+	bool                run_to_completion;
+
+	/* The timer for this si. */
+	struct timer_list   si_timer;
+
+	/* This flag is set if the timer can be set */
+	bool		    timer_can_start;
+
+	/* Set if the timer is running (timer_pending() isn't enough) */
+	bool		    timer_running;
+
+	/* The time (in jiffies) the last timeout occurred at. */
+	unsigned long       last_timeout_jiffies;
+
+	/* Are we waiting for the events, pretimeouts, received msgs? */
+	atomic_t            need_watch;
+
+	/*
+	 * The driver will disable interrupts when it gets into a
+	 * situation where it cannot handle messages due to lack of
+	 * memory.  Once that situation clears up, it will re-enable
+	 * interrupts.
+	 */
+	bool interrupt_disabled;
+
+	/*
+	 * Does the BMC support events?
+	 */
+	bool supports_event_msg_buff;
+
+	/*
+	 * Can we disable the receive irq bit in the global enables?
+	 * There are currently two forms of brokenness: some
+	 * systems cannot disable the bit (which is technically within
+	 * the spec but a bad idea) and some systems have the bit
+	 * forced to zero even though interrupts work (which is
+	 * clearly outside the spec).  The next bool tells which form
+	 * of brokenness is present.
+	 */
+	bool cannot_disable_irq;
+
+	/*
+	 * Some systems are broken and cannot set the irq enable
+	 * bit, even if they support interrupts.
+	 */
+	bool irq_enable_broken;
+
+	/*
+	 * Did we get an attention that we did not handle?
+	 */
+	bool got_attn;
+
+	/* From the get device id response... */
+	struct ipmi_device_id device_id;
+
+	/* Default driver model device. */
+	struct platform_device *pdev;
+
+	/* Have we added the device group to the device? */
+	bool dev_group_added;
+
+	/* Have we added the platform device? */
+	bool pdev_registered;
+
+	/* Counters and things for the proc filesystem. */
+	atomic_t stats[SI_NUM_STATS];
+
+	struct task_struct *thread;
+
+	struct list_head link;
+};
+
+#define smi_inc_stat(smi, stat) \
+	atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
+#define smi_get_stat(smi, stat) \
+	((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
+
+#define IPMI_MAX_INTFS 4
+static int force_kipmid[IPMI_MAX_INTFS];
+static int num_force_kipmid;
+
+static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
+static int num_max_busy_us;
+
+static bool unload_when_empty = true;
+
+static int try_smi_init(struct smi_info *smi);
+static void cleanup_one_si(struct smi_info *smi_info);
+static void cleanup_ipmi_si(void);
+
+#ifdef DEBUG_TIMING
+void debug_timestamp(char *msg)
+{
+	struct timespec64 t;
+
+	getnstimeofday64(&t);
+	pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
+}
+#else
+#define debug_timestamp(x)
+#endif
+
+static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
+static int register_xaction_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
+}
+
+static void deliver_recv_msg(struct smi_info *smi_info,
+			     struct ipmi_smi_msg *msg)
+{
+	/* Deliver the message to the upper layer. */
+	ipmi_smi_msg_received(smi_info->intf, msg);
+}
+
+static void return_hosed_msg(struct smi_info *smi_info, int cCode)
+{
+	struct ipmi_smi_msg *msg = smi_info->curr_msg;
+
+	if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
+		cCode = IPMI_ERR_UNSPECIFIED;
+	/* else use it as is */
+
+	/* Make it a response */
+	msg->rsp[0] = msg->data[0] | 4;
+	msg->rsp[1] = msg->data[1];
+	msg->rsp[2] = cCode;
+	msg->rsp_size = 3;
+
+	smi_info->curr_msg = NULL;
+	deliver_recv_msg(smi_info, msg);
+}
+
+static enum si_sm_result start_next_msg(struct smi_info *smi_info)
+{
+	int              rv;
+
+	if (!smi_info->waiting_msg) {
+		smi_info->curr_msg = NULL;
+		rv = SI_SM_IDLE;
+	} else {
+		int err;
+
+		smi_info->curr_msg = smi_info->waiting_msg;
+		smi_info->waiting_msg = NULL;
+		debug_timestamp("Start2");
+		err = atomic_notifier_call_chain(&xaction_notifier_list,
+				0, smi_info);
+		if (err & NOTIFY_STOP_MASK) {
+			rv = SI_SM_CALL_WITHOUT_DELAY;
+			goto out;
+		}
+		err = smi_info->handlers->start_transaction(
+			smi_info->si_sm,
+			smi_info->curr_msg->data,
+			smi_info->curr_msg->data_size);
+		if (err)
+			return_hosed_msg(smi_info, err);
+
+		rv = SI_SM_CALL_WITHOUT_DELAY;
+	}
+out:
+	return rv;
+}
+
+static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+{
+	if (!smi_info->timer_can_start)
+		return;
+	smi_info->last_timeout_jiffies = jiffies;
+	mod_timer(&smi_info->si_timer, new_val);
+	smi_info->timer_running = true;
+}
+
+/*
+ * Start a new message and (re)start the timer and thread.
+ */
+static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
+			  unsigned int size)
+{
+	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+	if (smi_info->thread)
+		wake_up_process(smi_info->thread);
+
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
+}
+
+static void start_check_enables(struct smi_info *smi_info)
+{
+	unsigned char msg[2];
+
+	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+
+	start_new_msg(smi_info, msg, 2);
+	smi_info->si_state = SI_CHECKING_ENABLES;
+}
+
+static void start_clear_flags(struct smi_info *smi_info)
+{
+	unsigned char msg[3];
+
+	/* Make sure the watchdog pre-timeout flag is not set at startup. */
+	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+	msg[2] = WDT_PRE_TIMEOUT_INT;
+
+	start_new_msg(smi_info, msg, 3);
+	smi_info->si_state = SI_CLEARING_FLAGS;
+}
+
+static void start_getting_msg_queue(struct smi_info *smi_info)
+{
+	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
+	smi_info->curr_msg->data_size = 2;
+
+	start_new_msg(smi_info, smi_info->curr_msg->data,
+		      smi_info->curr_msg->data_size);
+	smi_info->si_state = SI_GETTING_MESSAGES;
+}
+
+static void start_getting_events(struct smi_info *smi_info)
+{
+	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+	smi_info->curr_msg->data_size = 2;
+
+	start_new_msg(smi_info, smi_info->curr_msg->data,
+		      smi_info->curr_msg->data_size);
+	smi_info->si_state = SI_GETTING_EVENTS;
+}
+
+/*
+ * When we have a situation where we run out of memory and cannot
+ * allocate messages, we just leave them in the BMC and run the system
+ * polled until we can allocate some memory.  Once we have some
+ * memory, we will re-enable the interrupt.
+ *
+ * Note that we cannot just use disable_irq(), since the interrupt may
+ * be shared.
+ */
+static inline bool disable_si_irq(struct smi_info *smi_info)
+{
+	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
+		smi_info->interrupt_disabled = true;
+		start_check_enables(smi_info);
+		return true;
+	}
+	return false;
+}
+
+static inline bool enable_si_irq(struct smi_info *smi_info)
+{
+	if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
+		smi_info->interrupt_disabled = false;
+		start_check_enables(smi_info);
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Allocate a message.  If unable to allocate, start the interrupt
+ * disable process and return NULL.  If able to allocate but
+ * interrupts are disabled, free the message and return NULL after
+ * starting the interrupt enable process.
+ */
+static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
+{
+	struct ipmi_smi_msg *msg;
+
+	msg = ipmi_alloc_smi_msg();
+	if (!msg) {
+		if (!disable_si_irq(smi_info))
+			smi_info->si_state = SI_NORMAL;
+	} else if (enable_si_irq(smi_info)) {
+		ipmi_free_smi_msg(msg);
+		msg = NULL;
+	}
+	return msg;
+}
+
+static void handle_flags(struct smi_info *smi_info)
+{
+retry:
+	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
+		/* Watchdog pre-timeout */
+		smi_inc_stat(smi_info, watchdog_pretimeouts);
+
+		start_clear_flags(smi_info);
+		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+		ipmi_smi_watchdog_pretimeout(smi_info->intf);
+	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
+		/* Messages available. */
+		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+		if (!smi_info->curr_msg)
+			return;
+
+		start_getting_msg_queue(smi_info);
+	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
+		/* Events available. */
+		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+		if (!smi_info->curr_msg)
+			return;
+
+		start_getting_events(smi_info);
+	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
+		   smi_info->oem_data_avail_handler) {
+		if (smi_info->oem_data_avail_handler(smi_info))
+			goto retry;
+	} else
+		smi_info->si_state = SI_NORMAL;
+}
+
+/*
+ * Global enables we care about.
+ */
+#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
+			     IPMI_BMC_EVT_MSG_INTR)
+
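+/*
+ * Compute the global enable bits this interface wants set, based on
+ * event buffer support and the current irq state; *irq_on reports
+ * whether any interrupt-driven delivery will be enabled.
+ */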
+static u8 current_global_enables(struct smi_info *smi_info, u8 base,
+				 bool *irq_on)
+{
+	u8 enables = 0;
+
+	if (smi_info->supports_event_msg_buff)
+		enables |= IPMI_BMC_EVT_MSG_BUFF;
+
+	if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
+	     smi_info->cannot_disable_irq) &&
+	    !smi_info->irq_enable_broken)
+		enables |= IPMI_BMC_RCV_MSG_INTR;
+
+	if (smi_info->supports_event_msg_buff &&
+	    smi_info->io.irq && !smi_info->interrupt_disabled &&
+	    !smi_info->irq_enable_broken)
+		enables |= IPMI_BMC_EVT_MSG_INTR;
+
+	*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);
+
+	return enables;
+}
+
+static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
+{
+	u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);
+
+	irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;
+
+	if ((bool)irqstate == irq_on)
+		return;
+
+	if (irq_on)
+		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
+				     IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+	else
+		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
+}
+
+static void handle_transaction_done(struct smi_info *smi_info)
+{
+	struct ipmi_smi_msg *msg;
+
+	debug_timestamp("Done");
+	switch (smi_info->si_state) {
+	case SI_NORMAL:
+		if (!smi_info->curr_msg)
+			break;
+
+		smi_info->curr_msg->rsp_size
+			= smi_info->handlers->get_result(
+				smi_info->si_sm,
+				smi_info->curr_msg->rsp,
+				IPMI_MAX_MSG_LENGTH);
+
+		/*
+		 * Do this here because deliver_recv_msg() releases the
+		 * lock, and a new message can be put in during the
+		 * time the lock is released.
+		 */
+		msg = smi_info->curr_msg;
+		smi_info->curr_msg = NULL;
+		deliver_recv_msg(smi_info, msg);
+		break;
+
+	case SI_GETTING_FLAGS:
+	{
+		unsigned char msg[4];
+		unsigned int  len;
+
+		/* We got the flags from the SMI, now handle them. */
+		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+		if (msg[2] != 0) {
+			/* Error fetching flags, just give up for now. */
+			smi_info->si_state = SI_NORMAL;
+		} else if (len < 4) {
+			/*
+			 * Hmm, no flags.  That's technically illegal, but
+			 * don't use uninitialized data.
+			 */
+			smi_info->si_state = SI_NORMAL;
+		} else {
+			smi_info->msg_flags = msg[3];
+			handle_flags(smi_info);
+		}
+		break;
+	}
+
+	case SI_CLEARING_FLAGS:
+	{
+		unsigned char msg[3];
+
+		/* We cleared the flags. */
+		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
+		if (msg[2] != 0) {
+			/* Error clearing flags */
+			dev_warn(smi_info->io.dev,
+				 "Error clearing flags: %2.2x\n", msg[2]);
+		}
+		smi_info->si_state = SI_NORMAL;
+		break;
+	}
+
+	case SI_GETTING_EVENTS:
+	{
+		smi_info->curr_msg->rsp_size
+			= smi_info->handlers->get_result(
+				smi_info->si_sm,
+				smi_info->curr_msg->rsp,
+				IPMI_MAX_MSG_LENGTH);
+
+		/*
+		 * Do this here because deliver_recv_msg() releases the
+		 * lock, and a new message can be put in during the
+		 * time the lock is released.
+		 */
+		msg = smi_info->curr_msg;
+		smi_info->curr_msg = NULL;
+		if (msg->rsp[2] != 0) {
+			/* Error getting event, probably done. */
+			msg->done(msg);
+
+			/* Take off the event flag. */
+			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+			handle_flags(smi_info);
+		} else {
+			smi_inc_stat(smi_info, events);
+
+			/*
+			 * Do this before we deliver the message
+			 * because delivering the message releases the
+			 * lock and something else can mess with the
+			 * state.
+			 */
+			handle_flags(smi_info);
+
+			deliver_recv_msg(smi_info, msg);
+		}
+		break;
+	}
+
+	case SI_GETTING_MESSAGES:
+	{
+		smi_info->curr_msg->rsp_size
+			= smi_info->handlers->get_result(
+				smi_info->si_sm,
+				smi_info->curr_msg->rsp,
+				IPMI_MAX_MSG_LENGTH);
+
+		/*
+		 * Do this here because deliver_recv_msg() releases the
+		 * lock, and a new message can be put in during the
+		 * time the lock is released.
+		 */
+		msg = smi_info->curr_msg;
+		smi_info->curr_msg = NULL;
+		if (msg->rsp[2] != 0) {
+			/* Error getting message, probably done. */
+			msg->done(msg);
+
+			/* Take off the msg flag. */
+			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+			handle_flags(smi_info);
+		} else {
+			smi_inc_stat(smi_info, incoming_messages);
+
+			/*
+			 * Do this before we deliver the message
+			 * because delivering the message releases the
+			 * lock and something else can mess with the
+			 * state.
+			 */
+			handle_flags(smi_info);
+
+			deliver_recv_msg(smi_info, msg);
+		}
+		break;
+	}
+
+	case SI_CHECKING_ENABLES:
+	{
+		unsigned char msg[4];
+		u8 enables;
+		bool irq_on;
+
+		/* We got the enables from the BMC, now handle them. */
+		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+		if (msg[2] != 0) {
+			dev_warn(smi_info->io.dev,
+				 "Couldn't get irq info: %x.\n", msg[2]);
+			dev_warn(smi_info->io.dev,
+				 "Maybe ok, but ipmi might run very slowly.\n");
+			smi_info->si_state = SI_NORMAL;
+			break;
+		}
+		enables = current_global_enables(smi_info, 0, &irq_on);
+		if (smi_info->io.si_type == SI_BT)
+			/* BT has its own interrupt enable bit. */
+			check_bt_irq(smi_info, irq_on);
+		if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
+			/*
+			 * Enables are not correct; fix them, preserving
+			 * the bits we do not manage.
+			 */
+			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+			msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
+			smi_info->handlers->start_transaction(
+				smi_info->si_sm, msg, 3);
+			smi_info->si_state = SI_SETTING_ENABLES;
+		} else if (smi_info->supports_event_msg_buff) {
+			smi_info->curr_msg = ipmi_alloc_smi_msg();
+			if (!smi_info->curr_msg) {
+				smi_info->si_state = SI_NORMAL;
+				break;
+			}
+			start_getting_events(smi_info);
+		} else {
+			smi_info->si_state = SI_NORMAL;
+		}
+		break;
+	}
+
+	case SI_SETTING_ENABLES:
+	{
+		unsigned char msg[4];
+
+		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+		if (msg[2] != 0)
+			dev_warn(smi_info->io.dev,
+				 "Could not set the global enables: 0x%x.\n",
+				 msg[2]);
+
+		if (smi_info->supports_event_msg_buff) {
+			smi_info->curr_msg = ipmi_alloc_smi_msg();
+			if (!smi_info->curr_msg) {
+				smi_info->si_state = SI_NORMAL;
+				break;
+			}
+			start_getting_events(smi_info);
+		} else {
+			smi_info->si_state = SI_NORMAL;
+		}
+		break;
+	}
+	}
+}
+
+/*
+ * Called on timeouts and events.  Timeouts should pass the elapsed
+ * time, interrupts should pass in zero.  Must be called with
+ * si_lock held and interrupts disabled.
+ */
+static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
+					   int time)
+{
+	enum si_sm_result si_sm_result;
+
+restart:
+	/*
+	 * There used to be a loop here that waited a little while
+	 * (around 25us) before giving up.  That turned out to be
+	 * pointless, the minimum delays I was seeing were in the 300us
+	 * range, which is far too long to wait in an interrupt.  So
+	 * we just run until the state machine tells us something
+	 * happened or it needs a delay.
+	 */
+	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
+	time = 0;
+	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
+		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+
+	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
+		smi_inc_stat(smi_info, complete_transactions);
+
+		handle_transaction_done(smi_info);
+		goto restart;
+	} else if (si_sm_result == SI_SM_HOSED) {
+		smi_inc_stat(smi_info, hosed_count);
+
+		/*
+		 * Do this before return_hosed_msg(), because that
+		 * releases the lock.
+		 */
+		smi_info->si_state = SI_NORMAL;
+		if (smi_info->curr_msg != NULL) {
+			/*
+			 * If we were handling a user message, format
+			 * a response to send to the upper layer to
+			 * tell it about the error.
+			 */
+			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
+		}
+		goto restart;
+	}
+
+	/*
+	 * We prefer handling attn over new messages.  But don't do
+	 * this if there is not yet an upper layer to handle anything.
+	 */
+	if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
+		unsigned char msg[2];
+
+		if (smi_info->si_state != SI_NORMAL) {
+			/*
+			 * We got an ATTN, but we are doing something else.
+			 * Handle the ATTN later.
+			 */
+			smi_info->got_attn = true;
+		} else {
+			smi_info->got_attn = false;
+			smi_inc_stat(smi_info, attentions);
+
+			/*
+			 * Got an attn, send down a get message flags to see
+			 * what's causing it.  It would be better to handle
+			 * this in the upper layer, but due to the way
+			 * interrupts work with the SMI, that's not really
+			 * possible.
+			 */
+			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+			msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+			start_new_msg(smi_info, msg, 2);
+			smi_info->si_state = SI_GETTING_FLAGS;
+			goto restart;
+		}
+	}
+
+	/* If we are currently idle, try to start the next message. */
+	if (si_sm_result == SI_SM_IDLE) {
+		smi_inc_stat(smi_info, idles);
+
+		si_sm_result = start_next_msg(smi_info);
+		if (si_sm_result != SI_SM_IDLE)
+			goto restart;
+	}
+
+	if ((si_sm_result == SI_SM_IDLE)
+	    && (atomic_read(&smi_info->req_events))) {
+		/*
+		 * We are idle and the upper layer requested that we fetch
+		 * events, so do so.
+		 */
+		atomic_set(&smi_info->req_events, 0);
+
+		/*
+		 * Take this opportunity to check the interrupt and
+		 * message enable state for the BMC.  The BMC can be
+		 * asynchronously reset, and may thus have interrupts
+		 * and messages disabled.
+		 */
+		if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
+			start_check_enables(smi_info);
+		} else {
+			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+			if (!smi_info->curr_msg)
+				goto out;
+
+			start_getting_events(smi_info);
+		}
+		goto restart;
+	}
+
+	if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
+		/* OK if it fails, the timer will just go off. */
+		if (del_timer(&smi_info->si_timer))
+			smi_info->timer_running = false;
+	}
+
+out:
+	return si_sm_result;
+}
+
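+/*
+ * If the interface is idle, restart the timer, wake kipmid (if it
+ * exists) and try to kick off the next queued message.
+ */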
+static void check_start_timer_thread(struct smi_info *smi_info)
+{
+	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+		if (smi_info->thread)
+			wake_up_process(smi_info->thread);
+
+		start_next_msg(smi_info);
+		smi_event_handler(smi_info, 0);
+	}
+}
+
+static void flush_messages(void *send_info)
+{
+	struct smi_info *smi_info = send_info;
+	enum si_sm_result result;
+
+	/*
+	 * Currently, this function is called only in run-to-completion
+	 * mode.  This means we are single-threaded, no need for locks.
+	 */
+	result = smi_event_handler(smi_info, 0);
+	while (result != SI_SM_IDLE) {
+		udelay(SI_SHORT_TIMEOUT_USEC);
+		result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
+	}
+}
+
+static void sender(void                *send_info,
+		   struct ipmi_smi_msg *msg)
+{
+	struct smi_info   *smi_info = send_info;
+	unsigned long     flags;
+
+	debug_timestamp("Enqueue");
+
+	if (smi_info->run_to_completion) {
+		/*
+		 * If we are running to completion, start it.  Upper
+		 * layer will call flush_messages to clear it out.
+		 */
+		smi_info->waiting_msg = msg;
+		return;
+	}
+
+	spin_lock_irqsave(&smi_info->si_lock, flags);
+	/*
+	 * The following two lines don't need to be under the lock for
+	 * the lock's sake, but they do need SMP memory barriers to
+	 * avoid getting things out of order.  We are already claiming
+	 * the lock, anyway, so just do it under the lock to avoid the
+	 * ordering problem.
+	 */
+	BUG_ON(smi_info->waiting_msg);
+	smi_info->waiting_msg = msg;
+	check_start_timer_thread(smi_info);
+	spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
+static void set_run_to_completion(void *send_info, bool i_run_to_completion)
+{
+	struct smi_info   *smi_info = send_info;
+
+	smi_info->run_to_completion = i_run_to_completion;
+	if (i_run_to_completion)
+		flush_messages(smi_info);
+}
+
+/*
+ * A -1 in the nsec value of the busy-waiting timespec means that no
+ * busy-wait deadline is currently set: kipmid is either spinning
+ * without a limit or not busy-waiting at all.
+ */
+static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
+{
+	ts->tv_nsec = -1;
+}
+static inline int ipmi_si_is_busy(struct timespec64 *ts)
+{
+	return ts->tv_nsec != -1;
+}
+
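+/*
+ * Returns 1 while kipmid should keep spinning on the current
+ * operation, 0 once the per-interface max_busy budget (if one is
+ * configured) has been exhausted and the thread should sleep.
+ */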
+static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
+					const struct smi_info *smi_info,
+					struct timespec64 *busy_until)
+{
+	unsigned int max_busy_us = 0;
+
+	if (smi_info->si_num < num_max_busy_us)
+		max_busy_us = kipmid_max_busy_us[smi_info->si_num];
+	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
+		ipmi_si_set_not_busy(busy_until);
+	else if (!ipmi_si_is_busy(busy_until)) {
+		getnstimeofday64(busy_until);
+		timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
+	} else {
+		struct timespec64 now;
+
+		getnstimeofday64(&now);
+		if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
+			ipmi_si_set_not_busy(busy_until);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+
+/*
+ * A busy-waiting loop for speeding up IPMI operation.
+ *
+ * Lousy hardware makes this hard.  This is only enabled for systems
+ * that are not BT and do not have interrupts.  It spins waiting for
+ * an operation to complete, or until max_busy tells it to stop (if
+ * that is enabled).  See the paragraph on kipmid_max_busy_us in
+ * Documentation/IPMI.txt for details.
+ */
+static int ipmi_thread(void *data)
+{
+	struct smi_info *smi_info = data;
+	unsigned long flags;
+	enum si_sm_result smi_result;
+	struct timespec64 busy_until;
+
+	ipmi_si_set_not_busy(&busy_until);
+	set_user_nice(current, MAX_NICE);
+	while (!kthread_should_stop()) {
+		int busy_wait;
+
+		spin_lock_irqsave(&(smi_info->si_lock), flags);
+		smi_result = smi_event_handler(smi_info, 0);
+
+		/*
+		 * If the driver is doing something, there is a possible
+		 * race with the timer.  If the timer handler sees idle,
+		 * and the thread here sees something else, the timer
+		 * handler won't restart the timer even though it is
+		 * required.  So start it here if necessary.
+		 */
+		if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
+			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
+						  &busy_until);
+		if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
+			; /* do nothing */
+		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
+			schedule();
+		else if (smi_result == SI_SM_IDLE) {
+			if (atomic_read(&smi_info->need_watch)) {
+				schedule_timeout_interruptible(100);
+			} else {
+				/* Wait to be woken up when we are needed. */
+				__set_current_state(TASK_INTERRUPTIBLE);
+				schedule();
+			}
+		} else
+			schedule_timeout_interruptible(1);
+	}
+	return 0;
+}
+
+
+static void poll(void *send_info)
+{
+	struct smi_info *smi_info = send_info;
+	unsigned long flags = 0;
+	bool run_to_completion = smi_info->run_to_completion;
+
+	/*
+	 * Make sure there is some delay in the poll loop so we can
+	 * drive time forward and timeout things.
+	 */
+	udelay(10);
+	if (!run_to_completion)
+		spin_lock_irqsave(&smi_info->si_lock, flags);
+	smi_event_handler(smi_info, 10);
+	if (!run_to_completion)
+		spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
+static void request_events(void *send_info)
+{
+	struct smi_info *smi_info = send_info;
+
+	if (!smi_info->has_event_buffer)
+		return;
+
+	atomic_set(&smi_info->req_events, 1);
+}
+
+static void set_need_watch(void *send_info, bool enable)
+{
+	struct smi_info *smi_info = send_info;
+	unsigned long flags;
+
+	atomic_set(&smi_info->need_watch, enable);
+	spin_lock_irqsave(&smi_info->si_lock, flags);
+	check_start_timer_thread(smi_info);
+	spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
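+/*
+ * Timer handler that drives the interface.  With working interrupts
+ * it only re-arms itself with long timeouts as a backstop; otherwise
+ * it uses a short timeout whenever the state machine asks for a
+ * delay.  It stops re-arming once the state machine goes idle.
+ */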
+static void smi_timeout(struct timer_list *t)
+{
+	struct smi_info   *smi_info = from_timer(smi_info, t, si_timer);
+	enum si_sm_result smi_result;
+	unsigned long     flags;
+	unsigned long     jiffies_now;
+	long              time_diff;
+	long		  timeout;
+
+	spin_lock_irqsave(&(smi_info->si_lock), flags);
+	debug_timestamp("Timer");
+
+	jiffies_now = jiffies;
+	time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
+		     * SI_USEC_PER_JIFFY);
+	smi_result = smi_event_handler(smi_info, time_diff);
+
+	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
+		/* Running with interrupts, only do long timeouts. */
+		timeout = jiffies + SI_TIMEOUT_JIFFIES;
+		smi_inc_stat(smi_info, long_timeouts);
+		goto do_mod_timer;
+	}
+
+	/*
+	 * If the state machine asks for a short delay, then shorten
+	 * the timer timeout.
+	 */
+	if (smi_result == SI_SM_CALL_WITH_DELAY) {
+		smi_inc_stat(smi_info, short_timeouts);
+		timeout = jiffies + 1;
+	} else {
+		smi_inc_stat(smi_info, long_timeouts);
+		timeout = jiffies + SI_TIMEOUT_JIFFIES;
+	}
+
+do_mod_timer:
+	if (smi_result != SI_SM_IDLE)
+		smi_mod_timer(smi_info, timeout);
+	else
+		smi_info->timer_running = false;
+	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+}
+
+irqreturn_t ipmi_si_irq_handler(int irq, void *data)
+{
+	struct smi_info *smi_info = data;
+	unsigned long   flags;
+
+	if (smi_info->io.si_type == SI_BT)
+		/* We need to clear the IRQ flag for the BT interface. */
+		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
+				     IPMI_BT_INTMASK_CLEAR_IRQ_BIT
+				     | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+
+	spin_lock_irqsave(&(smi_info->si_lock), flags);
+
+	smi_inc_stat(smi_info, interrupts);
+
+	debug_timestamp("Interrupt");
+
+	smi_event_handler(smi_info, 0);
+	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+	return IRQ_HANDLED;
+}
+
+static int smi_start_processing(void            *send_info,
+				struct ipmi_smi *intf)
+{
+	struct smi_info *new_smi = send_info;
+	int             enable = 0;
+
+	new_smi->intf = intf;
+
+	/* Set up the timer that drives the interface. */
+	timer_setup(&new_smi->si_timer, smi_timeout, 0);
+	new_smi->timer_can_start = true;
+	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
+
+	/* Try to claim any interrupts. */
+	if (new_smi->io.irq_setup) {
+		new_smi->io.irq_handler_data = new_smi;
+		new_smi->io.irq_setup(&new_smi->io);
+	}
+
+	/*
+	 * Check if the user forcefully enabled the daemon.
+	 */
+	if (new_smi->si_num < num_force_kipmid)
+		enable = force_kipmid[new_smi->si_num];
+	/*
+	 * The BT interface is efficient enough to not need a thread,
+	 * and there is no need for a thread if we have interrupts.
+	 */
+	else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
+		enable = 1;
+
+	if (enable) {
+		new_smi->thread = kthread_run(ipmi_thread, new_smi,
+					      "kipmi%d", new_smi->si_num);
+		if (IS_ERR(new_smi->thread)) {
+			dev_notice(new_smi->io.dev, "Could not start"
+				   " kernel thread due to error %ld, only using"
+				   " timers to drive the interface\n",
+				   PTR_ERR(new_smi->thread));
+			new_smi->thread = NULL;
+		}
+	}
+
+	return 0;
+}
+
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+	struct smi_info *smi = send_info;
+
+	data->addr_src = smi->io.addr_source;
+	data->dev = smi->io.dev;
+	data->addr_info = smi->io.addr_info;
+	get_device(smi->io.dev);
+
+	return 0;
+}
+
+static void set_maintenance_mode(void *send_info, bool enable)
+{
+	struct smi_info   *smi_info = send_info;
+
+	if (!enable)
+		atomic_set(&smi_info->req_events, 0);
+}
+
+static void shutdown_smi(void *send_info);
+static const struct ipmi_smi_handlers handlers = {
+	.owner                  = THIS_MODULE,
+	.start_processing       = smi_start_processing,
+	.shutdown               = shutdown_smi,
+	.get_smi_info		= get_smi_info,
+	.sender			= sender,
+	.request_events		= request_events,
+	.set_need_watch		= set_need_watch,
+	.set_maintenance_mode   = set_maintenance_mode,
+	.set_run_to_completion  = set_run_to_completion,
+	.flush_messages		= flush_messages,
+	.poll			= poll,
+};
+
+static LIST_HEAD(smi_infos);
+static DEFINE_MUTEX(smi_infos_lock);
+static int smi_num; /* Used to sequence the SMIs */
+
+static const char * const addr_space_to_str[] = { "i/o", "mem" };
+
+module_param_array(force_kipmid, int, &num_force_kipmid, 0);
+MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
+		 " disabled (0).  Normally the IPMI driver auto-detects"
+		 " this, but the value may be overridden by this parameter.");
+module_param(unload_when_empty, bool, 0);
+MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
+		 " specified or found, default is 1.  Setting to 0"
+		 " is useful for hot add of devices using hotmod.");
+module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
+MODULE_PARM_DESC(kipmid_max_busy_us,
+		 "Max time (in microseconds) to busy-wait for IPMI data before"
+		 " sleeping. 0 (default) means to wait forever. Set to 100-500"
+		 " if kipmid is using up a lot of CPU time.");
+
+void ipmi_irq_finish_setup(struct si_sm_io *io)
+{
+	if (io->si_type == SI_BT)
+		/* Enable the interrupt in the BT interface. */
+		io->outputb(io, IPMI_BT_INTMASK_REG,
+			    IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+}
+
+void ipmi_irq_start_cleanup(struct si_sm_io *io)
+{
+	if (io->si_type == SI_BT)
+		/* Disable the interrupt in the BT interface. */
+		io->outputb(io, IPMI_BT_INTMASK_REG, 0);
+}
+
+static void std_irq_cleanup(struct si_sm_io *io)
+{
+	ipmi_irq_start_cleanup(io);
+	free_irq(io->irq, io->irq_handler_data);
+}
+
+int ipmi_std_irq_setup(struct si_sm_io *io)
+{
+	int rv;
+
+	if (!io->irq)
+		return 0;
+
+	rv = request_irq(io->irq,
+			 ipmi_si_irq_handler,
+			 IRQF_SHARED,
+			 DEVICE_NAME,
+			 io->irq_handler_data);
+	if (rv) {
+		dev_warn(io->dev, "%s unable to claim interrupt %d,"
+			 " running polled\n",
+			 DEVICE_NAME, io->irq);
+		io->irq = 0;
+	} else {
+		io->irq_cleanup = std_irq_cleanup;
+		ipmi_irq_finish_setup(io);
+		dev_info(io->dev, "Using irq %d\n", io->irq);
+	}
+
+	return rv;
+}
+
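+/*
+ * Synchronously crank the state machine until the current transaction
+ * finishes.  Used only from the initialization paths, before the
+ * timer and interrupts are running.  Returns -ENODEV if the state
+ * machine reports itself hosed.
+ */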
+static int wait_for_msg_done(struct smi_info *smi_info)
+{
+	enum si_sm_result     smi_result;
+
+	smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
+	for (;;) {
+		if (smi_result == SI_SM_CALL_WITH_DELAY ||
+		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
+			schedule_timeout_uninterruptible(1);
+			smi_result = smi_info->handlers->event(
+				smi_info->si_sm, jiffies_to_usecs(1));
+		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+			smi_result = smi_info->handlers->event(
+				smi_info->si_sm, 0);
+		} else
+			break;
+	}
+	if (smi_result == SI_SM_HOSED)
+		/*
+		 * We couldn't get the state machine to run, so whatever's at
+		 * the port is probably not an IPMI SMI interface.
+		 */
+		return -ENODEV;
+
+	return 0;
+}
+
+static int try_get_dev_id(struct smi_info *smi_info)
+{
+	unsigned char         msg[2];
+	unsigned char         *resp;
+	unsigned long         resp_len;
+	int                   rv = 0;
+
+	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	/*
+	 * Do a Get Device ID command, since it comes back with some
+	 * useful info.
+	 */
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_GET_DEVICE_ID_CMD;
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+	rv = wait_for_msg_done(smi_info);
+	if (rv)
+		goto out;
+
+	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+						  resp, IPMI_MAX_MSG_LENGTH);
+
+	/* Check and record info from the get device id, in case we need it. */
+	rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
+			resp + 2, resp_len - 2, &smi_info->device_id);
+
+out:
+	kfree(resp);
+	return rv;
+}
+
+static int get_global_enables(struct smi_info *smi_info, u8 *enables)
+{
+	unsigned char         msg[3];
+	unsigned char         *resp;
+	unsigned long         resp_len;
+	int                   rv;
+
+	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+	rv = wait_for_msg_done(smi_info);
+	if (rv) {
+		dev_warn(smi_info->io.dev,
+			 "Error getting response from get global enables command: %d\n",
+			 rv);
+		goto out;
+	}
+
+	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+						  resp, IPMI_MAX_MSG_LENGTH);
+
+	if (resp_len < 4 ||
+			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+			resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
+			resp[2] != 0) {
+		dev_warn(smi_info->io.dev,
+			 "Invalid return from get global enables command: %ld %x %x %x\n",
+			 resp_len, resp[0], resp[1], resp[2]);
+		rv = -EINVAL;
+		goto out;
+	} else {
+		*enables = resp[3];
+	}
+
+out:
+	kfree(resp);
+	return rv;
+}
+
+/*
+ * Returns 0 on success, 1 if the BMC returns an error completion
+ * code, or a negative errno on other failures.
+ */
+static int set_global_enables(struct smi_info *smi_info, u8 enables)
+{
+	unsigned char         msg[3];
+	unsigned char         *resp;
+	unsigned long         resp_len;
+	int                   rv;
+
+	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+	msg[2] = enables;
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+
+	rv = wait_for_msg_done(smi_info);
+	if (rv) {
+		dev_warn(smi_info->io.dev,
+			 "Error getting response from set global enables command: %d\n",
+			 rv);
+		goto out;
+	}
+
+	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+						  resp, IPMI_MAX_MSG_LENGTH);
+
+	if (resp_len < 3 ||
+			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+			resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
+		dev_warn(smi_info->io.dev,
+			 "Invalid return from set global enables command: %ld %x %x\n",
+			 resp_len, resp[0], resp[1]);
+		rv = -EINVAL;
+		goto out;
+	}
+
+	if (resp[2] != 0)
+		rv = 1;
+
+out:
+	kfree(resp);
+	return rv;
+}
+
+/*
+ * Some BMCs do not support clearing the receive irq bit in the global
+ * enables (even if they don't support interrupts on the BMC).  Check
+ * for this and handle it properly.
+ */
+static void check_clr_rcv_irq(struct smi_info *smi_info)
+{
+	u8 enables = 0;
+	int rv;
+
+	rv = get_global_enables(smi_info, &enables);
+	if (!rv) {
+		if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
+			/* Already clear, should work ok. */
+			return;
+
+		enables &= ~IPMI_BMC_RCV_MSG_INTR;
+		rv = set_global_enables(smi_info, enables);
+	}
+
+	if (rv < 0) {
+		dev_err(smi_info->io.dev,
+			"Cannot check clearing the rcv irq: %d\n", rv);
+		return;
+	}
+
+	if (rv) {
+		/*
+		 * An error when clearing the receive irq bit means
+		 * clearing the bit is not supported.
+		 */
+		dev_warn(smi_info->io.dev,
+			 "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
+		smi_info->cannot_disable_irq = true;
+	}
+}
+
+/*
+ * Some BMCs do not support setting the interrupt bits in the global
+ * enables even if they support interrupts.  Clearly bad, but we can
+ * compensate.
+ */
+static void check_set_rcv_irq(struct smi_info *smi_info)
+{
+	u8 enables = 0;
+	int rv;
+
+	if (!smi_info->io.irq)
+		return;
+
+	rv = get_global_enables(smi_info, &enables);
+	if (!rv) {
+		enables |= IPMI_BMC_RCV_MSG_INTR;
+		rv = set_global_enables(smi_info, enables);
+	}
+
+	if (rv < 0) {
+		dev_err(smi_info->io.dev,
+			"Cannot check setting the rcv irq: %d\n", rv);
+		return;
+	}
+
+	if (rv) {
+		/*
+		 * An error when setting the receive irq bit means
+		 * setting the bit is not supported.
+		 */
+		dev_warn(smi_info->io.dev,
+			 "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
+		smi_info->cannot_disable_irq = true;
+		smi_info->irq_enable_broken = true;
+	}
+}
+
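+/*
+ * Read the global enables and, if the event message buffer is not
+ * already on, try to enable it.  Returns 0 on success, -ENOENT if the
+ * BMC does not support the event buffer, or another negative errno on
+ * failure.
+ */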
+static int try_enable_event_buffer(struct smi_info *smi_info)
+{
+	unsigned char         msg[3];
+	unsigned char         *resp;
+	unsigned long         resp_len;
+	int                   rv = 0;
+
+	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+	rv = wait_for_msg_done(smi_info);
+	if (rv) {
+		pr_warn(PFX "Error getting response from get global enables command, the event buffer is not enabled.\n");
+		goto out;
+	}
+
+	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+						  resp, IPMI_MAX_MSG_LENGTH);
+
+	if (resp_len < 4 ||
+			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+			resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
+			resp[2] != 0) {
+		pr_warn(PFX "Invalid return from get global enables command, cannot enable the event buffer.\n");
+		rv = -EINVAL;
+		goto out;
+	}
+
+	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
+		/* buffer is already enabled, nothing to do. */
+		smi_info->supports_event_msg_buff = true;
+		goto out;
+	}
+
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+	msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+
+	rv = wait_for_msg_done(smi_info);
+	if (rv) {
+		pr_warn(PFX "Error getting response from set global, enables command, the event buffer is not enabled.\n");
+		goto out;
+	}
+
+	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+						  resp, IPMI_MAX_MSG_LENGTH);
+
+	if (resp_len < 3 ||
+			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+			resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
+		pr_warn(PFX "Invalid return from get global, enables command, not enable the event buffer.\n");
+		rv = -EINVAL;
+		goto out;
+	}
+
+	if (resp[2] != 0)
+		/*
+		 * An error when setting the event buffer bit means
+		 * that the event buffer is not supported.
+		 */
+		rv = -ENOENT;
+	else
+		smi_info->supports_event_msg_buff = true;
+
+out:
+	kfree(resp);
+	return rv;
+}
+
+#define IPMI_SI_ATTR(name) \
+static ssize_t ipmi_##name##_show(struct device *dev,			\
+				  struct device_attribute *attr,	\
+				  char *buf)				\
+{									\
+	struct smi_info *smi_info = dev_get_drvdata(dev);		\
+									\
+	return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name));	\
+}									\
+static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
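+
+/*
+ * For example, IPMI_SI_ATTR(idles) defines ipmi_idles_show() and a
+ * read-only "idles" sysfs attribute (dev_attr_idles) that reports the
+ * corresponding counter from the interface's statistics.
+ */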
+
+static ssize_t ipmi_type_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct smi_info *smi_info = dev_get_drvdata(dev);
+
+	return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
+}
+static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);
+
+static ssize_t ipmi_interrupts_enabled_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct smi_info *smi_info = dev_get_drvdata(dev);
+	int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;
+
+	return snprintf(buf, 10, "%d\n", enabled);
+}
+static DEVICE_ATTR(interrupts_enabled, S_IRUGO,
+		   ipmi_interrupts_enabled_show, NULL);
+
+IPMI_SI_ATTR(short_timeouts);
+IPMI_SI_ATTR(long_timeouts);
+IPMI_SI_ATTR(idles);
+IPMI_SI_ATTR(interrupts);
+IPMI_SI_ATTR(attentions);
+IPMI_SI_ATTR(flag_fetches);
+IPMI_SI_ATTR(hosed_count);
+IPMI_SI_ATTR(complete_transactions);
+IPMI_SI_ATTR(events);
+IPMI_SI_ATTR(watchdog_pretimeouts);
+IPMI_SI_ATTR(incoming_messages);
+
+static ssize_t ipmi_params_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct smi_info *smi_info = dev_get_drvdata(dev);
+
+	return snprintf(buf, 200,
+			"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
+			si_to_str[smi_info->io.si_type],
+			addr_space_to_str[smi_info->io.addr_type],
+			smi_info->io.addr_data,
+			smi_info->io.regspacing,
+			smi_info->io.regsize,
+			smi_info->io.regshift,
+			smi_info->io.irq,
+			smi_info->io.slave_addr);
+}
+static DEVICE_ATTR(params, S_IRUGO, ipmi_params_show, NULL);
+
+static struct attribute *ipmi_si_dev_attrs[] = {
+	&dev_attr_type.attr,
+	&dev_attr_interrupts_enabled.attr,
+	&dev_attr_short_timeouts.attr,
+	&dev_attr_long_timeouts.attr,
+	&dev_attr_idles.attr,
+	&dev_attr_interrupts.attr,
+	&dev_attr_attentions.attr,
+	&dev_attr_flag_fetches.attr,
+	&dev_attr_hosed_count.attr,
+	&dev_attr_complete_transactions.attr,
+	&dev_attr_events.attr,
+	&dev_attr_watchdog_pretimeouts.attr,
+	&dev_attr_incoming_messages.attr,
+	&dev_attr_params.attr,
+	NULL
+};
+
+static const struct attribute_group ipmi_si_dev_attr_group = {
+	.attrs		= ipmi_si_dev_attrs,
+};
+
+/*
+ * oem_data_avail_to_receive_msg_avail
+ * @info - smi_info structure with msg_flags set
+ *
+ * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
+ * Returns 1 indicating need to re-run handle_flags().
+ */
+static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
+{
+	smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
+			       RECEIVE_MSG_AVAIL);
+	return 1;
+}
+
+/*
+ * setup_dell_poweredge_oem_data_handler
+ * @info - smi_info.device_id must be populated
+ *
+ * Systems that match, but have firmware version < 1.40 may assert
+ * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
+ * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
+ * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
+ * as RECEIVE_MSG_AVAIL instead.
+ *
+ * As Dell has no plans to release IPMI 1.5 firmware that *ever*
+ * assert the OEM[012] bits, and if it did, the driver would have to
+ * change to handle that properly, we don't actually check for the
+ * firmware version.
+ * Device ID = 0x20                BMC on PowerEdge 8G servers
+ * Device Revision = 0x80
+ * Firmware Revision1 = 0x01       BMC version 1.40
+ * Firmware Revision2 = 0x40       BCD encoded
+ * IPMI Version = 0x51             IPMI 1.5
+ * Manufacturer ID = A2 02 00      Dell IANA
+ *
+ * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
+ * OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL.
+ *
+ */
+#define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
+#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
+#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
+#define DELL_IANA_MFR_ID 0x0002a2
+static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
+{
+	struct ipmi_device_id *id = &smi_info->device_id;
+	if (id->manufacturer_id == DELL_IANA_MFR_ID) {
+		if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
+		    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
+		    id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
+			smi_info->oem_data_avail_handler =
+				oem_data_avail_to_receive_msg_avail;
+		} else if (ipmi_version_major(id) < 1 ||
+			   (ipmi_version_major(id) == 1 &&
+			    ipmi_version_minor(id) < 5)) {
+			smi_info->oem_data_avail_handler =
+				oem_data_avail_to_receive_msg_avail;
+		}
+	}
+}
+
+#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
+static void return_hosed_msg_badsize(struct smi_info *smi_info)
+{
+	struct ipmi_smi_msg *msg = smi_info->curr_msg;
+
+	/*
+	 * Make it a response; the response netfn is the request netfn
+	 * with bit 0 set, which is bit 2 of the packed netfn byte.
+	 */
+	msg->rsp[0] = msg->data[0] | 4;
+	msg->rsp[1] = msg->data[1];
+	msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
+	msg->rsp_size = 3;
+	smi_info->curr_msg = NULL;
+	deliver_recv_msg(smi_info, msg);
+}
+
+/*
+ * dell_poweredge_bt_xaction_handler
+ * @info - smi_info.device_id must be populated
+ *
+ * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
+ * not respond to a Get SDR command if the length of the data
+ * requested is exactly 0x3A, which leads to command timeouts and no
+ * data returned.  This intercepts such commands, and causes userspace
+ * callers to try again with a different-sized buffer, which succeeds.
+ */
+
+#define STORAGE_NETFN 0x0A
+#define STORAGE_CMD_GET_SDR 0x23
+static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
+					     unsigned long unused,
+					     void *in)
+{
+	struct smi_info *smi_info = in;
+	unsigned char *data = smi_info->curr_msg->data;
+	unsigned int size   = smi_info->curr_msg->data_size;
+	if (size >= 8 &&
+	    (data[0]>>2) == STORAGE_NETFN &&
+	    data[1] == STORAGE_CMD_GET_SDR &&
+	    data[7] == 0x3A) {
+		return_hosed_msg_badsize(smi_info);
+		return NOTIFY_STOP;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block dell_poweredge_bt_xaction_notifier = {
+	.notifier_call	= dell_poweredge_bt_xaction_handler,
+};
+
+/*
+ * setup_dell_poweredge_bt_xaction_handler
+ * @info - smi_info.device_id must be filled in already
+ *
+ * Registers a BT transaction notifier for matching Dell PowerEdge
+ * systems, when we know it is needed.
+ */
+static void
+setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
+{
+	struct ipmi_device_id *id = &smi_info->device_id;
+	if (id->manufacturer_id == DELL_IANA_MFR_ID &&
+	    smi_info->io.si_type == SI_BT)
+		register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
+}
+
+/*
+ * setup_oem_data_handler
+ * @info - smi_info.device_id must be filled in already
+ *
+ * Fills in smi_info.oem_data_avail_handler
+ * when we know what function to use there.
+ */
+
+static void setup_oem_data_handler(struct smi_info *smi_info)
+{
+	setup_dell_poweredge_oem_data_handler(smi_info);
+}
+
+static void setup_xaction_handlers(struct smi_info *smi_info)
+{
+	setup_dell_poweredge_bt_xaction_handler(smi_info);
+}
+
+static void check_for_broken_irqs(struct smi_info *smi_info)
+{
+	check_clr_rcv_irq(smi_info);
+	check_set_rcv_irq(smi_info);
+}
+
+static inline void stop_timer_and_thread(struct smi_info *smi_info)
+{
+	if (smi_info->thread != NULL) {
+		kthread_stop(smi_info->thread);
+		smi_info->thread = NULL;
+	}
+
+	smi_info->timer_can_start = false;
+	if (smi_info->timer_running)
+		del_timer_sync(&smi_info->si_timer);
+}
+
+static struct smi_info *find_dup_si(struct smi_info *info)
+{
+	struct smi_info *e;
+
+	list_for_each_entry(e, &smi_infos, link) {
+		if (e->io.addr_type != info->io.addr_type)
+			continue;
+		if (e->io.addr_data == info->io.addr_data) {
+			/*
+			 * This is a cheap hack, ACPI doesn't have a defined
+			 * slave address but SMBIOS does.  Pick it up from
+			 * any source that has it available.
+			 */
+			if (info->io.slave_addr && !e->io.slave_addr)
+				e->io.slave_addr = info->io.slave_addr;
+			return e;
+		}
+	}
+
+	return NULL;
+}
+
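+/*
+ * Add a newly discovered interface, deduplicating against interfaces
+ * already on the list (an ACPI-supplied entry replaces an SMBIOS one
+ * for the same address).  The interface is only started right away if
+ * module initialization has already finished.
+ */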
+int ipmi_si_add_smi(struct si_sm_io *io)
+{
+	int rv = 0;
+	struct smi_info *new_smi, *dup;
+
+	if (!io->io_setup) {
+		if (io->addr_type == IPMI_IO_ADDR_SPACE) {
+			io->io_setup = ipmi_si_port_setup;
+		} else if (io->addr_type == IPMI_MEM_ADDR_SPACE) {
+			io->io_setup = ipmi_si_mem_setup;
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
+	if (!new_smi)
+		return -ENOMEM;
+	spin_lock_init(&new_smi->si_lock);
+
+	new_smi->io = *io;
+
+	mutex_lock(&smi_infos_lock);
+	dup = find_dup_si(new_smi);
+	if (dup) {
+		if (new_smi->io.addr_source == SI_ACPI &&
+		    dup->io.addr_source == SI_SMBIOS) {
+			/* We prefer ACPI over SMBIOS. */
+			dev_info(dup->io.dev,
+				 "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
+				 si_to_str[new_smi->io.si_type]);
+			cleanup_one_si(dup);
+		} else {
+			dev_info(new_smi->io.dev,
+				 "%s-specified %s state machine: duplicate\n",
+				 ipmi_addr_src_to_str(new_smi->io.addr_source),
+				 si_to_str[new_smi->io.si_type]);
+			rv = -EBUSY;
+			kfree(new_smi);
+			goto out_err;
+		}
+	}
+
+	pr_info(PFX "Adding %s-specified %s state machine\n",
+		ipmi_addr_src_to_str(new_smi->io.addr_source),
+		si_to_str[new_smi->io.si_type]);
+
+	list_add_tail(&new_smi->link, &smi_infos);
+
+	if (initialized)
+		rv = try_smi_init(new_smi);
+out_err:
+	mutex_unlock(&smi_infos_lock);
+	return rv;
+}
+
+/*
+ * Try to start up an interface.  Must be called with smi_infos_lock
+ * held, primarily to keep smi_num consistent; we only want to do these
+ * one at a time.
+ */
+static int try_smi_init(struct smi_info *new_smi)
+{
+	int rv = 0;
+	int i;
+	char *init_name = NULL;
+
+	pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
+		ipmi_addr_src_to_str(new_smi->io.addr_source),
+		si_to_str[new_smi->io.si_type],
+		addr_space_to_str[new_smi->io.addr_type],
+		new_smi->io.addr_data,
+		new_smi->io.slave_addr, new_smi->io.irq);
+
+	switch (new_smi->io.si_type) {
+	case SI_KCS:
+		new_smi->handlers = &kcs_smi_handlers;
+		break;
+
+	case SI_SMIC:
+		new_smi->handlers = &smic_smi_handlers;
+		break;
+
+	case SI_BT:
+		new_smi->handlers = &bt_smi_handlers;
+		break;
+
+	default:
+		/* No support for anything else yet. */
+		rv = -EIO;
+		goto out_err;
+	}
+
+	new_smi->si_num = smi_num;
+
+	/* Do this early so it's available for logs. */
+	if (!new_smi->io.dev) {
+		init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d",
+				      new_smi->si_num);
+
+		/*
+		 * If we don't already have a device from something
+		 * else (like PCI), then register a new one.
+		 */
+		new_smi->pdev = platform_device_alloc("ipmi_si",
+						      new_smi->si_num);
+		if (!new_smi->pdev) {
+			pr_err(PFX "Unable to allocate platform device\n");
+			rv = -ENOMEM;
+			goto out_err;
+		}
+		new_smi->io.dev = &new_smi->pdev->dev;
+		new_smi->io.dev->driver = &ipmi_platform_driver.driver;
+		/* Nulled by device_add() */
+		new_smi->io.dev->init_name = init_name;
+	}
+
+	/* Allocate the state machine's data and initialize it. */
+	new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
+	if (!new_smi->si_sm) {
+		rv = -ENOMEM;
+		goto out_err;
+	}
+	new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
+							   &new_smi->io);
+
+	/* Now that we know the I/O size, we can set up the I/O. */
+	rv = new_smi->io.io_setup(&new_smi->io);
+	if (rv) {
+		dev_err(new_smi->io.dev, "Could not set up I/O space\n");
+		goto out_err;
+	}
+
+	/* Do low-level detection first. */
+	if (new_smi->handlers->detect(new_smi->si_sm)) {
+		if (new_smi->io.addr_source)
+			dev_err(new_smi->io.dev,
+				"Interface detection failed\n");
+		rv = -ENODEV;
+		goto out_err;
+	}
+
+	/*
+	 * Attempt a get device id command.  If it fails, we probably
+	 * don't have a BMC here.
+	 */
+	rv = try_get_dev_id(new_smi);
+	if (rv) {
+		if (new_smi->io.addr_source)
+			dev_err(new_smi->io.dev,
+			       "There appears to be no BMC at this location\n");
+		goto out_err;
+	}
+
+	setup_oem_data_handler(new_smi);
+	setup_xaction_handlers(new_smi);
+	check_for_broken_irqs(new_smi);
+
+	new_smi->waiting_msg = NULL;
+	new_smi->curr_msg = NULL;
+	atomic_set(&new_smi->req_events, 0);
+	new_smi->run_to_completion = false;
+	for (i = 0; i < SI_NUM_STATS; i++)
+		atomic_set(&new_smi->stats[i], 0);
+
+	new_smi->interrupt_disabled = true;
+	atomic_set(&new_smi->need_watch, 0);
+
+	rv = try_enable_event_buffer(new_smi);
+	if (rv == 0)
+		new_smi->has_event_buffer = true;
+
+	/*
+	 * Start clearing the flags before we enable interrupts or the
+	 * timer to avoid racing with the timer.
+	 */
+	start_clear_flags(new_smi);
+
+	/*
+	 * IRQ is defined to be set when non-zero.  req_events will
+	 * cause a global flags check that will enable interrupts.
+	 */
+	if (new_smi->io.irq) {
+		new_smi->interrupt_disabled = false;
+		atomic_set(&new_smi->req_events, 1);
+	}
+
+	if (new_smi->pdev && !new_smi->pdev_registered) {
+		rv = platform_device_add(new_smi->pdev);
+		if (rv) {
+			dev_err(new_smi->io.dev,
+				"Unable to register system interface device: %d\n",
+				rv);
+			goto out_err;
+		}
+		new_smi->pdev_registered = true;
+	}
+
+	dev_set_drvdata(new_smi->io.dev, new_smi);
+	rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
+	if (rv) {
+		dev_err(new_smi->io.dev,
+			"Unable to add device attributes: error %d\n",
+			rv);
+		goto out_err;
+	}
+	new_smi->dev_group_added = true;
+
+	rv = ipmi_register_smi(&handlers,
+			       new_smi,
+			       new_smi->io.dev,
+			       new_smi->io.slave_addr);
+	if (rv) {
+		dev_err(new_smi->io.dev,
+			"Unable to register device: error %d\n",
+			rv);
+		goto out_err;
+	}
+
+	/* Don't increment till we know we have succeeded. */
+	smi_num++;
+
+	dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
+		 si_to_str[new_smi->io.si_type]);
+
+	WARN_ON(new_smi->io.dev->init_name != NULL);
+
+ out_err:
+	kfree(init_name);
+	return rv;
+}
+
+static int init_ipmi_si(void)
+{
+	struct smi_info *e;
+	enum ipmi_addr_src type = SI_INVALID;
+
+	if (initialized)
+		return 0;
+
+	pr_info("IPMI System Interface driver.\n");
+
+	/* If the user gave us a device, they presumably want us to use it */
+	if (!ipmi_si_hardcode_find_bmc())
+		goto do_scan;
+
+	ipmi_si_platform_init();
+
+	ipmi_si_pci_init();
+
+	ipmi_si_parisc_init();
+
+	/* We prefer devices with interrupts, but in the case of a machine
+	   with multiple BMCs we assume that there will be several instances
+	   of a given type, so if we succeed in registering a type then also
+	   try to register everything else of the same type */
+do_scan:
+	mutex_lock(&smi_infos_lock);
+	list_for_each_entry(e, &smi_infos, link) {
+		/* Try to register a device if it has an IRQ and we either
+		   haven't successfully registered a device yet or this
+		   device has the same type as one we successfully registered */
+		if (e->io.irq && (!type || e->io.addr_source == type)) {
+			if (!try_smi_init(e)) {
+				type = e->io.addr_source;
+			}
+		}
+	}
+
+	/* type will only have been set if we successfully registered an si */
+	if (type)
+		goto skip_fallback_noirq;
+
+	/* Fall back to devices without interrupts. */
+
+	list_for_each_entry(e, &smi_infos, link) {
+		if (!e->io.irq && (!type || e->io.addr_source == type)) {
+			if (!try_smi_init(e)) {
+				type = e->io.addr_source;
+			}
+		}
+	}
+
+skip_fallback_noirq:
+	initialized = 1;
+	mutex_unlock(&smi_infos_lock);
+
+	if (type)
+		return 0;
+
+	mutex_lock(&smi_infos_lock);
+	if (unload_when_empty && list_empty(&smi_infos)) {
+		mutex_unlock(&smi_infos_lock);
+		cleanup_ipmi_si();
+		pr_warn(PFX "Unable to find any System Interface(s)\n");
+		return -ENODEV;
+	} else {
+		mutex_unlock(&smi_infos_lock);
+		return 0;
+	}
+}
+module_init(init_ipmi_si);
+
+static void shutdown_smi(void *send_info)
+{
+	struct smi_info *smi_info = send_info;
+
+	if (smi_info->dev_group_added) {
+		device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group);
+		smi_info->dev_group_added = false;
+	}
+	if (smi_info->io.dev)
+		dev_set_drvdata(smi_info->io.dev, NULL);
+
+	/*
+	 * Make sure that interrupts, the timer and the thread are
+	 * stopped and will not run again.
+	 */
+	smi_info->interrupt_disabled = true;
+	if (smi_info->io.irq_cleanup) {
+		smi_info->io.irq_cleanup(&smi_info->io);
+		smi_info->io.irq_cleanup = NULL;
+	}
+	stop_timer_and_thread(smi_info);
+
+	/*
+	 * Wait until we know that we are out of any interrupt
+	 * handlers that might have been running before we freed the
+	 * interrupt.
+	 */
+	synchronize_sched();
+
+	/*
+	 * Timeouts are stopped, now make sure the interrupts are off
+	 * in the BMC.  Note that timers and CPU interrupts are off,
+	 * so no need for locks.
+	 */
+	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
+		poll(smi_info);
+		schedule_timeout_uninterruptible(1);
+	}
+	if (smi_info->handlers)
+		disable_si_irq(smi_info);
+	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
+		poll(smi_info);
+		schedule_timeout_uninterruptible(1);
+	}
+	if (smi_info->handlers)
+		smi_info->handlers->cleanup(smi_info->si_sm);
+
+	if (smi_info->io.addr_source_cleanup) {
+		smi_info->io.addr_source_cleanup(&smi_info->io);
+		smi_info->io.addr_source_cleanup = NULL;
+	}
+	if (smi_info->io.io_cleanup) {
+		smi_info->io.io_cleanup(&smi_info->io);
+		smi_info->io.io_cleanup = NULL;
+	}
+
+	kfree(smi_info->si_sm);
+	smi_info->si_sm = NULL;
+
+	smi_info->intf = NULL;
+}
+
+/*
+ * Must be called with smi_infos_lock held, to serialize the
+ * smi_info->intf check.
+ */
+static void cleanup_one_si(struct smi_info *smi_info)
+{
+	if (!smi_info)
+		return;
+
+	list_del(&smi_info->link);
+
+	if (smi_info->intf)
+		ipmi_unregister_smi(smi_info->intf);
+
+	if (smi_info->pdev) {
+		if (smi_info->pdev_registered)
+			platform_device_unregister(smi_info->pdev);
+		else
+			platform_device_put(smi_info->pdev);
+	}
+
+	kfree(smi_info);
+}
+
+int ipmi_si_remove_by_dev(struct device *dev)
+{
+	struct smi_info *e;
+	int rv = -ENOENT;
+
+	mutex_lock(&smi_infos_lock);
+	list_for_each_entry(e, &smi_infos, link) {
+		if (e->io.dev == dev) {
+			cleanup_one_si(e);
+			rv = 0;
+			break;
+		}
+	}
+	mutex_unlock(&smi_infos_lock);
+
+	return rv;
+}
+
+void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
+			    unsigned long addr)
+{
+	/* remove */
+	struct smi_info *e, *tmp_e;
+
+	mutex_lock(&smi_infos_lock);
+	list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
+		if (e->io.addr_type != addr_space)
+			continue;
+		if (e->io.si_type != si_type)
+			continue;
+		if (e->io.addr_data == addr)
+			cleanup_one_si(e);
+	}
+	mutex_unlock(&smi_infos_lock);
+}
+
+static void cleanup_ipmi_si(void)
+{
+	struct smi_info *e, *tmp_e;
+
+	if (!initialized)
+		return;
+
+	ipmi_si_pci_shutdown();
+
+	ipmi_si_parisc_shutdown();
+
+	ipmi_si_platform_shutdown();
+
+	mutex_lock(&smi_infos_lock);
+	list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
+		cleanup_one_si(e);
+	mutex_unlock(&smi_infos_lock);
+}
+module_exit(cleanup_ipmi_si);
+
+MODULE_ALIAS("platform:dmi-ipmi-si");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
+		   " system interfaces.");
diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c
new file mode 100644
index 0000000..1b869d5
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_mem_io.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/io.h>
+#include "ipmi_si.h"
+
+static unsigned char intf_mem_inb(const struct si_sm_io *io,
+				  unsigned int offset)
+{
+	return readb((io->addr)+(offset * io->regspacing));
+}
+
+static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset,
+			  unsigned char b)
+{
+	writeb(b, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char intf_mem_inw(const struct si_sm_io *io,
+				  unsigned int offset)
+{
+	return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
+		& 0xff;
+}
+
+static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset,
+			  unsigned char b)
+{
+	writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char intf_mem_inl(const struct si_sm_io *io,
+				  unsigned int offset)
+{
+	return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
+		& 0xff;
+}
+
+static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset,
+			  unsigned char b)
+{
+	writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+#ifdef readq
+static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset)
+{
+	return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
+		& 0xff;
+}
+
+static void mem_outq(const struct si_sm_io *io, unsigned int offset,
+		     unsigned char b)
+{
+	writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+#endif
+
+static void mem_region_cleanup(struct si_sm_io *io, int num)
+{
+	unsigned long addr = io->addr_data;
+	int idx;
+
+	for (idx = 0; idx < num; idx++)
+		release_mem_region(addr + idx * io->regspacing,
+				   io->regsize);
+}
+
+static void mem_cleanup(struct si_sm_io *io)
+{
+	if (io->addr) {
+		iounmap(io->addr);
+		mem_region_cleanup(io, io->io_size);
+	}
+}
+
+int ipmi_si_mem_setup(struct si_sm_io *io)
+{
+	unsigned long addr = io->addr_data;
+	int           mapsize, idx;
+
+	if (!addr)
+		return -ENODEV;
+
+	io->io_cleanup = mem_cleanup;
+
+	/*
+	 * Figure out the actual readb/readw/readl/etc routine to use based
+	 * upon the register size.
+	 */
+	switch (io->regsize) {
+	case 1:
+		io->inputb = intf_mem_inb;
+		io->outputb = intf_mem_outb;
+		break;
+	case 2:
+		io->inputb = intf_mem_inw;
+		io->outputb = intf_mem_outw;
+		break;
+	case 4:
+		io->inputb = intf_mem_inl;
+		io->outputb = intf_mem_outl;
+		break;
+#ifdef readq
+	case 8:
+		io->inputb = mem_inq;
+		io->outputb = mem_outq;
+		break;
+#endif
+	default:
+		dev_warn(io->dev, "Invalid register size: %d\n",
+			 io->regsize);
+		return -EINVAL;
+	}
+
+	/*
+	 * Some BIOSes reserve disjoint memory regions in their ACPI
+	 * tables.  This causes problems when trying to request the
+	 * entire region.  Therefore we must request each register
+	 * separately.
+	 */
+	for (idx = 0; idx < io->io_size; idx++) {
+		if (request_mem_region(addr + idx * io->regspacing,
+				       io->regsize, DEVICE_NAME) == NULL) {
+			/* Undo allocations */
+			mem_region_cleanup(io, idx);
+			return -EIO;
+		}
+	}
+
+	/*
+	 * Calculate the total amount of memory to claim.  This is an
+	 * unusual looking calculation, but it avoids claiming any
+	 * more memory than it has to.  It will claim everything
+	 * between the first address to the end of the last full
+	 * register.
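+	 *
+	 * For example, with io_size = 2, regspacing = 4 and regsize = 1,
+	 * mapsize = (2 * 4) - (4 - 1) = 5 bytes: both registers plus the
+	 * gap between them, but not the padding past the last register.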
+	 */
+	mapsize = ((io->io_size * io->regspacing)
+		   - (io->regspacing - io->regsize));
+	io->addr = ioremap(addr, mapsize);
+	if (io->addr == NULL) {
+		mem_region_cleanup(io, io->io_size);
+		return -EIO;
+	}
+	return 0;
+}
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
new file mode 100644
index 0000000..f3c9982
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <asm/hardware.h>	/* for register_parisc_driver() stuff */
+#include <asm/parisc-device.h>
+#include "ipmi_si.h"
+
+static bool parisc_registered;
+
+static int __init ipmi_parisc_probe(struct parisc_device *dev)
+{
+	struct si_sm_io io;
+
+	memset(&io, 0, sizeof(io));
+
+	io.si_type	= SI_KCS;
+	io.addr_source	= SI_DEVICETREE;
+	io.addr_type	= IPMI_MEM_ADDR_SPACE;
+	io.addr_data	= dev->hpa.start;
+	io.regsize	= 1;
+	io.regspacing	= 1;
+	io.regshift	= 0;
+	io.irq		= 0; /* no interrupt */
+	io.irq_setup	= NULL;
+	io.dev		= &dev->dev;
+
+	dev_dbg(&dev->dev, "addr 0x%lx\n", io.addr_data);
+
+	return ipmi_si_add_smi(&io);
+}
+
+static int __exit ipmi_parisc_remove(struct parisc_device *dev)
+{
+	return ipmi_si_remove_by_dev(&dev->dev);
+}
+
+static const struct parisc_device_id ipmi_parisc_tbl[] __initconst = {
+	{ HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
+	{ 0, }
+};
+
+MODULE_DEVICE_TABLE(parisc, ipmi_parisc_tbl);
+
+static struct parisc_driver ipmi_parisc_driver __refdata = {
+	.name =		"ipmi",
+	.id_table =	ipmi_parisc_tbl,
+	.probe =	ipmi_parisc_probe,
+	.remove =	__exit_p(ipmi_parisc_remove),
+};
+
+void ipmi_si_parisc_init(void)
+{
+	register_parisc_driver(&ipmi_parisc_driver);
+	parisc_registered = true;
+}
+
+void ipmi_si_parisc_shutdown(void)
+{
+	if (parisc_registered)
+		unregister_parisc_driver(&ipmi_parisc_driver);
+}
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
new file mode 100644
index 0000000..f54ca68
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_si_pci.c
+ *
+ * Handling for IPMI devices on the PCI bus.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "ipmi_si.h"
+
+#define PFX "ipmi_pci: "
+
+static bool pci_registered;
+
+static bool si_trypci = true;
+
+module_param_named(trypci, si_trypci, bool, 0);
+MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via pci");
+
+#define PCI_CLASS_SERIAL_IPMI		0x0c07
+#define PCI_CLASS_SERIAL_IPMI_SMIC	0x0c0700
+#define PCI_CLASS_SERIAL_IPMI_KCS	0x0c0701
+#define PCI_CLASS_SERIAL_IPMI_BT	0x0c0702
+
+#define PCI_DEVICE_ID_HP_MMC 0x121A
+
+static void ipmi_pci_cleanup(struct si_sm_io *io)
+{
+	struct pci_dev *pdev = io->addr_source_data;
+
+	pci_disable_device(pdev);
+}
+
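+/*
+ * Probe the KCS register spacing: write a byte to the presumed
+ * command/status register and see whether a non-zero status reads
+ * back.  If not, the registers are probably spaced further apart, so
+ * retry with a larger spacing (1, 4, then 16 bytes).
+ */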
+static int ipmi_pci_probe_regspacing(struct si_sm_io *io)
+{
+	if (io->si_type == SI_KCS) {
+		unsigned char	status;
+		int		regspacing;
+
+		io->regsize = DEFAULT_REGSIZE;
+		io->regshift = 0;
+
+		/* detect 1-, 4-, or 16-byte spacing */
+		for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
+			io->regspacing = regspacing;
+			if (io->io_setup(io)) {
+				dev_err(io->dev,
+					"Could not setup I/O space\n");
+				return DEFAULT_REGSPACING;
+			}
+			/* write invalid cmd */
+			io->outputb(io, 1, 0x10);
+			/* read status back */
+			status = io->inputb(io, 1);
+			io->io_cleanup(io);
+			if (status)
+				return regspacing;
+			regspacing *= 4;
+		}
+	}
+	return DEFAULT_REGSPACING;
+}
+
+static struct pci_device_id ipmi_pci_blacklist[] = {
+	/*
+	 * This is a "Virtual IPMI device", whatever that is.  It appears
+	 * as a KCS device by the class, but it is not one.
+	 */
+	{ PCI_VDEVICE(REALTEK, 0x816c) },
+	{ 0, }
+};
+
+static int ipmi_pci_probe(struct pci_dev *pdev,
+				    const struct pci_device_id *ent)
+{
+	int rv;
+	struct si_sm_io io;
+
+	if (pci_match_id(ipmi_pci_blacklist, pdev))
+		return -ENODEV;
+
+	memset(&io, 0, sizeof(io));
+	io.addr_source = SI_PCI;
+	dev_info(&pdev->dev, "probing via PCI");
+
+	switch (pdev->class) {
+	case PCI_CLASS_SERIAL_IPMI_SMIC:
+		io.si_type = SI_SMIC;
+		break;
+
+	case PCI_CLASS_SERIAL_IPMI_KCS:
+		io.si_type = SI_KCS;
+		break;
+
+	case PCI_CLASS_SERIAL_IPMI_BT:
+		io.si_type = SI_BT;
+		break;
+
+	default:
+		dev_info(&pdev->dev, "Unknown IPMI class: %x\n", pdev->class);
+		return -ENODEV;
+	}
+
+	rv = pci_enable_device(pdev);
+	if (rv) {
+		dev_err(&pdev->dev, "couldn't enable PCI device\n");
+		return rv;
+	}
+
+	io.addr_source_cleanup = ipmi_pci_cleanup;
+	io.addr_source_data = pdev;
+
+	if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
+		io.addr_type = IPMI_IO_ADDR_SPACE;
+		io.io_setup = ipmi_si_port_setup;
+	} else {
+		io.addr_type = IPMI_MEM_ADDR_SPACE;
+		io.io_setup = ipmi_si_mem_setup;
+	}
+	io.addr_data = pci_resource_start(pdev, 0);
+
+	io.regspacing = ipmi_pci_probe_regspacing(&io);
+	io.regsize = DEFAULT_REGSIZE;
+	io.regshift = 0;
+
+	io.irq = pdev->irq;
+	if (io.irq)
+		io.irq_setup = ipmi_std_irq_setup;
+
+	io.dev = &pdev->dev;
+
+	dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+		&pdev->resource[0], io.regsize, io.regspacing, io.irq);
+
+	rv = ipmi_si_add_smi(&io);
+	if (rv)
+		pci_disable_device(pdev);
+
+	return rv;
+}
+
+static void ipmi_pci_remove(struct pci_dev *pdev)
+{
+	ipmi_si_remove_by_dev(&pdev->dev);
+}
+
+static const struct pci_device_id ipmi_pci_devices[] = {
+	{ PCI_VDEVICE(HP, PCI_DEVICE_ID_HP_MMC) },
+	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_IPMI_SMIC, ~0) },
+	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_IPMI_KCS, ~0) },
+	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_IPMI_BT, ~0) },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
+
+static struct pci_driver ipmi_pci_driver = {
+	.name =         DEVICE_NAME,
+	.id_table =     ipmi_pci_devices,
+	.probe =        ipmi_pci_probe,
+	.remove =       ipmi_pci_remove,
+};
+
+void ipmi_si_pci_init(void)
+{
+	if (si_trypci) {
+		int rv = pci_register_driver(&ipmi_pci_driver);
+		if (rv)
+			pr_err(PFX "Unable to register PCI driver: %d\n", rv);
+		else
+			pci_registered = true;
+	}
+}
+
+void ipmi_si_pci_shutdown(void)
+{
+	if (pci_registered)
+		pci_unregister_driver(&ipmi_pci_driver);
+}
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
new file mode 100644
index 0000000..bf69927
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_si_platform.c
+ *
+ * Handling for platform devices in IPMI (ACPI, OF, and things
+ * coming from the platform).
+ */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/acpi.h>
+#include "ipmi_si.h"
+#include "ipmi_dmi.h"
+
+#define PFX "ipmi_platform: "
+
+static bool si_tryplatform = true;
+#ifdef CONFIG_ACPI
+static bool          si_tryacpi = true;
+#endif
+#ifdef CONFIG_OF
+static bool          si_tryopenfirmware = true;
+#endif
+#ifdef CONFIG_DMI
+static bool          si_trydmi = true;
+#else
+static bool          si_trydmi = false;
+#endif
+
+module_param_named(tryplatform, si_tryplatform, bool, 0);
+MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via platform"
+		 " interfaces besides ACPI, OpenFirmware, and DMI");
+#ifdef CONFIG_ACPI
+module_param_named(tryacpi, si_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via ACPI");
+#endif
+#ifdef CONFIG_OF
+module_param_named(tryopenfirmware, si_tryopenfirmware, bool, 0);
+MODULE_PARM_DESC(tryopenfirmware, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via OpenFirmware");
+#endif
+#ifdef CONFIG_DMI
+module_param_named(trydmi, si_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via DMI");
+#endif
+
+#ifdef CONFIG_ACPI
+/* For GPE-type interrupts. */
+static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
+	u32 gpe_number, void *context)
+{
+	struct si_sm_io *io = context;
+
+	ipmi_si_irq_handler(io->irq, io->irq_handler_data);
+	return ACPI_INTERRUPT_HANDLED;
+}
+
+static void acpi_gpe_irq_cleanup(struct si_sm_io *io)
+{
+	if (!io->irq)
+		return;
+
+	ipmi_irq_start_cleanup(io);
+	acpi_remove_gpe_handler(NULL, io->irq, &ipmi_acpi_gpe);
+}
+
+static int acpi_gpe_irq_setup(struct si_sm_io *io)
+{
+	acpi_status status;
+
+	if (!io->irq)
+		return 0;
+
+	status = acpi_install_gpe_handler(NULL,
+					  io->irq,
+					  ACPI_GPE_LEVEL_TRIGGERED,
+					  &ipmi_acpi_gpe,
+					  io);
+	if (status != AE_OK) {
+		dev_warn(io->dev,
+			 "Unable to claim ACPI GPE %d, running polled\n",
+			 io->irq);
+		io->irq = 0;
+		return -EINVAL;
+	} else {
+		io->irq_cleanup = acpi_gpe_irq_cleanup;
+		ipmi_irq_finish_setup(io);
+		dev_info(io->dev, "Using ACPI GPE %d\n", io->irq);
+		return 0;
+	}
+}
+#endif
+
+static struct resource *
+ipmi_get_info_from_resources(struct platform_device *pdev,
+			     struct si_sm_io *io)
+{
+	struct resource *res, *res_second;
+
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (res) {
+		io->addr_type = IPMI_IO_ADDR_SPACE;
+	} else {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (res)
+			io->addr_type = IPMI_MEM_ADDR_SPACE;
+	}
+	if (!res) {
+		dev_err(&pdev->dev, "no I/O or memory address\n");
+		return NULL;
+	}
+	io->addr_data = res->start;
+
+	io->regspacing = DEFAULT_REGSPACING;
+	res_second = platform_get_resource(pdev,
+			       (io->addr_type == IPMI_IO_ADDR_SPACE) ?
+					IORESOURCE_IO : IORESOURCE_MEM,
+			       1);
+	if (res_second) {
+		if (res_second->start > io->addr_data)
+			io->regspacing = res_second->start - io->addr_data;
+	}
+	io->regsize = DEFAULT_REGSIZE;
+	io->regshift = 0;
+
+	return res;
+}
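+
+/*
+ * An illustrative example (the addresses are hypothetical): a platform
+ * device exposing two one-byte I/O resources at 0xca2 and 0xca3 yields
+ * addr_data = 0xca2 and regspacing = 0xca3 - 0xca2 = 1; with only one
+ * resource the spacing stays at DEFAULT_REGSPACING.
+ */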
+
+static int platform_ipmi_probe(struct platform_device *pdev)
+{
+	struct si_sm_io io;
+	u8 type, slave_addr, addr_source;
+	int rv;
+
+	rv = device_property_read_u8(&pdev->dev, "addr-source", &addr_source);
+	if (rv)
+		addr_source = SI_PLATFORM;
+	if (addr_source >= SI_LAST)
+		return -EINVAL;
+
+	if (addr_source == SI_SMBIOS) {
+		if (!si_trydmi)
+			return -ENODEV;
+	} else {
+		if (!si_tryplatform)
+			return -ENODEV;
+	}
+
+	rv = device_property_read_u8(&pdev->dev, "ipmi-type", &type);
+	if (rv)
+		return -ENODEV;
+
+	memset(&io, 0, sizeof(io));
+	io.addr_source = addr_source;
+	dev_info(&pdev->dev, PFX "probing via %s\n",
+		 ipmi_addr_src_to_str(addr_source));
+
+	switch (type) {
+	case SI_KCS:
+	case SI_SMIC:
+	case SI_BT:
+		io.si_type = type;
+		break;
+	default:
+		dev_err(&pdev->dev, "ipmi-type property is invalid\n");
+		return -EINVAL;
+	}
+
+	if (!ipmi_get_info_from_resources(pdev, &io))
+		return -EINVAL;
+
+	rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
+	if (rv) {
+		dev_warn(&pdev->dev, "device has no slave-addr property\n");
+		io.slave_addr = 0x20;
+	} else {
+		io.slave_addr = slave_addr;
+	}
+
+	io.irq = platform_get_irq(pdev, 0);
+	if (io.irq > 0)
+		io.irq_setup = ipmi_std_irq_setup;
+	else
+		io.irq = 0;
+
+	io.dev = &pdev->dev;
+
+	pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
+		(io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+		io.addr_data, io.regsize, io.regspacing, io.irq);
+
+	ipmi_si_add_smi(&io);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_ipmi_match[] = {
+	{ .type = "ipmi", .compatible = "ipmi-kcs",
+	  .data = (void *)(unsigned long) SI_KCS },
+	{ .type = "ipmi", .compatible = "ipmi-smic",
+	  .data = (void *)(unsigned long) SI_SMIC },
+	{ .type = "ipmi", .compatible = "ipmi-bt",
+	  .data = (void *)(unsigned long) SI_BT },
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_ipmi_match);
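+
+/*
+ * A minimal sketch of a device-tree node this table would match; the
+ * property values are illustrative, not taken from any real board:
+ *
+ *	ipmi@ca2 {
+ *		device_type = "ipmi";
+ *		compatible = "ipmi-kcs";
+ *		reg = <0xca2 2>;
+ *		reg-size = <1>;
+ *		reg-spacing = <1>;
+ *	};
+ */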
+
+static int of_ipmi_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	struct si_sm_io io;
+	struct resource resource;
+	const __be32 *regsize, *regspacing, *regshift;
+	struct device_node *np = pdev->dev.of_node;
+	int ret;
+	int proplen;
+
+	if (!si_tryopenfirmware)
+		return -ENODEV;
+
+	dev_info(&pdev->dev, "probing via device tree\n");
+
+	match = of_match_device(of_ipmi_match, &pdev->dev);
+	if (!match)
+		return -ENODEV;
+
+	if (!of_device_is_available(np))
+		return -EINVAL;
+
+	ret = of_address_to_resource(np, 0, &resource);
+	if (ret) {
+		dev_warn(&pdev->dev, PFX "invalid address from OF\n");
+		return ret;
+	}
+
+	regsize = of_get_property(np, "reg-size", &proplen);
+	if (regsize && proplen != 4) {
+		dev_warn(&pdev->dev, PFX "invalid regsize from OF\n");
+		return -EINVAL;
+	}
+
+	regspacing = of_get_property(np, "reg-spacing", &proplen);
+	if (regspacing && proplen != 4) {
+		dev_warn(&pdev->dev, PFX "invalid regspacing from OF\n");
+		return -EINVAL;
+	}
+
+	regshift = of_get_property(np, "reg-shift", &proplen);
+	if (regshift && proplen != 4) {
+		dev_warn(&pdev->dev, PFX "invalid regshift from OF\n");
+		return -EINVAL;
+	}
+
+	memset(&io, 0, sizeof(io));
+	io.si_type	= (enum si_type) match->data;
+	io.addr_source	= SI_DEVICETREE;
+	io.irq_setup	= ipmi_std_irq_setup;
+
+	if (resource.flags & IORESOURCE_IO)
+		io.addr_type = IPMI_IO_ADDR_SPACE;
+	else
+		io.addr_type = IPMI_MEM_ADDR_SPACE;
+
+	io.addr_data	= resource.start;
+
+	io.regsize	= regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
+	io.regspacing	= regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
+	io.regshift	= regshift ? be32_to_cpup(regshift) : 0;
+
+	io.irq		= irq_of_parse_and_map(pdev->dev.of_node, 0);
+	io.dev		= &pdev->dev;
+
+	dev_dbg(&pdev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
+		io.addr_data, io.regsize, io.regspacing, io.irq);
+
+	return ipmi_si_add_smi(&io);
+}
+#else
+#define of_ipmi_match NULL
+static int of_ipmi_probe(struct platform_device *dev)
+{
+	return -ENODEV;
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static int find_slave_address(struct si_sm_io *io, int slave_addr)
+{
+#ifdef CONFIG_IPMI_DMI_DECODE
+	if (!slave_addr) {
+		u32 flags = IORESOURCE_IO;
+
+		if (io->addr_type == IPMI_MEM_ADDR_SPACE)
+			flags = IORESOURCE_MEM;
+
+		slave_addr = ipmi_dmi_get_slave_addr(io->si_type, flags,
+						     io->addr_data);
+	}
+#endif
+
+	return slave_addr;
+}
+
+static int acpi_ipmi_probe(struct platform_device *pdev)
+{
+	struct si_sm_io io;
+	acpi_handle handle;
+	acpi_status status;
+	unsigned long long tmp;
+	struct resource *res;
+	int rv = -EINVAL;
+
+	if (!si_tryacpi)
+		return -ENODEV;
+
+	handle = ACPI_HANDLE(&pdev->dev);
+	if (!handle)
+		return -ENODEV;
+
+	memset(&io, 0, sizeof(io));
+	io.addr_source = SI_ACPI;
+	dev_info(&pdev->dev, PFX "probing via ACPI\n");
+
+	io.addr_info.acpi_info.acpi_handle = handle;
+
+	/* _IFT tells us the interface type: KCS, BT, etc */
+	status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
+	if (ACPI_FAILURE(status)) {
+		dev_err(&pdev->dev,
+			"Could not find ACPI IPMI interface type\n");
+		goto err_free;
+	}
+
+	switch (tmp) {
+	case 1:
+		io.si_type = SI_KCS;
+		break;
+	case 2:
+		io.si_type = SI_SMIC;
+		break;
+	case 3:
+		io.si_type = SI_BT;
+		break;
+	case 4: /* SSIF, just ignore */
+		rv = -ENODEV;
+		goto err_free;
+	default:
+		dev_info(&pdev->dev, "unknown IPMI type %lld\n", tmp);
+		goto err_free;
+	}
+
+	res = ipmi_get_info_from_resources(pdev, &io);
+	if (!res) {
+		rv = -EINVAL;
+		goto err_free;
+	}
+
+	/* If _GPE exists, use it; otherwise use standard interrupts */
+	status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
+	if (ACPI_SUCCESS(status)) {
+		io.irq = tmp;
+		io.irq_setup = acpi_gpe_irq_setup;
+	} else {
+		int irq = platform_get_irq(pdev, 0);
+
+		if (irq > 0) {
+			io.irq = irq;
+			io.irq_setup = ipmi_std_irq_setup;
+		}
+	}
+
+	io.slave_addr = find_slave_address(&io, io.slave_addr);
+
+	io.dev = &pdev->dev;
+
+	dev_info(io.dev, "%pR regsize %d spacing %d irq %d\n",
+		 res, io.regsize, io.regspacing, io.irq);
+
+	return ipmi_si_add_smi(&io);
+
+err_free:
+	return rv;
+}
+
+static const struct acpi_device_id acpi_ipmi_match[] = {
+	{ "IPI0001", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ipmi_match);
+#else
+static int acpi_ipmi_probe(struct platform_device *dev)
+{
+	return -ENODEV;
+}
+#endif
+
+static int ipmi_probe(struct platform_device *pdev)
+{
+	if (pdev->dev.of_node && of_ipmi_probe(pdev) == 0)
+		return 0;
+
+	if (acpi_ipmi_probe(pdev) == 0)
+		return 0;
+
+	return platform_ipmi_probe(pdev);
+}
+
+static int ipmi_remove(struct platform_device *pdev)
+{
+	return ipmi_si_remove_by_dev(&pdev->dev);
+}
+
+struct platform_driver ipmi_platform_driver = {
+	.driver = {
+		.name = DEVICE_NAME,
+		.of_match_table = of_ipmi_match,
+		.acpi_match_table = ACPI_PTR(acpi_ipmi_match),
+	},
+	.probe		= ipmi_probe,
+	.remove		= ipmi_remove,
+};
+
+void ipmi_si_platform_init(void)
+{
+	int rv = platform_driver_register(&ipmi_platform_driver);
+	if (rv)
+		pr_err(PFX "Unable to register driver: %d\n", rv);
+}
+
+void ipmi_si_platform_shutdown(void)
+{
+	platform_driver_unregister(&ipmi_platform_driver);
+}
diff --git a/drivers/char/ipmi/ipmi_si_port_io.c b/drivers/char/ipmi/ipmi_si_port_io.c
new file mode 100644
index 0000000..ef6dffc
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_port_io.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/io.h>
+#include "ipmi_si.h"
+
+static unsigned char port_inb(const struct si_sm_io *io, unsigned int offset)
+{
+	unsigned int addr = io->addr_data;
+
+	return inb(addr + (offset * io->regspacing));
+}
+
+static void port_outb(const struct si_sm_io *io, unsigned int offset,
+		      unsigned char b)
+{
+	unsigned int addr = io->addr_data;
+
+	outb(b, addr + (offset * io->regspacing));
+}
+
+static unsigned char port_inw(const struct si_sm_io *io, unsigned int offset)
+{
+	unsigned int addr = io->addr_data;
+
+	return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outw(const struct si_sm_io *io, unsigned int offset,
+		      unsigned char b)
+{
+	unsigned int addr = io->addr_data;
+
+	outw(b << io->regshift, addr + (offset * io->regspacing));
+}
+
+static unsigned char port_inl(const struct si_sm_io *io, unsigned int offset)
+{
+	unsigned int addr = io->addr_data;
+
+	return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outl(const struct si_sm_io *io, unsigned int offset,
+		      unsigned char b)
+{
+	unsigned int addr = io->addr_data;
+
+	outl(b << io->regshift, addr + (offset * io->regspacing));
+}
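+
+/*
+ * An example with assumed values: with regsize = 2 and regshift = 8,
+ * the byte at logical offset 1 is accessed as
+ * inw(addr + 1 * regspacing), and the interesting bits are extracted
+ * as (value >> 8) & 0xff; the outw path shifts the byte back into
+ * place before writing.
+ */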
+
+static void port_cleanup(struct si_sm_io *io)
+{
+	unsigned int addr = io->addr_data;
+	int          idx;
+
+	if (addr) {
+		for (idx = 0; idx < io->io_size; idx++)
+			release_region(addr + idx * io->regspacing,
+				       io->regsize);
+	}
+}
+
+int ipmi_si_port_setup(struct si_sm_io *io)
+{
+	unsigned int addr = io->addr_data;
+	int          idx;
+
+	if (!addr)
+		return -ENODEV;
+
+	io->io_cleanup = port_cleanup;
+
+	/*
+	 * Figure out the actual inb/inw/inl/etc routine to use based
+	 * upon the register size.
+	 */
+	switch (io->regsize) {
+	case 1:
+		io->inputb = port_inb;
+		io->outputb = port_outb;
+		break;
+	case 2:
+		io->inputb = port_inw;
+		io->outputb = port_outw;
+		break;
+	case 4:
+		io->inputb = port_inl;
+		io->outputb = port_outl;
+		break;
+	default:
+		dev_warn(io->dev, "Invalid register size: %d\n",
+			 io->regsize);
+		return -EINVAL;
+	}
+
+	/*
+	 * Some BIOSes reserve disjoint I/O regions in their ACPI
+	 * tables.  This causes problems when trying to register the
+	 * entire I/O region.  Therefore we must register each I/O
+	 * port separately.
+	 */
+	for (idx = 0; idx < io->io_size; idx++) {
+		if (request_region(addr + idx * io->regspacing,
+				   io->regsize, DEVICE_NAME) == NULL) {
+			/* Undo allocations */
+			while (idx--)
+				release_region(addr + idx * io->regspacing,
+					       io->regsize);
+			return -EIO;
+		}
+	}
+	return 0;
+}
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
new file mode 100644
index 0000000..aaddf04
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ipmi_si_sm.h
+ *
+ * State machine interface for low-level IPMI system management
+ * interface state machines.  This code is the interface between
+ * the ipmi_smi code (that handles the policy of a KCS, SMIC, or
+ * BT interface) and the actual low-level state machine.
+ *
+ * Author: MontaVista Software, Inc.
+ *         Corey Minyard <minyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ */
+
+#include <linux/ipmi.h>
+
+/*
+ * This is defined by the state machines themselves; it is an opaque
+ * data type for them to use.
+ */
+struct si_sm_data;
+
+enum si_type {
+	SI_TYPE_INVALID, SI_KCS, SI_SMIC, SI_BT
+};
+
+/*
+ * The structure for doing I/O in the state machine.  The state
+ * machine doesn't have the actual I/O routines, they are done through
+ * this interface.
+ */
+struct si_sm_io {
+	unsigned char (*inputb)(const struct si_sm_io *io, unsigned int offset);
+	void (*outputb)(const struct si_sm_io *io,
+			unsigned int  offset,
+			unsigned char b);
+
+	/*
+	 * Generic info used by the actual handling routines; the
+	 * state machine shouldn't touch these.
+	 */
+	void __iomem *addr;
+	int  regspacing;
+	int  regsize;
+	int  regshift;
+	int addr_type;
+	long addr_data;
+	enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
+	void (*addr_source_cleanup)(struct si_sm_io *io);
+	void *addr_source_data;
+	union ipmi_smi_info_union addr_info;
+
+	int (*io_setup)(struct si_sm_io *info);
+	void (*io_cleanup)(struct si_sm_io *info);
+	unsigned int io_size;
+
+	int irq;
+	int (*irq_setup)(struct si_sm_io *io);
+	void *irq_handler_data;
+	void (*irq_cleanup)(struct si_sm_io *io);
+
+	u8 slave_addr;
+	enum si_type si_type;
+	struct device *dev;
+};
+
+/* Results of SMI events. */
+enum si_sm_result {
+	SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */
+	SI_SM_CALL_WITH_DELAY,	/* Delay some before calling again. */
+	SI_SM_CALL_WITH_TICK_DELAY, /* Delay >=1 tick before calling again. */
+	SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */
+	SI_SM_IDLE,		/* The SM is in idle state. */
+	SI_SM_HOSED,		/* The hardware violated the state machine. */
+
+	/*
+	 * The hardware is asserting attn and the state machine is
+	 * idle.
+	 */
+	SI_SM_ATTN
+};
+
+/* Handlers for the SMI state machine. */
+struct si_sm_handlers {
+	/*
+	 * Put the version number of the state machine here so the
+	 * upper layer can print it.
+	 */
+	char *version;
+
+	/*
+	 * Initialize the data and return the amount of I/O space
+	 * to reserve.
+	 */
+	unsigned int (*init_data)(struct si_sm_data *smi,
+				  struct si_sm_io   *io);
+
+	/*
+	 * Start a new transaction in the state machine.  This will
+	 * return -2 if the state machine is not idle, -1 if the size
+	 * is invalid (too large or too small), or 0 if the transaction
+	 * is successfully started.
+	 */
+	int (*start_transaction)(struct si_sm_data *smi,
+				 unsigned char *data, unsigned int size);
+
+	/*
+	 * Return the results after the transaction.  This will return
+	 * -1 if the buffer is too small, zero if no transaction is
+	 * present, or the actual length of the result data.
+	 */
+	int (*get_result)(struct si_sm_data *smi,
+			  unsigned char *data, unsigned int length);
+
+	/*
+	 * Call this periodically (for a polled interface) or upon
+	 * receiving an interrupt (for an interrupt-driven interface).
+	 * If interrupt driven, you should probably poll this
+	 * periodically when not in idle state.  This should be called
+	 * with the time that passed since the last call, if it is
+	 * significant.  Time is in microseconds.
+	 */
+	enum si_sm_result (*event)(struct si_sm_data *smi, long time);
+
+	/*
+	 * Attempt to detect an SMI.  Returns 0 on success or nonzero
+	 * on failure.
+	 */
+	int (*detect)(struct si_sm_data *smi);
+
+	/* The interface is shutting down, so clean it up. */
+	void (*cleanup)(struct si_sm_data *smi);
+
+	/* Return the size of the SMI structure in bytes. */
+	int (*size)(void);
+};
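+
+/*
+ * A minimal polled-transaction sketch (illustrative only, not part of
+ * the driver; error handling and buffer declarations are elided, and
+ * "h", "smi", and "io" are assumed to have been set up by the caller):
+ *
+ *	unsigned char req[2] = { IPMI_NETFN_APP_REQUEST << 2,
+ *				 IPMI_GET_DEVICE_ID_CMD };
+ *	enum si_sm_result res;
+ *
+ *	h->init_data(smi, io);
+ *	h->start_transaction(smi, req, sizeof(req));
+ *	do {
+ *		udelay(10);
+ *		res = h->event(smi, 10);
+ *	} while (res != SI_SM_TRANSACTION_COMPLETE && res != SI_SM_HOSED);
+ *	if (res == SI_SM_TRANSACTION_COMPLETE)
+ *		len = h->get_result(smi, rsp, sizeof(rsp));
+ */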
+
+/* Current state machines that we can use. */
+extern const struct si_sm_handlers kcs_smi_handlers;
+extern const struct si_sm_handlers smic_smi_handlers;
+extern const struct si_sm_handlers bt_smi_handlers;
+
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
new file mode 100644
index 0000000..466a5aa
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_smic_sm.c
+ *
+ * The state-machine driver for an IPMI SMIC interface
+ *
+ * It started as a copy of Corey Minyard's driver for the KCS interface
+ * and the kernel patch "mmcdev-patch-245" by HP
+ *
+ * modified by:	Hannes Schulz <schulz@schwaar.com>
+ *		ipmi@schwaar.com
+ *
+ *
+ * Corey Minyard's driver for the KCS interface has the following
+ * copyright notice:
+ *   Copyright 2002 MontaVista Software Inc.
+ *
+ * the kernel patch "mmcdev-patch-245" by HP has the following
+ * copyright notice:
+ * (c) Copyright 2001 Grant Grundler (c) Copyright
+ * 2001 Hewlett-Packard Company
+ */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi_msgdefs.h>		/* for completion codes */
+#include "ipmi_si_sm.h"
+
+/*
+ * smic_debug is a bit-field
+ *	SMIC_DEBUG_ENABLE -	turned on for now
+ *	SMIC_DEBUG_MSG -	commands and their responses
+ *	SMIC_DEBUG_STATES -	state machine
+ */
+#define SMIC_DEBUG_STATES	4
+#define SMIC_DEBUG_MSG		2
+#define	SMIC_DEBUG_ENABLE	1
+
+static int smic_debug = 1;
+module_param(smic_debug, int, 0644);
+MODULE_PARM_DESC(smic_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
+
+enum smic_states {
+	SMIC_IDLE,
+	SMIC_START_OP,
+	SMIC_OP_OK,
+	SMIC_WRITE_START,
+	SMIC_WRITE_NEXT,
+	SMIC_WRITE_END,
+	SMIC_WRITE2READ,
+	SMIC_READ_START,
+	SMIC_READ_NEXT,
+	SMIC_READ_END,
+	SMIC_HOSED
+};
+
+#define MAX_SMIC_READ_SIZE 80
+#define MAX_SMIC_WRITE_SIZE 80
+#define SMIC_MAX_ERROR_RETRIES 3
+
+/* Timeouts in microseconds. */
+#define SMIC_RETRY_TIMEOUT (2*USEC_PER_SEC)
+
+/* SMIC Flags Register Bits */
+#define SMIC_RX_DATA_READY	0x80
+#define SMIC_TX_DATA_READY	0x40
+
+/*
+ * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by
+ * a few systems, and then only by Systems Management
+ * Interrupts, not by the OS.  Always ignore these bits.
+ */
+#define SMIC_SMI		0x10
+#define SMIC_EVM_DATA_AVAIL	0x08
+#define SMIC_SMS_DATA_AVAIL	0x04
+#define SMIC_FLAG_BSY		0x01
+
+/* SMIC Error Codes */
+#define	EC_NO_ERROR		0x00
+#define	EC_ABORTED		0x01
+#define	EC_ILLEGAL_CONTROL	0x02
+#define	EC_NO_RESPONSE		0x03
+#define	EC_ILLEGAL_COMMAND	0x04
+#define	EC_BUFFER_FULL		0x05
+
+struct si_sm_data {
+	enum smic_states state;
+	struct si_sm_io *io;
+	unsigned char	 write_data[MAX_SMIC_WRITE_SIZE];
+	int		 write_pos;
+	int		 write_count;
+	int		 orig_write_count;
+	unsigned char	 read_data[MAX_SMIC_READ_SIZE];
+	int		 read_pos;
+	int		 truncated;
+	unsigned int	 error_retries;
+	long		 smic_timeout;
+};
+
+static unsigned int init_smic_data(struct si_sm_data *smic,
+				   struct si_sm_io *io)
+{
+	smic->state = SMIC_IDLE;
+	smic->io = io;
+	smic->write_pos = 0;
+	smic->write_count = 0;
+	smic->orig_write_count = 0;
+	smic->read_pos = 0;
+	smic->error_retries = 0;
+	smic->truncated = 0;
+	smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+
+	/* We use 3 bytes of I/O. */
+	return 3;
+}
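+
+/*
+ * The three bytes map onto the register layout implied by the
+ * accessors below: offset 0 is the data register, offset 1 is the
+ * control register (on write) or status register (on read), and
+ * offset 2 is the flags register.
+ */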
+
+static int start_smic_transaction(struct si_sm_data *smic,
+				  unsigned char *data, unsigned int size)
+{
+	unsigned int i;
+
+	if (size < 2)
+		return IPMI_REQ_LEN_INVALID_ERR;
+	if (size > MAX_SMIC_WRITE_SIZE)
+		return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+	if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED))
+		return IPMI_NOT_IN_MY_STATE_ERR;
+
+	if (smic_debug & SMIC_DEBUG_MSG) {
+		printk(KERN_DEBUG "start_smic_transaction -");
+		for (i = 0; i < size; i++)
+			printk(" %02x", (unsigned char) data[i]);
+		printk("\n");
+	}
+	smic->error_retries = 0;
+	memcpy(smic->write_data, data, size);
+	smic->write_count = size;
+	smic->orig_write_count = size;
+	smic->write_pos = 0;
+	smic->read_pos = 0;
+	smic->state = SMIC_START_OP;
+	smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+	return 0;
+}
+
+static int smic_get_result(struct si_sm_data *smic,
+			   unsigned char *data, unsigned int length)
+{
+	int i;
+
+	if (smic_debug & SMIC_DEBUG_MSG) {
+		printk(KERN_DEBUG "smic_get_result -");
+		for (i = 0; i < smic->read_pos; i++)
+			printk(" %02x", smic->read_data[i]);
+		printk("\n");
+	}
+	if (length < smic->read_pos) {
+		smic->read_pos = length;
+		smic->truncated = 1;
+	}
+	memcpy(data, smic->read_data, smic->read_pos);
+
+	if ((length >= 3) && (smic->read_pos < 3)) {
+		data[2] = IPMI_ERR_UNSPECIFIED;
+		smic->read_pos = 3;
+	}
+	if (smic->truncated) {
+		data[2] = IPMI_ERR_MSG_TRUNCATED;
+		smic->truncated = 0;
+	}
+	return smic->read_pos;
+}
+
+static inline unsigned char read_smic_flags(struct si_sm_data *smic)
+{
+	return smic->io->inputb(smic->io, 2);
+}
+
+static inline unsigned char read_smic_status(struct si_sm_data *smic)
+{
+	return smic->io->inputb(smic->io, 1);
+}
+
+static inline unsigned char read_smic_data(struct si_sm_data *smic)
+{
+	return smic->io->inputb(smic->io, 0);
+}
+
+static inline void write_smic_flags(struct si_sm_data *smic,
+				    unsigned char   flags)
+{
+	smic->io->outputb(smic->io, 2, flags);
+}
+
+static inline void write_smic_control(struct si_sm_data *smic,
+				      unsigned char   control)
+{
+	smic->io->outputb(smic->io, 1, control);
+}
+
+static inline void write_si_sm_data(struct si_sm_data *smic,
+				    unsigned char   data)
+{
+	smic->io->outputb(smic->io, 0, data);
+}
+
+static inline void start_error_recovery(struct si_sm_data *smic, char *reason)
+{
+	(smic->error_retries)++;
+	if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) {
+		if (smic_debug & SMIC_DEBUG_ENABLE)
+			printk(KERN_WARNING
+			       "ipmi_smic_drv: smic hosed: %s\n", reason);
+		smic->state = SMIC_HOSED;
+	} else {
+		smic->write_count = smic->orig_write_count;
+		smic->write_pos = 0;
+		smic->read_pos = 0;
+		smic->state = SMIC_START_OP;
+		smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+	}
+}
+
+static inline void write_next_byte(struct si_sm_data *smic)
+{
+	write_si_sm_data(smic, smic->write_data[smic->write_pos]);
+	(smic->write_pos)++;
+	(smic->write_count)--;
+}
+
+static inline void read_next_byte(struct si_sm_data *smic)
+{
+	if (smic->read_pos >= MAX_SMIC_READ_SIZE) {
+		read_smic_data(smic);
+		smic->truncated = 1;
+	} else {
+		smic->read_data[smic->read_pos] = read_smic_data(smic);
+		smic->read_pos++;
+	}
+}
+
+/*  SMIC Control/Status Code Components */
+#define	SMIC_GET_STATUS		0x00	/* Control form's name */
+#define	SMIC_READY		0x00	/* Status  form's name */
+#define	SMIC_WR_START		0x01	/* Unified Control/Status names... */
+#define	SMIC_WR_NEXT		0x02
+#define	SMIC_WR_END		0x03
+#define	SMIC_RD_START		0x04
+#define	SMIC_RD_NEXT		0x05
+#define	SMIC_RD_END		0x06
+#define	SMIC_CODE_MASK		0x0f
+
+#define	SMIC_CONTROL		0x00
+#define	SMIC_STATUS		0x80
+#define	SMIC_CS_MASK		0x80
+
+#define	SMIC_SMS		0x40
+#define	SMIC_SMM		0x60
+#define	SMIC_STREAM_MASK	0x60
+
+/*  SMIC Control Codes */
+#define	SMIC_CC_SMS_GET_STATUS	(SMIC_CONTROL|SMIC_SMS|SMIC_GET_STATUS)
+#define	SMIC_CC_SMS_WR_START	(SMIC_CONTROL|SMIC_SMS|SMIC_WR_START)
+#define	SMIC_CC_SMS_WR_NEXT	(SMIC_CONTROL|SMIC_SMS|SMIC_WR_NEXT)
+#define	SMIC_CC_SMS_WR_END	(SMIC_CONTROL|SMIC_SMS|SMIC_WR_END)
+#define	SMIC_CC_SMS_RD_START	(SMIC_CONTROL|SMIC_SMS|SMIC_RD_START)
+#define	SMIC_CC_SMS_RD_NEXT	(SMIC_CONTROL|SMIC_SMS|SMIC_RD_NEXT)
+#define	SMIC_CC_SMS_RD_END	(SMIC_CONTROL|SMIC_SMS|SMIC_RD_END)
+
+#define	SMIC_CC_SMM_GET_STATUS	(SMIC_CONTROL|SMIC_SMM|SMIC_GET_STATUS)
+#define	SMIC_CC_SMM_WR_START	(SMIC_CONTROL|SMIC_SMM|SMIC_WR_START)
+#define	SMIC_CC_SMM_WR_NEXT	(SMIC_CONTROL|SMIC_SMM|SMIC_WR_NEXT)
+#define	SMIC_CC_SMM_WR_END	(SMIC_CONTROL|SMIC_SMM|SMIC_WR_END)
+#define	SMIC_CC_SMM_RD_START	(SMIC_CONTROL|SMIC_SMM|SMIC_RD_START)
+#define	SMIC_CC_SMM_RD_NEXT	(SMIC_CONTROL|SMIC_SMM|SMIC_RD_NEXT)
+#define	SMIC_CC_SMM_RD_END	(SMIC_CONTROL|SMIC_SMM|SMIC_RD_END)
+
+/*  SMIC Status Codes */
+#define	SMIC_SC_SMS_READY	(SMIC_STATUS|SMIC_SMS|SMIC_READY)
+#define	SMIC_SC_SMS_WR_START	(SMIC_STATUS|SMIC_SMS|SMIC_WR_START)
+#define	SMIC_SC_SMS_WR_NEXT	(SMIC_STATUS|SMIC_SMS|SMIC_WR_NEXT)
+#define	SMIC_SC_SMS_WR_END	(SMIC_STATUS|SMIC_SMS|SMIC_WR_END)
+#define	SMIC_SC_SMS_RD_START	(SMIC_STATUS|SMIC_SMS|SMIC_RD_START)
+#define	SMIC_SC_SMS_RD_NEXT	(SMIC_STATUS|SMIC_SMS|SMIC_RD_NEXT)
+#define	SMIC_SC_SMS_RD_END	(SMIC_STATUS|SMIC_SMS|SMIC_RD_END)
+
+#define	SMIC_SC_SMM_READY	(SMIC_STATUS|SMIC_SMM|SMIC_READY)
+#define	SMIC_SC_SMM_WR_START	(SMIC_STATUS|SMIC_SMM|SMIC_WR_START)
+#define	SMIC_SC_SMM_WR_NEXT	(SMIC_STATUS|SMIC_SMM|SMIC_WR_NEXT)
+#define	SMIC_SC_SMM_WR_END	(SMIC_STATUS|SMIC_SMM|SMIC_WR_END)
+#define	SMIC_SC_SMM_RD_START	(SMIC_STATUS|SMIC_SMM|SMIC_RD_START)
+#define	SMIC_SC_SMM_RD_NEXT	(SMIC_STATUS|SMIC_SMM|SMIC_RD_NEXT)
+#define	SMIC_SC_SMM_RD_END	(SMIC_STATUS|SMIC_SMM|SMIC_RD_END)
+
+/* these are the control/status codes we actually use
+	SMIC_CC_SMS_GET_STATUS	0x40
+	SMIC_CC_SMS_WR_START	0x41
+	SMIC_CC_SMS_WR_NEXT	0x42
+	SMIC_CC_SMS_WR_END	0x43
+	SMIC_CC_SMS_RD_START	0x44
+	SMIC_CC_SMS_RD_NEXT	0x45
+	SMIC_CC_SMS_RD_END	0x46
+
+	SMIC_SC_SMS_READY	0xC0
+	SMIC_SC_SMS_WR_START	0xC1
+	SMIC_SC_SMS_WR_NEXT	0xC2
+	SMIC_SC_SMS_WR_END	0xC3
+	SMIC_SC_SMS_RD_START	0xC4
+	SMIC_SC_SMS_RD_NEXT	0xC5
+	SMIC_SC_SMS_RD_END	0xC6
+ */
+
+static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
+{
+	unsigned char status;
+	unsigned char flags;
+	unsigned char data;
+
+	if (smic->state == SMIC_HOSED) {
+		init_smic_data(smic, smic->io);
+		return SI_SM_HOSED;
+	}
+	if (smic->state != SMIC_IDLE) {
+		if (smic_debug & SMIC_DEBUG_STATES)
+			printk(KERN_DEBUG
+			       "smic_event - smic->smic_timeout = %ld,"
+			       " time = %ld\n",
+			       smic->smic_timeout, time);
+		/*
+		 * FIXME: smic_event is sometimes called with time >
+		 * SMIC_RETRY_TIMEOUT
+		 */
+		if (time < SMIC_RETRY_TIMEOUT) {
+			smic->smic_timeout -= time;
+			if (smic->smic_timeout < 0) {
+				start_error_recovery(smic, "smic timed out.");
+				return SI_SM_CALL_WITH_DELAY;
+			}
+		}
+	}
+	flags = read_smic_flags(smic);
+	if (flags & SMIC_FLAG_BSY)
+		return SI_SM_CALL_WITH_DELAY;
+
+	status = read_smic_status(smic);
+	if (smic_debug & SMIC_DEBUG_STATES)
+		printk(KERN_DEBUG
+		       "smic_event - state = %d, flags = 0x%02x,"
+		       " status = 0x%02x\n",
+		       smic->state, flags, status);
+
+	switch (smic->state) {
+	case SMIC_IDLE:
+		/* in IDLE we check for available messages */
+		if (flags & SMIC_SMS_DATA_AVAIL)
+			return SI_SM_ATTN;
+		return SI_SM_IDLE;
+
+	case SMIC_START_OP:
+		/* sanity check whether smic is really idle */
+		write_smic_control(smic, SMIC_CC_SMS_GET_STATUS);
+		write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+		smic->state = SMIC_OP_OK;
+		break;
+
+	case SMIC_OP_OK:
+		if (status != SMIC_SC_SMS_READY) {
+			/* this should not happen */
+			start_error_recovery(smic,
+					     "state = SMIC_OP_OK,"
+					     " status != SMIC_SC_SMS_READY");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+		/* OK so far; smic is idle, let us start ... */
+		write_smic_control(smic, SMIC_CC_SMS_WR_START);
+		write_next_byte(smic);
+		write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+		smic->state = SMIC_WRITE_START;
+		break;
+
+	case SMIC_WRITE_START:
+		if (status != SMIC_SC_SMS_WR_START) {
+			start_error_recovery(smic,
+					     "state = SMIC_WRITE_START, "
+					     "status != SMIC_SC_SMS_WR_START");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+		/*
+		 * we must not issue WR_(NEXT|END) unless
+		 * TX_DATA_READY is set
+		 */
+		if (flags & SMIC_TX_DATA_READY) {
+			if (smic->write_count == 1) {
+				/* last byte */
+				write_smic_control(smic, SMIC_CC_SMS_WR_END);
+				smic->state = SMIC_WRITE_END;
+			} else {
+				write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
+				smic->state = SMIC_WRITE_NEXT;
+			}
+			write_next_byte(smic);
+			write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+		} else
+			return SI_SM_CALL_WITH_DELAY;
+		break;
+
+	case SMIC_WRITE_NEXT:
+		if (status != SMIC_SC_SMS_WR_NEXT) {
+			start_error_recovery(smic,
+					     "state = SMIC_WRITE_NEXT, "
+					     "status != SMIC_SC_SMS_WR_NEXT");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+		/* this is the same code as in SMIC_WRITE_START */
+		if (flags & SMIC_TX_DATA_READY) {
+			if (smic->write_count == 1) {
+				write_smic_control(smic, SMIC_CC_SMS_WR_END);
+				smic->state = SMIC_WRITE_END;
+			} else {
+				write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
+				smic->state = SMIC_WRITE_NEXT;
+			}
+			write_next_byte(smic);
+			write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+		} else
+			return SI_SM_CALL_WITH_DELAY;
+		break;
+
+	case SMIC_WRITE_END:
+		if (status != SMIC_SC_SMS_WR_END) {
+			start_error_recovery(smic,
+					     "state = SMIC_WRITE_END, "
+					     "status != SMIC_SC_SMS_WR_END");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+		/* data register holds an error code */
+		data = read_smic_data(smic);
+		if (data != 0) {
+			if (smic_debug & SMIC_DEBUG_ENABLE)
+				printk(KERN_DEBUG
+				       "SMIC_WRITE_END: data = %02x\n", data);
+			start_error_recovery(smic,
+					     "state = SMIC_WRITE_END, "
+					     "data != SUCCESS");
+			return SI_SM_CALL_WITH_DELAY;
+		} else
+			smic->state = SMIC_WRITE2READ;
+		break;
+
+	case SMIC_WRITE2READ:
+		/*
+		 * we must wait for RX_DATA_READY to be set before we
+		 * can continue
+		 */
+		if (flags & SMIC_RX_DATA_READY) {
+			write_smic_control(smic, SMIC_CC_SMS_RD_START);
+			write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+			smic->state = SMIC_READ_START;
+		} else
+			return SI_SM_CALL_WITH_DELAY;
+		break;
+
+	case SMIC_READ_START:
+		if (status != SMIC_SC_SMS_RD_START) {
+			start_error_recovery(smic,
+					     "state = SMIC_READ_START, "
+					     "status != SMIC_SC_SMS_RD_START");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+		if (flags & SMIC_RX_DATA_READY) {
+			read_next_byte(smic);
+			write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
+			write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+			smic->state = SMIC_READ_NEXT;
+		} else
+			return SI_SM_CALL_WITH_DELAY;
+		break;
+
+	case SMIC_READ_NEXT:
+		switch (status) {
+		/*
+		 * smic tells us that this is the last byte to be read
+		 * --> clean up
+		 */
+		case SMIC_SC_SMS_RD_END:
+			read_next_byte(smic);
+			write_smic_control(smic, SMIC_CC_SMS_RD_END);
+			write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+			smic->state = SMIC_READ_END;
+			break;
+		case SMIC_SC_SMS_RD_NEXT:
+			if (flags & SMIC_RX_DATA_READY) {
+				read_next_byte(smic);
+				write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
+				write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+				smic->state = SMIC_READ_NEXT;
+			} else
+				return SI_SM_CALL_WITH_DELAY;
+			break;
+		default:
+			start_error_recovery(
+				smic,
+				"state = SMIC_READ_NEXT, "
+				"status != SMIC_SC_SMS_RD_(NEXT|END)");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+		break;
+
+	case SMIC_READ_END:
+		if (status != SMIC_SC_SMS_READY) {
+			start_error_recovery(smic,
+					     "state = SMIC_READ_END, "
+					     "status != SMIC_SC_SMS_READY");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+		data = read_smic_data(smic);
+		/* data register holds an error code */
+		if (data != 0) {
+			if (smic_debug & SMIC_DEBUG_ENABLE)
+				printk(KERN_DEBUG
+				       "SMIC_READ_END: data = %02x\n", data);
+			start_error_recovery(smic,
+					     "state = SMIC_READ_END, "
+					     "data != SUCCESS");
+			return SI_SM_CALL_WITH_DELAY;
+		} else {
+			smic->state = SMIC_IDLE;
+			return SI_SM_TRANSACTION_COMPLETE;
+		}
+
+	case SMIC_HOSED:
+		init_smic_data(smic, smic->io);
+		return SI_SM_HOSED;
+
+	default:
+		if (smic_debug & SMIC_DEBUG_ENABLE) {
+			printk(KERN_DEBUG "smic->state = %d\n", smic->state);
+			start_error_recovery(smic, "state = UNKNOWN");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+	}
+	smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+	return SI_SM_CALL_WITHOUT_DELAY;
+}
+
+static int smic_detect(struct si_sm_data *smic)
+{
+	/*
+	 * It's impossible for the SMIC flags register to be all 1's
+	 * (assuming a properly functioning, self-initialized BMC),
+	 * but that's what you get from reading a bogus address, so we
+	 * test that first.
+	 */
+	if (read_smic_flags(smic) == 0xff)
+		return 1;
+
+	return 0;
+}
+
+static void smic_cleanup(struct si_sm_data *smic)
+{
+}
+
+static int smic_size(void)
+{
+	return sizeof(struct si_sm_data);
+}
+
+const struct si_sm_handlers smic_smi_handlers = {
+	.init_data         = init_smic_data,
+	.start_transaction = start_smic_transaction,
+	.get_result        = smic_get_result,
+	.event             = smic_event,
+	.detect            = smic_detect,
+	.cleanup           = smic_cleanup,
+	.size              = smic_size,
+};
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
new file mode 100644
index 0000000..9b78672
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -0,0 +1,1919 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_ssif.c
+ *
+ * The interface to the IPMI driver for SMBus access to an
+ * SMBus-compliant device.  Called SSIF by the IPMI spec.
+ *
+ * Author: Intel Corporation
+ *         Todd Davis <todd.c.davis@intel.com>
+ *
+ * Rewritten by Corey Minyard <minyard@acm.org> to support the
+ * non-blocking I2C interface, add support for multi-part
+ * transactions, add PEC support, and general cleanup.
+ *
+ * Copyright 2003 Intel Corporation
+ * Copyright 2005 MontaVista Software
+ */
+
+/*
+ * This file holds the "policy" for the interface to the SSIF state
+ * machine.  It does the configuration, handles timers and interrupts,
+ * and drives the real SSIF state machine.
+ */
+
+/*
+ * TODO: Figure out how to use SMB alerts.  This will require a new
+ * interface into the I2C driver, I believe.
+ */
+
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/ipmi_smi.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/kthread.h>
+#include <linux/acpi.h>
+#include <linux/ctype.h>
+#include <linux/time64.h>
+#include "ipmi_si_sm.h"
+#include "ipmi_dmi.h"
+
+#define PFX "ipmi_ssif: "
+#define DEVICE_NAME "ipmi_ssif"
+
+#define IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD	0x57
+
+#define	SSIF_IPMI_REQUEST			2
+#define	SSIF_IPMI_MULTI_PART_REQUEST_START	6
+#define	SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE	7
+#define	SSIF_IPMI_RESPONSE			3
+#define	SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE	9
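+
+/*
+ * A sketch of the multi-part read framing handled in
+ * msg_done_handler() below: the first SSIF_IPMI_RESPONSE read of a
+ * long response begins with the marker bytes 0x00 0x01, and every
+ * following SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE read begins with a
+ * block number (0, 1, 2, ...), with 0xff marking the final block.
+ */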
+
+/*
+ * ssif_debug is a bit-field
+ *	SSIF_DEBUG_MSG -	commands and their responses
+ *	SSIF_DEBUG_STATE -	message states
+ *	SSIF_DEBUG_TIMING -	measure times between events in the driver
+ */
+#define SSIF_DEBUG_TIMING	4
+#define SSIF_DEBUG_STATE	2
+#define SSIF_DEBUG_MSG		1
+#define SSIF_NODEBUG		0
+#define SSIF_DEFAULT_DEBUG	(SSIF_NODEBUG)
+
+/*
+ * Timer values
+ */
+#define SSIF_MSG_USEC		20000	/* 20ms between message tries. */
+#define SSIF_MSG_PART_USEC	5000	/* 5ms for a message part */
+
+/* How many times do we retry sending/receiving the message. */
+#define	SSIF_SEND_RETRIES	5
+#define	SSIF_RECV_RETRIES	250
+
+#define SSIF_MSG_MSEC		(SSIF_MSG_USEC / 1000)
+#define SSIF_MSG_JIFFIES	((SSIF_MSG_USEC * 1000) / TICK_NSEC)
+#define SSIF_MSG_PART_JIFFIES	((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
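+
+/*
+ * A worked example (assuming HZ = 250, so TICK_NSEC = 4000000):
+ * SSIF_MSG_JIFFIES = (20000 * 1000) / 4000000 = 5 jiffies, and
+ * SSIF_MSG_PART_JIFFIES = (5000 * 1000) / 4000000 = 1 jiffy after
+ * integer truncation.
+ */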
+
+enum ssif_intf_state {
+	SSIF_NORMAL,
+	SSIF_GETTING_FLAGS,
+	SSIF_GETTING_EVENTS,
+	SSIF_CLEARING_FLAGS,
+	SSIF_GETTING_MESSAGES,
+	/* FIXME - add watchdog stuff. */
+};
+
+#define SSIF_IDLE(ssif)	 ((ssif)->ssif_state == SSIF_NORMAL \
+			  && (ssif)->curr_msg == NULL)
+
+/*
+ * Indexes into stats[] in ssif_info below.
+ */
+enum ssif_stat_indexes {
+	/* Number of total messages sent. */
+	SSIF_STAT_sent_messages = 0,
+
+	/*
+	 * Number of message parts sent.  Messages may be broken into
+	 * parts if they are long.
+	 */
+	SSIF_STAT_sent_messages_parts,
+
+	/*
+	 * Number of times a message was retried.
+	 */
+	SSIF_STAT_send_retries,
+
+	/*
+	 * Number of times the send of a message failed.
+	 */
+	SSIF_STAT_send_errors,
+
+	/*
+	 * Number of message responses received.
+	 */
+	SSIF_STAT_received_messages,
+
+	/*
+	 * Number of message fragments received.
+	 */
+	SSIF_STAT_received_message_parts,
+
+	/*
+	 * Number of times the receive of a message was retried.
+	 */
+	SSIF_STAT_receive_retries,
+
+	/*
+	 * Number of errors receiving messages.
+	 */
+	SSIF_STAT_receive_errors,
+
+	/*
+	 * Number of times a flag fetch was requested.
+	 */
+	SSIF_STAT_flag_fetches,
+
+	/*
+	 * Number of times the hardware didn't follow the state machine.
+	 */
+	SSIF_STAT_hosed,
+
+	/*
+	 * Number of received events.
+	 */
+	SSIF_STAT_events,
+
+	/* Number of asynchronous messages received. */
+	SSIF_STAT_incoming_messages,
+
+	/* Number of watchdog pretimeouts. */
+	SSIF_STAT_watchdog_pretimeouts,
+
+	/* Number of alerts received. */
+	SSIF_STAT_alerts,
+
+	/* Always add statistics before this value; it must be last. */
+	SSIF_NUM_STATS
+};
+
+struct ssif_addr_info {
+	struct i2c_board_info binfo;
+	char *adapter_name;
+	int debug;
+	int slave_addr;
+	enum ipmi_addr_src addr_src;
+	union ipmi_smi_info_union addr_info;
+	struct device *dev;
+	struct i2c_client *client;
+
+	struct i2c_client *added_client;
+
+	struct mutex clients_mutex;
+	struct list_head clients;
+
+	struct list_head link;
+};
+
+struct ssif_info;
+
+typedef void (*ssif_i2c_done)(struct ssif_info *ssif_info, int result,
+			     unsigned char *data, unsigned int len);
+
+struct ssif_info {
+	struct ipmi_smi     *intf;
+	spinlock_t	    lock;
+	struct ipmi_smi_msg *waiting_msg;
+	struct ipmi_smi_msg *curr_msg;
+	enum ssif_intf_state ssif_state;
+	unsigned long       ssif_debug;
+
+	struct ipmi_smi_handlers handlers;
+
+	enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
+	union ipmi_smi_info_union addr_info;
+
+	/*
+	 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
+	 * is set to hold the flags until we are done handling everything
+	 * from the flags.
+	 */
+#define RECEIVE_MSG_AVAIL	0x01
+#define EVENT_MSG_BUFFER_FULL	0x02
+#define WDT_PRE_TIMEOUT_INT	0x08
+	unsigned char       msg_flags;
+
+	u8		    global_enables;
+	bool		    has_event_buffer;
+	bool		    supports_alert;
+
+	/*
+	 * Used to tell what we should do with alerts.  If we are
+	 * waiting on a response, read the data immediately.
+	 */
+	bool		    got_alert;
+	bool		    waiting_alert;
+
+	/*
+	 * If set to true, this will request events the next time the
+	 * state machine is idle.
+	 */
+	bool                req_events;
+
+	/*
+	 * If set to true, this will request flags the next time the
+	 * state machine is idle.
+	 */
+	bool                req_flags;
+
+	/*
+	 * Used to perform timer operations when run-to-completion
+	 * mode is on.  This is a countdown timer.
+	 */
+	int                 rtc_us_timer;
+
+	/* Used for sending/receiving data.  +1 for the length. */
+	unsigned char data[IPMI_MAX_MSG_LENGTH + 1];
+	unsigned int  data_len;
+
+	/* Temp receive buffer, gets copied into data. */
+	unsigned char recv[I2C_SMBUS_BLOCK_MAX];
+
+	struct i2c_client *client;
+	ssif_i2c_done done_handler;
+
+	/* Thread interface handling */
+	struct task_struct *thread;
+	struct completion wake_thread;
+	bool stopping;
+	int i2c_read_write;
+	int i2c_command;
+	unsigned char *i2c_data;
+	unsigned int i2c_size;
+
+	struct timer_list retry_timer;
+	int retries_left;
+
+	/* Info from SSIF cmd */
+	unsigned char max_xmit_msg_size;
+	unsigned char max_recv_msg_size;
+	unsigned int  multi_support;
+	int           supports_pec;
+
+#define SSIF_NO_MULTI		0
+#define SSIF_MULTI_2_PART	1
+#define SSIF_MULTI_n_PART	2
+	unsigned char *multi_data;
+	unsigned int  multi_len;
+	unsigned int  multi_pos;
+
+	atomic_t stats[SSIF_NUM_STATS];
+};
+
+#define ssif_inc_stat(ssif, stat) \
+	atomic_inc(&(ssif)->stats[SSIF_STAT_ ## stat])
+#define ssif_get_stat(ssif, stat) \
+	((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat]))
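+
+/*
+ * These macros paste the short name onto the enum prefix, so, for
+ * example, ssif_inc_stat(info, send_errors) expands to
+ * atomic_inc(&(info)->stats[SSIF_STAT_send_errors]).
+ */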
+
+static bool initialized;
+
+static void return_hosed_msg(struct ssif_info *ssif_info,
+			     struct ipmi_smi_msg *msg);
+static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags);
+static int start_send(struct ssif_info *ssif_info,
+		      unsigned char   *data,
+		      unsigned int    len);
+
+static unsigned long *ipmi_ssif_lock_cond(struct ssif_info *ssif_info,
+					  unsigned long *flags)
+{
+	spin_lock_irqsave(&ssif_info->lock, *flags);
+	return flags;
+}
+
+static void ipmi_ssif_unlock_cond(struct ssif_info *ssif_info,
+				  unsigned long *flags)
+{
+	spin_unlock_irqrestore(&ssif_info->lock, *flags);
+}
+
+static void deliver_recv_msg(struct ssif_info *ssif_info,
+			     struct ipmi_smi_msg *msg)
+{
+	if (msg->rsp_size < 0) {
+		return_hosed_msg(ssif_info, msg);
+		pr_err(PFX
+		       "Malformed message in deliver_recv_msg: rsp_size = %d\n",
+		       msg->rsp_size);
+	} else {
+		ipmi_smi_msg_received(ssif_info->intf, msg);
+	}
+}
+
+static void return_hosed_msg(struct ssif_info *ssif_info,
+			     struct ipmi_smi_msg *msg)
+{
+	ssif_inc_stat(ssif_info, hosed);
+
+	/* Make it a response */
+	msg->rsp[0] = msg->data[0] | 4;
+	msg->rsp[1] = msg->data[1];
+	msg->rsp[2] = 0xFF; /* Unknown error. */
+	msg->rsp_size = 3;
+
+	deliver_recv_msg(ssif_info, msg);
+}
+
+/*
+ * Must be called with the message lock held.  This will release the
+ * message lock.  Note that the caller will check SSIF_IDLE and start a
+ * new operation, so there is no need to check for new messages to
+ * start in here.
+ */
+static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
+{
+	unsigned char msg[3];
+
+	ssif_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+	ssif_info->ssif_state = SSIF_CLEARING_FLAGS;
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+
+	/* Make sure the watchdog pre-timeout flag is not set at startup. */
+	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+	msg[2] = WDT_PRE_TIMEOUT_INT;
+
+	if (start_send(ssif_info, msg, 3) != 0) {
+		/* Error, just go to normal state. */
+		ssif_info->ssif_state = SSIF_NORMAL;
+	}
+}
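+
+/*
+ * Request framing note: byte 0 of a message is the network function
+ * shifted left by two and byte 1 is the command.  The BMC answers
+ * with the response network function (request | 1), which is why the
+ * checks in msg_done_handler() compare against
+ * (IPMI_NETFN_APP_REQUEST | 1) << 2.
+ */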
+
+static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+{
+	unsigned char mb[2];
+
+	ssif_info->req_flags = false;
+	ssif_info->ssif_state = SSIF_GETTING_FLAGS;
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+
+	mb[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	mb[1] = IPMI_GET_MSG_FLAGS_CMD;
+	if (start_send(ssif_info, mb, 2) != 0)
+		ssif_info->ssif_state = SSIF_NORMAL;
+}
+
+static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
+			     struct ipmi_smi_msg *msg)
+{
+	if (start_send(ssif_info, msg->data, msg->data_size) != 0) {
+		unsigned long oflags;
+
+		flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+		ssif_info->curr_msg = NULL;
+		ssif_info->ssif_state = SSIF_NORMAL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+		ipmi_free_smi_msg(msg);
+	}
+}
+
+static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+{
+	struct ipmi_smi_msg *msg;
+
+	ssif_info->req_events = false;
+
+	msg = ipmi_alloc_smi_msg();
+	if (!msg) {
+		ssif_info->ssif_state = SSIF_NORMAL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+		return;
+	}
+
+	ssif_info->curr_msg = msg;
+	ssif_info->ssif_state = SSIF_GETTING_EVENTS;
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+
+	msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+	msg->data_size = 2;
+
+	check_start_send(ssif_info, flags, msg);
+}
+
+static void start_recv_msg_fetch(struct ssif_info *ssif_info,
+				 unsigned long *flags)
+{
+	struct ipmi_smi_msg *msg;
+
+	msg = ipmi_alloc_smi_msg();
+	if (!msg) {
+		ssif_info->ssif_state = SSIF_NORMAL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+		return;
+	}
+
+	ssif_info->curr_msg = msg;
+	ssif_info->ssif_state = SSIF_GETTING_MESSAGES;
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+
+	msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	msg->data[1] = IPMI_GET_MSG_CMD;
+	msg->data_size = 2;
+
+	check_start_send(ssif_info, flags, msg);
+}
+
+/*
+ * Must be called with the message lock held.  This will release the
+ * message lock.  Note that the caller will check SSIF_IDLE and start a
+ * new operation, so there is no need to check for new messages to
+ * start in here.
+ */
+static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
+{
+	if (ssif_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
+		/* Watchdog pre-timeout */
+		ssif_inc_stat(ssif_info, watchdog_pretimeouts);
+		start_clear_flags(ssif_info, flags);
+		ipmi_smi_watchdog_pretimeout(ssif_info->intf);
+	} else if (ssif_info->msg_flags & RECEIVE_MSG_AVAIL)
+		/* Messages available. */
+		start_recv_msg_fetch(ssif_info, flags);
+	else if (ssif_info->msg_flags & EVENT_MSG_BUFFER_FULL)
+		/* Events available. */
+		start_event_fetch(ssif_info, flags);
+	else {
+		ssif_info->ssif_state = SSIF_NORMAL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+	}
+}
+
+static int ipmi_ssif_thread(void *data)
+{
+	struct ssif_info *ssif_info = data;
+
+	while (!kthread_should_stop()) {
+		int result;
+
+		/* Wait for something to do */
+		result = wait_for_completion_interruptible(
+						&ssif_info->wake_thread);
+		if (ssif_info->stopping)
+			break;
+		if (result == -ERESTARTSYS)
+			continue;
+		init_completion(&ssif_info->wake_thread);
+
+		if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) {
+			result = i2c_smbus_write_block_data(
+				ssif_info->client, ssif_info->i2c_command,
+				ssif_info->i2c_data[0],
+				ssif_info->i2c_data + 1);
+			ssif_info->done_handler(ssif_info, result, NULL, 0);
+		} else {
+			result = i2c_smbus_read_block_data(
+				ssif_info->client, ssif_info->i2c_command,
+				ssif_info->i2c_data);
+			if (result < 0)
+				ssif_info->done_handler(ssif_info, result,
+							NULL, 0);
+			else
+				ssif_info->done_handler(ssif_info, 0,
+							ssif_info->i2c_data,
+							result);
+		}
+	}
+
+	return 0;
+}
+
+static int ssif_i2c_send(struct ssif_info *ssif_info,
+			ssif_i2c_done handler,
+			int read_write, int command,
+			unsigned char *data, unsigned int size)
+{
+	ssif_info->done_handler = handler;
+
+	ssif_info->i2c_read_write = read_write;
+	ssif_info->i2c_command = command;
+	ssif_info->i2c_data = data;
+	ssif_info->i2c_size = size;
+	complete(&ssif_info->wake_thread);
+	return 0;
+}
+
+static void msg_done_handler(struct ssif_info *ssif_info, int result,
+			     unsigned char *data, unsigned int len);
+
+static void start_get(struct ssif_info *ssif_info)
+{
+	int rv;
+
+	ssif_info->rtc_us_timer = 0;
+	ssif_info->multi_pos = 0;
+
+	rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+			  SSIF_IPMI_RESPONSE,
+			  ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+	if (rv < 0) {
+		/* request failed, just return the error. */
+		if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+			pr_info("Error from i2c_non_blocking_op(5)\n");
+
+		msg_done_handler(ssif_info, -EIO, NULL, 0);
+	}
+}
+
+static void retry_timeout(struct timer_list *t)
+{
+	struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
+	unsigned long oflags, *flags;
+	bool waiting;
+
+	if (ssif_info->stopping)
+		return;
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	waiting = ssif_info->waiting_alert;
+	ssif_info->waiting_alert = false;
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+
+	if (waiting)
+		start_get(ssif_info);
+}
+
+static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
+		       unsigned int data)
+{
+	struct ssif_info *ssif_info = i2c_get_clientdata(client);
+	unsigned long oflags, *flags;
+	bool do_get = false;
+
+	if (type != I2C_PROTOCOL_SMBUS_ALERT)
+		return;
+
+	ssif_inc_stat(ssif_info, alerts);
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	if (ssif_info->waiting_alert) {
+		ssif_info->waiting_alert = false;
+		del_timer(&ssif_info->retry_timer);
+		do_get = true;
+	} else if (ssif_info->curr_msg) {
+		ssif_info->got_alert = true;
+	}
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+	if (do_get)
+		start_get(ssif_info);
+}
+
+static int start_resend(struct ssif_info *ssif_info);
+
+static void msg_done_handler(struct ssif_info *ssif_info, int result,
+			     unsigned char *data, unsigned int len)
+{
+	struct ipmi_smi_msg *msg;
+	unsigned long oflags, *flags;
+	int rv;
+
+	/*
+	 * We are single-threaded here, so no need for a lock until we
+	 * start messing with driver states or the queues.
+	 */
+
+	if (result < 0) {
+		ssif_info->retries_left--;
+		if (ssif_info->retries_left > 0) {
+			ssif_inc_stat(ssif_info, receive_retries);
+
+			flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+			ssif_info->waiting_alert = true;
+			ssif_info->rtc_us_timer = SSIF_MSG_USEC;
+			if (!ssif_info->stopping)
+				mod_timer(&ssif_info->retry_timer,
+					  jiffies + SSIF_MSG_JIFFIES);
+			ipmi_ssif_unlock_cond(ssif_info, flags);
+			return;
+		}
+
+		ssif_inc_stat(ssif_info, receive_errors);
+
+		if  (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+			pr_info("Error in msg_done_handler: %d\n", result);
+		len = 0;
+		goto continue_op;
+	}
+
+	if ((len > 1) && (ssif_info->multi_pos == 0)
+				&& (data[0] == 0x00) && (data[1] == 0x01)) {
+		/* Start of multi-part read.  Start the next transaction. */
+		int i;
+
+		ssif_inc_stat(ssif_info, received_message_parts);
+
+		/* Remove the multi-part read marker. */
+		len -= 2;
+		for (i = 0; i < len; i++)
+			ssif_info->data[i] = data[i+2];
+		ssif_info->multi_len = len;
+		ssif_info->multi_pos = 1;
+
+		rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+				  SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+				  ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+		if (rv < 0) {
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				pr_info("Error from i2c_non_blocking_op(1)\n");
+
+			result = -EIO;
+		} else
+			return;
+	} else if (ssif_info->multi_pos) {
+		/* Middle of multi-part read.  Start the next transaction. */
+		int i;
+		unsigned char blocknum;
+
+		if (len == 0) {
+			result = -EIO;
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				pr_info(PFX "Middle message with no data\n");
+
+			goto continue_op;
+		}
+
+		blocknum = data[0];
+
+		if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
+			/* Received message too big, abort the operation. */
+			result = -E2BIG;
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				pr_info("Received message too big\n");
+
+			goto continue_op;
+		}
+
+		/* Remove the blocknum from the data. */
+		len--;
+		for (i = 0; i < len; i++)
+			ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
+		ssif_info->multi_len += len;
+		if (blocknum == 0xff) {
+			/* End of read */
+			len = ssif_info->multi_len;
+			data = ssif_info->data;
+		} else if (blocknum + 1 != ssif_info->multi_pos) {
+			/*
+			 * Out of sequence block, just abort.  Block
+			 * numbers start at zero for the second block,
+			 * but multi_pos starts at one, so the +1.
+			 */
+			result = -EIO;
+		} else {
+			ssif_inc_stat(ssif_info, received_message_parts);
+
+			ssif_info->multi_pos++;
+
+			rv = ssif_i2c_send(ssif_info, msg_done_handler,
+					   I2C_SMBUS_READ,
+					   SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+					   ssif_info->recv,
+					   I2C_SMBUS_BLOCK_DATA);
+			if (rv < 0) {
+				if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+					pr_info(PFX
+						"Error from ssif_i2c_send\n");
+
+				result = -EIO;
+			} else
+				return;
+		}
+	}
+
+	if (result < 0) {
+		ssif_inc_stat(ssif_info, receive_errors);
+	} else {
+		ssif_inc_stat(ssif_info, received_messages);
+		ssif_inc_stat(ssif_info, received_message_parts);
+	}
+
+ continue_op:
+	if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
+		pr_info(PFX "DONE 1: state = %d, result=%d.\n",
+			ssif_info->ssif_state, result);
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	msg = ssif_info->curr_msg;
+	if (msg) {
+		msg->rsp_size = len;
+		if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
+			msg->rsp_size = IPMI_MAX_MSG_LENGTH;
+		memcpy(msg->rsp, data, msg->rsp_size);
+		ssif_info->curr_msg = NULL;
+	}
+
+	switch (ssif_info->ssif_state) {
+	case SSIF_NORMAL:
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+		if (!msg)
+			break;
+
+		if (result < 0)
+			return_hosed_msg(ssif_info, msg);
+		else
+			deliver_recv_msg(ssif_info, msg);
+		break;
+
+	case SSIF_GETTING_FLAGS:
+		/* We got the flags from the SSIF, now handle them. */
+		if ((result < 0) || (len < 4) || (data[2] != 0)) {
+			/*
+			 * Error fetching flags, or invalid length,
+			 * just give up for now.
+			 */
+			ssif_info->ssif_state = SSIF_NORMAL;
+			ipmi_ssif_unlock_cond(ssif_info, flags);
+			pr_warn(PFX "Error getting flags: %d %d, %x\n",
+			       result, len, (len >= 3) ? data[2] : 0);
+		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+			   || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
+			/*
+			 * Don't abort here, maybe it was a queued
+			 * response to a previous command.
+			 */
+			ipmi_ssif_unlock_cond(ssif_info, flags);
+			pr_warn(PFX "Invalid response getting flags: %x %x\n",
+				data[0], data[1]);
+		} else {
+			ssif_inc_stat(ssif_info, flag_fetches);
+			ssif_info->msg_flags = data[3];
+			handle_flags(ssif_info, flags);
+		}
+		break;
+
+	case SSIF_CLEARING_FLAGS:
+		/* We cleared the flags. */
+		if ((result < 0) || (len < 3) || (data[2] != 0)) {
+			/* Error clearing flags */
+			pr_warn(PFX "Error clearing flags: %d %d, %x\n",
+			       result, len, (len >= 3) ? data[2] : 0);
+		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+			   || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
+			pr_warn(PFX "Invalid response clearing flags: %x %x\n",
+				data[0], data[1]);
+		}
+		ssif_info->ssif_state = SSIF_NORMAL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+		break;
+
+	case SSIF_GETTING_EVENTS:
+		if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
+			/* Error getting event, probably done. */
+			msg->done(msg);
+
+			/* Take off the event flag. */
+			ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+			handle_flags(ssif_info, flags);
+		} else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+			   || msg->rsp[1] != IPMI_READ_EVENT_MSG_BUFFER_CMD) {
+			pr_warn(PFX "Invalid response getting events: %x %x\n",
+				msg->rsp[0], msg->rsp[1]);
+			msg->done(msg);
+			/* Take off the event flag. */
+			ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+			handle_flags(ssif_info, flags);
+		} else {
+			handle_flags(ssif_info, flags);
+			ssif_inc_stat(ssif_info, events);
+			deliver_recv_msg(ssif_info, msg);
+		}
+		break;
+
+	case SSIF_GETTING_MESSAGES:
+		if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
+			/* Error getting message, probably done. */
+			msg->done(msg);
+
+			/* Take off the msg flag. */
+			ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+			handle_flags(ssif_info, flags);
+		} else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+			   || msg->rsp[1] != IPMI_GET_MSG_CMD) {
+			pr_warn(PFX "Invalid response getting messages: %x %x\n",
+				msg->rsp[0], msg->rsp[1]);
+			msg->done(msg);
+
+			/* Take off the msg flag. */
+			ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+			handle_flags(ssif_info, flags);
+		} else {
+			ssif_inc_stat(ssif_info, incoming_messages);
+			handle_flags(ssif_info, flags);
+			deliver_recv_msg(ssif_info, msg);
+		}
+		break;
+	}
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
+		if (ssif_info->req_events)
+			start_event_fetch(ssif_info, flags);
+		else if (ssif_info->req_flags)
+			start_flag_fetch(ssif_info, flags);
+		else
+			start_next_msg(ssif_info, flags);
+	} else
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+
+	if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
+		pr_info(PFX "DONE 2: state = %d.\n", ssif_info->ssif_state);
+}
+
+static void msg_written_handler(struct ssif_info *ssif_info, int result,
+				unsigned char *data, unsigned int len)
+{
+	int rv;
+
+	/* We are single-threaded here, so no need for a lock. */
+	if (result < 0) {
+		ssif_info->retries_left--;
+		if (ssif_info->retries_left > 0) {
+			if (!start_resend(ssif_info)) {
+				ssif_inc_stat(ssif_info, send_retries);
+				return;
+			}
+			/* request failed, just return the error. */
+			ssif_inc_stat(ssif_info, send_errors);
+
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				pr_info(PFX
+					"Out of retries in msg_written_handler\n");
+			msg_done_handler(ssif_info, -EIO, NULL, 0);
+			return;
+		}
+
+		ssif_inc_stat(ssif_info, send_errors);
+
+		/*
+		 * Got an error on transmit, let the done routine
+		 * handle it.
+		 */
+		if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+			pr_info(PFX "Error in msg_written_handler: %d\n", result);
+
+		msg_done_handler(ssif_info, result, NULL, 0);
+		return;
+	}
+
+	if (ssif_info->multi_data) {
+		/*
+		 * In the middle of a multi-data write.  See the comment
+		 * in the SSIF_MULTI_n_PART case in the probe function
+		 * for details on the intricacies of this.
+		 */
+		int left;
+		unsigned char *data_to_send;
+
+		ssif_inc_stat(ssif_info, sent_messages_parts);
+
+		left = ssif_info->multi_len - ssif_info->multi_pos;
+		if (left > 32)
+			left = 32;
+		/* Length byte. */
+		ssif_info->multi_data[ssif_info->multi_pos] = left;
+		data_to_send = ssif_info->multi_data + ssif_info->multi_pos;
+		ssif_info->multi_pos += left;
+		if (left < 32)
+			/*
+			 * Write is finished.  Note that we must end
+			 * with a write of less than 32 bytes to
+			 * complete the transaction, even if it is
+			 * zero bytes.
+			 */
+			ssif_info->multi_data = NULL;
+
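+		/*
+		 * Example: a 63-byte request goes out as a 32-byte START
+		 * block followed by a single 31-byte MIDDLE block; any
+		 * block shorter than 32 bytes (even zero bytes) ends the
+		 * transfer.
+		 */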
+		rv = ssif_i2c_send(ssif_info, msg_written_handler,
+				  I2C_SMBUS_WRITE,
+				  SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
+				  data_to_send,
+				  I2C_SMBUS_BLOCK_DATA);
+		if (rv < 0) {
+			/* request failed, just return the error. */
+			ssif_inc_stat(ssif_info, send_errors);
+
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				pr_info(PFX "Error from ssif_i2c_send(3)\n");
+			msg_done_handler(ssif_info, -EIO, NULL, 0);
+		}
+	} else {
+		/* Ready to request the result. */
+		unsigned long oflags, *flags;
+
+		ssif_inc_stat(ssif_info, sent_messages);
+		ssif_inc_stat(ssif_info, sent_messages_parts);
+
+		flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+		if (ssif_info->got_alert) {
+			/* The result is already ready, just start it. */
+			ssif_info->got_alert = false;
+			ipmi_ssif_unlock_cond(ssif_info, flags);
+			start_get(ssif_info);
+		} else {
+			/* Wait a jiffy, then request the next message. */
+			ssif_info->waiting_alert = true;
+			ssif_info->retries_left = SSIF_RECV_RETRIES;
+			ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
+			if (!ssif_info->stopping)
+				mod_timer(&ssif_info->retry_timer,
+					  jiffies + SSIF_MSG_PART_JIFFIES);
+			ipmi_ssif_unlock_cond(ssif_info, flags);
+		}
+	}
+}
+
+static int start_resend(struct ssif_info *ssif_info)
+{
+	int rv;
+	int command;
+
+	ssif_info->got_alert = false;
+
+	if (ssif_info->data_len > 32) {
+		command = SSIF_IPMI_MULTI_PART_REQUEST_START;
+		ssif_info->multi_data = ssif_info->data;
+		ssif_info->multi_len = ssif_info->data_len;
+		/*
+		 * Subtle thing, this is 32, not 33, because we will
+		 * overwrite the thing at position 32 (which was just
+		 * transmitted) with the new length.
+		 */
+		ssif_info->multi_pos = 32;
+		ssif_info->data[0] = 32;
+	} else {
+		ssif_info->multi_data = NULL;
+		command = SSIF_IPMI_REQUEST;
+		ssif_info->data[0] = ssif_info->data_len;
+	}
+
+	rv = ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
+			  command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
+	if (rv && (ssif_info->ssif_debug & SSIF_DEBUG_MSG))
+		pr_info(PFX "Error from ssif_i2c_send(4)\n");
+	return rv;
+}
+
+static int start_send(struct ssif_info *ssif_info,
+		      unsigned char   *data,
+		      unsigned int    len)
+{
+	if (len > IPMI_MAX_MSG_LENGTH)
+		return -E2BIG;
+	if (len > ssif_info->max_xmit_msg_size)
+		return -E2BIG;
+
+	ssif_info->retries_left = SSIF_SEND_RETRIES;
+	memcpy(ssif_info->data + 1, data, len);
+	ssif_info->data_len = len;
+	return start_resend(ssif_info);
+}
+
+/* Must be called with the message lock held. */
+static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
+{
+	struct ipmi_smi_msg *msg;
+	unsigned long oflags;
+
+ restart:
+	if (!SSIF_IDLE(ssif_info)) {
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+		return;
+	}
+
+	if (!ssif_info->waiting_msg) {
+		ssif_info->curr_msg = NULL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+	} else {
+		int rv;
+
+		ssif_info->curr_msg = ssif_info->waiting_msg;
+		ssif_info->waiting_msg = NULL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+		rv = start_send(ssif_info,
+				ssif_info->curr_msg->data,
+				ssif_info->curr_msg->data_size);
+		if (rv) {
+			msg = ssif_info->curr_msg;
+			ssif_info->curr_msg = NULL;
+			return_hosed_msg(ssif_info, msg);
+			flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+			goto restart;
+		}
+	}
+}
+
+static void sender(void                *send_info,
+		   struct ipmi_smi_msg *msg)
+{
+	struct ssif_info *ssif_info = (struct ssif_info *) send_info;
+	unsigned long oflags, *flags;
+
+	BUG_ON(ssif_info->waiting_msg);
+	ssif_info->waiting_msg = msg;
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	start_next_msg(ssif_info, flags);
+
+	if (ssif_info->ssif_debug & SSIF_DEBUG_TIMING) {
+		struct timespec64 t;
+
+		ktime_get_real_ts64(&t);
+		pr_info("**Enqueue %02x %02x: %lld.%6.6ld\n",
+		       msg->data[0], msg->data[1],
+		       (long long) t.tv_sec, (long) t.tv_nsec / NSEC_PER_USEC);
+	}
+}
+
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+	struct ssif_info *ssif_info = send_info;
+
+	data->addr_src = ssif_info->addr_source;
+	data->dev = &ssif_info->client->dev;
+	data->addr_info = ssif_info->addr_info;
+	get_device(data->dev);
+
+	return 0;
+}
+
+/*
+ * Instead of having our own timer to periodically check the message
+ * flags, we let the message handler drive us.
+ */
+static void request_events(void *send_info)
+{
+	struct ssif_info *ssif_info = (struct ssif_info *) send_info;
+	unsigned long oflags, *flags;
+
+	if (!ssif_info->has_event_buffer)
+		return;
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	/*
+	 * Request flags first, not events, because the lower layer
+	 * doesn't have a way to send an attention.  But make sure
+	 * event checking still happens.
+	 */
+	ssif_info->req_events = true;
+	if (SSIF_IDLE(ssif_info))
+		start_flag_fetch(ssif_info, flags);
+	else {
+		ssif_info->req_flags = true;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+	}
+}
+
+static int ssif_start_processing(void            *send_info,
+				 struct ipmi_smi *intf)
+{
+	struct ssif_info *ssif_info = send_info;
+
+	ssif_info->intf = intf;
+
+	return 0;
+}
+
+#define MAX_SSIF_BMCS 4
+
+static unsigned short addr[MAX_SSIF_BMCS];
+static int num_addrs;
+module_param_array(addr, ushort, &num_addrs, 0);
+MODULE_PARM_DESC(addr, "The addresses to scan for IPMI BMCs on the SSIFs.");
+
+static char *adapter_name[MAX_SSIF_BMCS];
+static int num_adapter_names;
+module_param_array(adapter_name, charp, &num_adapter_names, 0);
+MODULE_PARM_DESC(adapter_name, "The string name of the I2C device that has the BMC.  By default all devices are scanned.");
+
+static int slave_addrs[MAX_SSIF_BMCS];
+static int num_slave_addrs;
+module_param_array(slave_addrs, int, &num_slave_addrs, 0);
+MODULE_PARM_DESC(slave_addrs,
+		 "The default IPMB slave address for the controller.");
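+/*
+ * Example (the addresses are made up): loading the module with
+ *   modprobe ipmi_ssif addr=0x42 slave_addrs=0x20
+ * registers a hardcoded BMC at I2C address 0x42 with IPMB slave
+ * address 0x20.
+ */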
+
+static bool alerts_broken;
+module_param(alerts_broken, bool, 0);
+MODULE_PARM_DESC(alerts_broken, "Don't enable alerts for the controller.");
+
+/*
+ * Bit 0 enables message debugging, bit 1 enables state debugging, and
+ * bit 2 enables timing debugging.  This is an array indexed by
+ * interface number.
+ */
+static int dbg[MAX_SSIF_BMCS];
+static int num_dbg;
+module_param_array(dbg, int, &num_dbg, 0);
+MODULE_PARM_DESC(dbg, "Turn on debugging.");
+
+static bool ssif_dbg_probe;
+module_param_named(dbg_probe, ssif_dbg_probe, bool, 0);
+MODULE_PARM_DESC(dbg_probe, "Enable debugging of probing of adapters.");
+
+static bool ssif_tryacpi = true;
+module_param_named(tryacpi, ssif_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI");
+
+static bool ssif_trydmi = true;
+module_param_named(trydmi, ssif_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the default scan of the interfaces identified via DMI (SMBIOS)");
+
+static DEFINE_MUTEX(ssif_infos_mutex);
+static LIST_HEAD(ssif_infos);
+
+#define IPMI_SSIF_ATTR(name) \
+static ssize_t ipmi_##name##_show(struct device *dev,			\
+				  struct device_attribute *attr,	\
+				  char *buf)				\
+{									\
+	struct ssif_info *ssif_info = dev_get_drvdata(dev);		\
+									\
+	return snprintf(buf, 10, "%u\n", ssif_get_stat(ssif_info, name));\
+}									\
+static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
+
+static ssize_t ipmi_type_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	return snprintf(buf, 10, "ssif\n");
+}
+static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);
+
+IPMI_SSIF_ATTR(sent_messages);
+IPMI_SSIF_ATTR(sent_messages_parts);
+IPMI_SSIF_ATTR(send_retries);
+IPMI_SSIF_ATTR(send_errors);
+IPMI_SSIF_ATTR(received_messages);
+IPMI_SSIF_ATTR(received_message_parts);
+IPMI_SSIF_ATTR(receive_retries);
+IPMI_SSIF_ATTR(receive_errors);
+IPMI_SSIF_ATTR(flag_fetches);
+IPMI_SSIF_ATTR(hosed);
+IPMI_SSIF_ATTR(events);
+IPMI_SSIF_ATTR(watchdog_pretimeouts);
+IPMI_SSIF_ATTR(alerts);
+
+static struct attribute *ipmi_ssif_dev_attrs[] = {
+	&dev_attr_type.attr,
+	&dev_attr_sent_messages.attr,
+	&dev_attr_sent_messages_parts.attr,
+	&dev_attr_send_retries.attr,
+	&dev_attr_send_errors.attr,
+	&dev_attr_received_messages.attr,
+	&dev_attr_received_message_parts.attr,
+	&dev_attr_receive_retries.attr,
+	&dev_attr_receive_errors.attr,
+	&dev_attr_flag_fetches.attr,
+	&dev_attr_hosed.attr,
+	&dev_attr_events.attr,
+	&dev_attr_watchdog_pretimeouts.attr,
+	&dev_attr_alerts.attr,
+	NULL
+};
+
+static const struct attribute_group ipmi_ssif_dev_attr_group = {
+	.attrs		= ipmi_ssif_dev_attrs,
+};
+
+static void shutdown_ssif(void *send_info)
+{
+	struct ssif_info *ssif_info = send_info;
+
+	device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group);
+	dev_set_drvdata(&ssif_info->client->dev, NULL);
+
+	/* Make sure the driver is not looking for flags any more. */
+	while (ssif_info->ssif_state != SSIF_NORMAL)
+		schedule_timeout_uninterruptible(1);
+
+	ssif_info->stopping = true;
+	del_timer_sync(&ssif_info->retry_timer);
+	if (ssif_info->thread) {
+		complete(&ssif_info->wake_thread);
+		kthread_stop(ssif_info->thread);
+	}
+}
+
+static int ssif_remove(struct i2c_client *client)
+{
+	struct ssif_info *ssif_info = i2c_get_clientdata(client);
+	struct ssif_addr_info *addr_info;
+
+	if (!ssif_info)
+		return 0;
+
+	/*
+	 * After this point, we won't deliver anything asynchronously
+	 * to the message handler.  We can unregister ourselves.
+	 */
+	ipmi_unregister_smi(ssif_info->intf);
+
+	list_for_each_entry(addr_info, &ssif_infos, link) {
+		if (addr_info->client == client) {
+			addr_info->client = NULL;
+			break;
+		}
+	}
+
+	kfree(ssif_info);
+
+	return 0;
+}
+
+static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
+		  int *resp_len, unsigned char *resp)
+{
+	int retry_cnt;
+	int ret;
+
+	retry_cnt = SSIF_SEND_RETRIES;
+ retry1:
+	ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
+	if (ret) {
+		retry_cnt--;
+		if (retry_cnt > 0)
+			goto retry1;
+		return -ENODEV;
+	}
+
+	ret = -ENODEV;
+	retry_cnt = SSIF_RECV_RETRIES;
+	while (retry_cnt > 0) {
+		ret = i2c_smbus_read_block_data(client, SSIF_IPMI_RESPONSE,
+						resp);
+		if (ret > 0)
+			break;
+		msleep(SSIF_MSG_MSEC);
+		retry_cnt--;
+		if (retry_cnt <= 0)
+			break;
+	}
+
+	if (ret > 0) {
+		/* Validate that the response is correct. */
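+		/*
+		 * That means the same command code in resp[1], and the
+		 * request's netfn byte in resp[0] with the response bit
+		 * set (the netfn occupies the top six bits of the byte,
+		 * so the response bit is bit 2).
+		 */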
+		if (ret < 3 ||
+		    (resp[0] != (msg[0] | (1 << 2))) ||
+		    (resp[1] != msg[1]))
+			ret = -EINVAL;
+		else {
+			*resp_len = ret;
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+	unsigned char *resp;
+	unsigned char msg[3];
+	int           rv;
+	int           len;
+
+	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	/* Do a Get Device ID command, since it is required. */
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_GET_DEVICE_ID_CMD;
+	rv = do_cmd(client, 2, msg, &len, resp);
+	if (rv)
+		rv = -ENODEV;
+	else
+		strlcpy(info->type, DEVICE_NAME, I2C_NAME_SIZE);
+	kfree(resp);
+	return rv;
+}
+
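+/*
+ * Compare two strings, ignoring any whitespace in either.  Note that
+ * this returns 0 (a match) when one string is a whitespace-stripped
+ * prefix of the other.
+ */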
+static int strcmp_nospace(char *s1, char *s2)
+{
+	while (*s1 && *s2) {
+		while (isspace(*s1))
+			s1++;
+		while (isspace(*s2))
+			s2++;
+		if (*s1 > *s2)
+			return 1;
+		if (*s1 < *s2)
+			return -1;
+		s1++;
+		s2++;
+	}
+	return 0;
+}
+
+static struct ssif_addr_info *ssif_info_find(unsigned short addr,
+					     char *adapter_name,
+					     bool match_null_name)
+{
+	struct ssif_addr_info *info, *found = NULL;
+
+restart:
+	list_for_each_entry(info, &ssif_infos, link) {
+		if (info->binfo.addr == addr) {
+			if (info->adapter_name || adapter_name) {
+				if (!info->adapter_name != !adapter_name) {
+					/* One is NULL and one is not */
+					continue;
+				}
+				if (adapter_name &&
+				    strcmp_nospace(info->adapter_name,
+						   adapter_name))
+					/* Names do not match */
+					continue;
+			}
+			found = info;
+			break;
+		}
+	}
+
+	if (!found && match_null_name) {
+		/* Try to get an exact match first, then try with a NULL name */
+		adapter_name = NULL;
+		match_null_name = false;
+		goto restart;
+	}
+
+	return found;
+}
+
+static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
+{
+#ifdef CONFIG_ACPI
+	acpi_handle acpi_handle;
+
+	acpi_handle = ACPI_HANDLE(dev);
+	if (acpi_handle) {
+		ssif_info->addr_source = SI_ACPI;
+		ssif_info->addr_info.acpi_info.acpi_handle = acpi_handle;
+		return true;
+	}
+#endif
+	return false;
+}
+
+static int find_slave_address(struct i2c_client *client, int slave_addr)
+{
+#ifdef CONFIG_IPMI_DMI_DECODE
+	if (!slave_addr)
+		slave_addr = ipmi_dmi_get_slave_addr(
+			SI_TYPE_INVALID,
+			i2c_adapter_id(client->adapter),
+			client->addr);
+#endif
+
+	return slave_addr;
+}
+
+/*
+ * Global enables we care about.
+ */
+#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
+			     IPMI_BMC_EVT_MSG_INTR)
+
+static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	unsigned char     msg[3];
+	unsigned char     *resp;
+	struct ssif_info   *ssif_info;
+	int               rv = 0;
+	int               len;
+	int               i;
+	u8		  slave_addr = 0;
+	struct ssif_addr_info *addr_info = NULL;
+
+	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	ssif_info = kzalloc(sizeof(*ssif_info), GFP_KERNEL);
+	if (!ssif_info) {
+		kfree(resp);
+		return -ENOMEM;
+	}
+
+	if (!check_acpi(ssif_info, &client->dev)) {
+		addr_info = ssif_info_find(client->addr, client->adapter->name,
+					   true);
+		if (!addr_info) {
+			/* Must have come in through sysfs. */
+			ssif_info->addr_source = SI_HOTMOD;
+		} else {
+			ssif_info->addr_source = addr_info->addr_src;
+			ssif_info->ssif_debug = addr_info->debug;
+			ssif_info->addr_info = addr_info->addr_info;
+			addr_info->client = client;
+			slave_addr = addr_info->slave_addr;
+		}
+	}
+
+	slave_addr = find_slave_address(client, slave_addr);
+
+	pr_info(PFX "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n",
+	       ipmi_addr_src_to_str(ssif_info->addr_source),
+	       client->addr, client->adapter->name, slave_addr);
+
+	ssif_info->client = client;
+	i2c_set_clientdata(client, ssif_info);
+
+	/* Now check for system interface capabilities */
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD;
+	msg[2] = 0; /* SSIF */
+	rv = do_cmd(client, 3, msg, &len, resp);
+	if (!rv && (len >= 3) && (resp[2] == 0)) {
+		if (len < 7) {
+			if (ssif_dbg_probe)
+				pr_info(PFX "SSIF info too short: %d\n", len);
+			goto no_support;
+		}
+
+		/* Got a good SSIF response, handle it. */
+		ssif_info->max_xmit_msg_size = resp[5];
+		ssif_info->max_recv_msg_size = resp[6];
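+		/*
+		 * Bits 7:6 of the capabilities byte (resp[4]) give the
+		 * transaction support (single-part, 2-part, or n-part
+		 * multi-part transfers); bit 3 indicates PEC support.
+		 */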
+		ssif_info->multi_support = (resp[4] >> 6) & 0x3;
+		ssif_info->supports_pec = (resp[4] >> 3) & 0x1;
+
+		/* Sanitize the data */
+		switch (ssif_info->multi_support) {
+		case SSIF_NO_MULTI:
+			if (ssif_info->max_xmit_msg_size > 32)
+				ssif_info->max_xmit_msg_size = 32;
+			if (ssif_info->max_recv_msg_size > 32)
+				ssif_info->max_recv_msg_size = 32;
+			break;
+
+		case SSIF_MULTI_2_PART:
+			if (ssif_info->max_xmit_msg_size > 63)
+				ssif_info->max_xmit_msg_size = 63;
+			if (ssif_info->max_recv_msg_size > 62)
+				ssif_info->max_recv_msg_size = 62;
+			break;
+
+		case SSIF_MULTI_n_PART:
+			/*
+			 * The specification is rather confusing at
+			 * this point, but I think I understand what
+			 * is meant.  At least I have a workable
+			 * solution.  With multi-part messages, you
+			 * cannot send a message that is a multiple of
+			 * 32-bytes in length, because the start and
+			 * middle messages are 32-bytes and the end
+			 * message must be at least one byte.  You
+			 * can't fudge on an extra byte, that would
+			 * screw up things like fru data writes.  So
+			 * we limit the length to 63 bytes.  That way
+			 * a 32-byte message gets sent as a single
+			 * part.  A larger message will be a 32-byte
+			 * start and the next message is always going
+			 * to be 1-31 bytes in length.  Not ideal, but
+			 * it should work.
+			 */
+			if (ssif_info->max_xmit_msg_size > 63)
+				ssif_info->max_xmit_msg_size = 63;
+			break;
+
+		default:
+			/* Data is not sane, just give up. */
+			goto no_support;
+		}
+	} else {
+ no_support:
+		/* Assume no multi-part or PEC support */
+		pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n",
+		       rv, len, resp[2]);
+
+		ssif_info->max_xmit_msg_size = 32;
+		ssif_info->max_recv_msg_size = 32;
+		ssif_info->multi_support = SSIF_NO_MULTI;
+		ssif_info->supports_pec = 0;
+	}
+
+	/* Make sure the NMI timeout is cleared. */
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+	msg[2] = WDT_PRE_TIMEOUT_INT;
+	rv = do_cmd(client, 3, msg, &len, resp);
+	if (rv || (len < 3) || (resp[2] != 0))
+		pr_warn(PFX "Unable to clear message flags: %d %d %2.2x\n",
+			rv, len, resp[2]);
+
+	/* Attempt to enable the event buffer. */
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+	rv = do_cmd(client, 2, msg, &len, resp);
+	if (rv || (len < 4) || (resp[2] != 0)) {
+		pr_warn(PFX "Error getting global enables: %d %d %2.2x\n",
+			rv, len, resp[2]);
+		rv = 0; /* Not fatal */
+		goto found;
+	}
+
+	ssif_info->global_enables = resp[3];
+
+	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
+		ssif_info->has_event_buffer = true;
+		/* buffer is already enabled, nothing to do. */
+		goto found;
+	}
+
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+	msg[2] = ssif_info->global_enables | IPMI_BMC_EVT_MSG_BUFF;
+	rv = do_cmd(client, 3, msg, &len, resp);
+	if (rv || (len < 2)) {
+		pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
+			rv, len, resp[2]);
+		rv = 0; /* Not fatal */
+		goto found;
+	}
+
+	if (resp[2] == 0) {
+		/* A successful return means the event buffer is supported. */
+		ssif_info->has_event_buffer = true;
+		ssif_info->global_enables |= IPMI_BMC_EVT_MSG_BUFF;
+	}
+
+	/* Some systems don't behave well if you enable alerts. */
+	if (alerts_broken)
+		goto found;
+
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+	msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR;
+	rv = do_cmd(client, 3, msg, &len, resp);
+	if (rv || (len < 2)) {
+		pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
+			rv, len, resp[2]);
+		rv = 0; /* Not fatal */
+		goto found;
+	}
+
+	if (resp[2] == 0) {
+		/* A successful return means the alert is supported. */
+		ssif_info->supports_alert = true;
+		ssif_info->global_enables |= IPMI_BMC_RCV_MSG_INTR;
+	}
+
+ found:
+	if (ssif_dbg_probe) {
+		pr_info("ssif_probe: i2c_probe found device at i2c address %x\n",
+			client->addr);
+	}
+
+	spin_lock_init(&ssif_info->lock);
+	ssif_info->ssif_state = SSIF_NORMAL;
+	timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
+
+	for (i = 0; i < SSIF_NUM_STATS; i++)
+		atomic_set(&ssif_info->stats[i], 0);
+
+	if (ssif_info->supports_pec)
+		ssif_info->client->flags |= I2C_CLIENT_PEC;
+
+	ssif_info->handlers.owner = THIS_MODULE;
+	ssif_info->handlers.start_processing = ssif_start_processing;
+	ssif_info->handlers.shutdown = shutdown_ssif;
+	ssif_info->handlers.get_smi_info = get_smi_info;
+	ssif_info->handlers.sender = sender;
+	ssif_info->handlers.request_events = request_events;
+
+	{
+		unsigned int thread_num;
+
+		thread_num = ((i2c_adapter_id(ssif_info->client->adapter)
+			       << 8) |
+			      ssif_info->client->addr);
+		init_completion(&ssif_info->wake_thread);
+		ssif_info->thread = kthread_run(ipmi_ssif_thread, ssif_info,
+					       "kssif%4.4x", thread_num);
+		if (IS_ERR(ssif_info->thread)) {
+			rv = PTR_ERR(ssif_info->thread);
+			dev_notice(&ssif_info->client->dev,
+				   "Could not start kernel thread: error %d\n",
+				   rv);
+			goto out;
+		}
+	}
+
+	dev_set_drvdata(&ssif_info->client->dev, ssif_info);
+	rv = device_add_group(&ssif_info->client->dev,
+			      &ipmi_ssif_dev_attr_group);
+	if (rv) {
+		dev_err(&ssif_info->client->dev,
+			"Unable to add device attributes: error %d\n",
+			rv);
+		goto out;
+	}
+
+	rv = ipmi_register_smi(&ssif_info->handlers,
+			       ssif_info,
+			       &ssif_info->client->dev,
+			       slave_addr);
+	if (rv) {
+		pr_err(PFX "Unable to register device: error %d\n", rv);
+		goto out_remove_attr;
+	}
+
+ out:
+	if (rv) {
+		if (addr_info)
+			addr_info->client = NULL;
+
+		dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv);
+		kfree(ssif_info);
+	}
+	kfree(resp);
+	return rv;
+
+out_remove_attr:
+	device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group);
+	dev_set_drvdata(&ssif_info->client->dev, NULL);
+	goto out;
+}
+
+static int ssif_adapter_handler(struct device *adev, void *opaque)
+{
+	struct ssif_addr_info *addr_info = opaque;
+
+	if (adev->type != &i2c_adapter_type)
+		return 0;
+
+	addr_info->added_client = i2c_new_device(to_i2c_adapter(adev),
+						 &addr_info->binfo);
+
+	if (!addr_info->adapter_name)
+		return 1; /* Only try the first I2C adapter by default. */
+	return 0;
+}
+
+static int new_ssif_client(int addr, char *adapter_name,
+			   int debug, int slave_addr,
+			   enum ipmi_addr_src addr_src,
+			   struct device *dev)
+{
+	struct ssif_addr_info *addr_info;
+	int rv = 0;
+
+	mutex_lock(&ssif_infos_mutex);
+	if (ssif_info_find(addr, adapter_name, false)) {
+		rv = -EEXIST;
+		goto out_unlock;
+	}
+
+	addr_info = kzalloc(sizeof(*addr_info), GFP_KERNEL);
+	if (!addr_info) {
+		rv = -ENOMEM;
+		goto out_unlock;
+	}
+
+	if (adapter_name) {
+		addr_info->adapter_name = kstrdup(adapter_name, GFP_KERNEL);
+		if (!addr_info->adapter_name) {
+			kfree(addr_info);
+			rv = -ENOMEM;
+			goto out_unlock;
+		}
+	}
+
+	strncpy(addr_info->binfo.type, DEVICE_NAME,
+		sizeof(addr_info->binfo.type));
+	addr_info->binfo.addr = addr;
+	addr_info->binfo.platform_data = addr_info;
+	addr_info->debug = debug;
+	addr_info->slave_addr = slave_addr;
+	addr_info->addr_src = addr_src;
+	addr_info->dev = dev;
+
+	if (dev)
+		dev_set_drvdata(dev, addr_info);
+
+	list_add_tail(&addr_info->link, &ssif_infos);
+
+	if (initialized)
+		i2c_for_each_dev(addr_info, ssif_adapter_handler);
+	/* Otherwise address list will get it */
+
+out_unlock:
+	mutex_unlock(&ssif_infos_mutex);
+	return rv;
+}
+
+static void free_ssif_clients(void)
+{
+	struct ssif_addr_info *info, *tmp;
+
+	mutex_lock(&ssif_infos_mutex);
+	list_for_each_entry_safe(info, tmp, &ssif_infos, link) {
+		list_del(&info->link);
+		kfree(info->adapter_name);
+		kfree(info);
+	}
+	mutex_unlock(&ssif_infos_mutex);
+}
+
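+/*
+ * Build an I2C_CLIENT_END-terminated array of the unique I2C
+ * addresses from the registered ssif_infos, for use as the I2C
+ * driver's detect address list.
+ */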
+static unsigned short *ssif_address_list(void)
+{
+	struct ssif_addr_info *info;
+	unsigned int count = 0, i;
+	unsigned short *address_list;
+
+	list_for_each_entry(info, &ssif_infos, link)
+		count++;
+
+	address_list = kcalloc(count + 1, sizeof(*address_list),
+			       GFP_KERNEL);
+	if (!address_list)
+		return NULL;
+
+	i = 0;
+	list_for_each_entry(info, &ssif_infos, link) {
+		unsigned short addr = info->binfo.addr;
+		int j;
+
+		for (j = 0; j < i; j++) {
+			if (address_list[j] == addr)
+				goto skip_addr;
+		}
+		address_list[i] = addr;
+		i++;
+skip_addr:
+		continue;
+	}
+	address_list[i] = I2C_CLIENT_END;
+
+	return address_list;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ssif_acpi_match[] = {
+	{ "IPI0001", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, ssif_acpi_match);
+#endif
+
+#ifdef CONFIG_DMI
+static int dmi_ipmi_probe(struct platform_device *pdev)
+{
+	u8 slave_addr = 0;
+	u16 i2c_addr;
+	int rv;
+
+	if (!ssif_trydmi)
+		return -ENODEV;
+
+	rv = device_property_read_u16(&pdev->dev, "i2c-addr", &i2c_addr);
+	if (rv) {
+		dev_warn(&pdev->dev, PFX "No i2c-addr property\n");
+		return -ENODEV;
+	}
+
+	rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
+	if (rv)
+		dev_warn(&pdev->dev, "device has no slave-addr property\n");
+
+	return new_ssif_client(i2c_addr, NULL, 0,
+			       slave_addr, SI_SMBIOS, &pdev->dev);
+}
+#else
+static int dmi_ipmi_probe(struct platform_device *pdev)
+{
+	return -ENODEV;
+}
+#endif
+
+static const struct i2c_device_id ssif_id[] = {
+	{ DEVICE_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, ssif_id);
+
+static struct i2c_driver ssif_i2c_driver = {
+	.class		= I2C_CLASS_HWMON,
+	.driver		= {
+		.name			= DEVICE_NAME
+	},
+	.probe		= ssif_probe,
+	.remove		= ssif_remove,
+	.alert		= ssif_alert,
+	.id_table	= ssif_id,
+	.detect		= ssif_detect
+};
+
+static int ssif_platform_probe(struct platform_device *dev)
+{
+	return dmi_ipmi_probe(dev);
+}
+
+static int ssif_platform_remove(struct platform_device *dev)
+{
+	struct ssif_addr_info *addr_info = dev_get_drvdata(&dev->dev);
+
+	if (!addr_info)
+		return 0;
+
+	mutex_lock(&ssif_infos_mutex);
+	i2c_unregister_device(addr_info->added_client);
+
+	list_del(&addr_info->link);
+	kfree(addr_info);
+	mutex_unlock(&ssif_infos_mutex);
+	return 0;
+}
+
+static struct platform_driver ipmi_driver = {
+	.driver = {
+		.name = DEVICE_NAME,
+	},
+	.probe		= ssif_platform_probe,
+	.remove		= ssif_platform_remove,
+};
+
+static int init_ipmi_ssif(void)
+{
+	int i;
+	int rv;
+
+	if (initialized)
+		return 0;
+
+	pr_info("IPMI SSIF Interface driver\n");
+
+	/* build list for i2c from addr list */
+	for (i = 0; i < num_addrs; i++) {
+		rv = new_ssif_client(addr[i], adapter_name[i],
+				     dbg[i], slave_addrs[i],
+				     SI_HARDCODED, NULL);
+		if (rv)
+			pr_err(PFX
+			       "Couldn't add hardcoded device at addr 0x%x\n",
+			       addr[i]);
+	}
+
+	if (ssif_tryacpi)
+		ssif_i2c_driver.driver.acpi_match_table	=
+			ACPI_PTR(ssif_acpi_match);
+
+	if (ssif_trydmi) {
+		rv = platform_driver_register(&ipmi_driver);
+		if (rv)
+			pr_err(PFX "Unable to register driver: %d\n", rv);
+	}
+
+	ssif_i2c_driver.address_list = ssif_address_list();
+
+	rv = i2c_add_driver(&ssif_i2c_driver);
+	if (!rv)
+		initialized = true;
+
+	return rv;
+}
+module_init(init_ipmi_ssif);
+
+static void cleanup_ipmi_ssif(void)
+{
+	if (!initialized)
+		return;
+
+	initialized = false;
+
+	i2c_del_driver(&ssif_i2c_driver);
+
+	platform_driver_unregister(&ipmi_driver);
+
+	free_ssif_clients();
+}
+module_exit(cleanup_ipmi_ssif);
+
+MODULE_ALIAS("platform:dmi-ipmi-ssif");
+MODULE_AUTHOR("Todd C Davis <todd.c.davis@intel.com>, Corey Minyard <minyard@acm.org>");
+MODULE_DESCRIPTION("IPMI driver for management controllers on an SMBus");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
new file mode 100644
index 0000000..ca1c5c5
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -0,0 +1,1339 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_watchdog.c
+ *
+ * A watchdog timer based upon the IPMI interface.
+ *
+ * Author: MontaVista Software, Inc.
+ *         Corey Minyard <minyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include <linux/mutex.h>
+#include <linux/watchdog.h>
+#include <linux/miscdevice.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/kdebug.h>
+#include <linux/rwsem.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <linux/notifier.h>
+#include <linux/nmi.h>
+#include <linux/reboot.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/sched/signal.h>
+
+#ifdef CONFIG_X86
+/*
+ * This is ugly, but I've determined that x86 is the only architecture
+ * that can reasonably support the IPMI NMI watchdog timeout at this
+ * time.  If another architecture adds this capability somehow, it
+ * will have to be a somewhat different mechanism and I have no idea
+ * how it will work.  So in the unlikely event that another
+ * architecture supports this, we can figure out a good generic
+ * mechanism for it at that time.
+ */
+#include <asm/kdebug.h>
+#include <asm/nmi.h>
+#define HAVE_DIE_NMI
+#endif
+
+#define	PFX "IPMI Watchdog: "
+
+/*
+ * The IPMI command/response information for the watchdog timer.
+ */
+
+/* values for byte 1 of the set command, byte 2 of the get response. */
+#define WDOG_DONT_LOG		(1 << 7)
+#define WDOG_DONT_STOP_ON_SET	(1 << 6)
+#define WDOG_SET_TIMER_USE(byte, use) \
+	byte = ((byte) & 0xf8) | ((use) & 0x7)
+#define WDOG_GET_TIMER_USE(byte) ((byte) & 0x7)
+#define WDOG_TIMER_USE_BIOS_FRB2	1
+#define WDOG_TIMER_USE_BIOS_POST	2
+#define WDOG_TIMER_USE_OS_LOAD		3
+#define WDOG_TIMER_USE_SMS_OS		4
+#define WDOG_TIMER_USE_OEM		5
+
+/* values for byte 2 of the set command, byte 3 of the get response. */
+#define WDOG_SET_PRETIMEOUT_ACT(byte, use) \
+	byte = ((byte) & 0x8f) | (((use) & 0x7) << 4)
+#define WDOG_GET_PRETIMEOUT_ACT(byte) (((byte) >> 4) & 0x7)
+#define WDOG_PRETIMEOUT_NONE		0
+#define WDOG_PRETIMEOUT_SMI		1
+#define WDOG_PRETIMEOUT_NMI		2
+#define WDOG_PRETIMEOUT_MSG_INT		3
+
+/* Operations that can be performed on a pretimeout. */
+#define WDOG_PREOP_NONE		0
+#define WDOG_PREOP_PANIC	1
+/* Cause data to be available to read.  Doesn't work in NMI mode. */
+#define WDOG_PREOP_GIVE_DATA	2
+
+/* Actions to perform on a full timeout. */
+#define WDOG_SET_TIMEOUT_ACT(byte, use) \
+	byte = ((byte) & 0xf8) | ((use) & 0x7)
+#define WDOG_GET_TIMEOUT_ACT(byte) ((byte) & 0x7)
+#define WDOG_TIMEOUT_NONE		0
+#define WDOG_TIMEOUT_RESET		1
+#define WDOG_TIMEOUT_POWER_DOWN		2
+#define WDOG_TIMEOUT_POWER_CYCLE	3
+
+/*
+ * Byte 3 of the get command, byte 4 of the get response is the
+ * pre-timeout in seconds.
+ */
+
+/* Bits for setting byte 4 of the set command, byte 5 of the get response. */
+#define WDOG_EXPIRE_CLEAR_BIOS_FRB2	(1 << 1)
+#define WDOG_EXPIRE_CLEAR_BIOS_POST	(1 << 2)
+#define WDOG_EXPIRE_CLEAR_OS_LOAD	(1 << 3)
+#define WDOG_EXPIRE_CLEAR_SMS_OS	(1 << 4)
+#define WDOG_EXPIRE_CLEAR_OEM		(1 << 5)
+
+/*
+ * Setting/getting the watchdog timer value.  This is for bytes 5 and
+ * 6 (the timeout time) of the set command, and bytes 6 and 7 (the
+ * timeout time) and 8 and 9 (the current countdown value) of the
+ * response.  The timeout value is given in seconds (in the command it
+ * is 100ms intervals).
+ */
+#define WDOG_SET_TIMEOUT(byte1, byte2, val) \
+	(byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8)
+#define WDOG_GET_TIMEOUT(byte1, byte2) \
+	(((byte1) | ((byte2) << 8)) / 10)
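+/*
+ * For example, timeout = 10 seconds is sent as 100 intervals of
+ * 100ms: byte1 = 0x64, byte2 = 0x00.
+ */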
+
+#define IPMI_WDOG_RESET_TIMER		0x22
+#define IPMI_WDOG_SET_TIMER		0x24
+#define IPMI_WDOG_GET_TIMER		0x25
+
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP	0x80
+
+static DEFINE_MUTEX(ipmi_watchdog_mutex);
+static bool nowayout = WATCHDOG_NOWAYOUT;
+
+static struct ipmi_user *watchdog_user;
+static int watchdog_ifnum;
+
+/* Default the timeout to 10 seconds. */
+static int timeout = 10;
+
+/* The pre-timeout is disabled by default. */
+static int pretimeout;
+
+/* Default timeout to set on panic */
+static int panic_wdt_timeout = 255;
+
+/* Default action is to reset the board on a timeout. */
+static unsigned char action_val = WDOG_TIMEOUT_RESET;
+
+static char action[16] = "reset";
+
+static unsigned char preaction_val = WDOG_PRETIMEOUT_NONE;
+
+static char preaction[16] = "pre_none";
+
+static unsigned char preop_val = WDOG_PREOP_NONE;
+
+static char preop[16] = "preop_none";
+static DEFINE_SPINLOCK(ipmi_read_lock);
+static char data_to_read;
+static DECLARE_WAIT_QUEUE_HEAD(read_q);
+static struct fasync_struct *fasync_q;
+static atomic_t pretimeout_since_last_heartbeat;
+static char expect_close;
+
+static int ifnum_to_use = -1;
+
+/* Parameters to ipmi_set_timeout */
+#define IPMI_SET_TIMEOUT_NO_HB			0
+#define IPMI_SET_TIMEOUT_HB_IF_NECESSARY	1
+#define IPMI_SET_TIMEOUT_FORCE_HB		2
+
+static int ipmi_set_timeout(int do_heartbeat);
+static void ipmi_register_watchdog(int ipmi_intf);
+static void ipmi_unregister_watchdog(int ipmi_intf);
+
+/*
+ * If true, the driver will start running as soon as it is configured
+ * and ready.
+ */
+static int start_now;
+
+static int set_param_timeout(const char *val, const struct kernel_param *kp)
+{
+	char *endp;
+	int  l;
+	int  rv = 0;
+
+	if (!val)
+		return -EINVAL;
+	l = simple_strtoul(val, &endp, 0);
+	if (endp == val)
+		return -EINVAL;
+
+	*((int *)kp->arg) = l;
+	if (watchdog_user)
+		rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+	return rv;
+}
+
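+/*
+ * A custom "timeout" module parameter type: it parses like an int,
+ * but set_param_timeout() above also pushes the new value out to the
+ * BMC immediately when the watchdog is already registered.
+ */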
+static const struct kernel_param_ops param_ops_timeout = {
+	.set = set_param_timeout,
+	.get = param_get_int,
+};
+#define param_check_timeout param_check_int
+
+typedef int (*action_fn)(const char *intval, char *outval);
+
+static int action_op(const char *inval, char *outval);
+static int preaction_op(const char *inval, char *outval);
+static int preop_op(const char *inval, char *outval);
+static void check_parms(void);
+
+static int set_param_str(const char *val, const struct kernel_param *kp)
+{
+	action_fn  fn = (action_fn) kp->arg;
+	int        rv = 0;
+	char       valcp[16];
+	char       *s;
+
+	strncpy(valcp, val, 15);
+	valcp[15] = '\0';
+
+	s = strstrip(valcp);
+
+	rv = fn(s, NULL);
+	if (rv)
+		goto out;
+
+	check_parms();
+	if (watchdog_user)
+		rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ out:
+	return rv;
+}
+
+static int get_param_str(char *buffer, const struct kernel_param *kp)
+{
+	action_fn fn = (action_fn) kp->arg;
+	int       rv;
+
+	rv = fn(NULL, buffer);
+	if (rv)
+		return rv;
+	return strlen(buffer);
+}
+
+
+static int set_param_wdog_ifnum(const char *val, const struct kernel_param *kp)
+{
+	int rv = param_set_int(val, kp);
+	if (rv)
+		return rv;
+	if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum))
+		return 0;
+
+	ipmi_unregister_watchdog(watchdog_ifnum);
+	ipmi_register_watchdog(ifnum_to_use);
+	return 0;
+}
+
+static const struct kernel_param_ops param_ops_wdog_ifnum = {
+	.set = set_param_wdog_ifnum,
+	.get = param_get_int,
+};
+
+#define param_check_wdog_ifnum param_check_int
+
+static const struct kernel_param_ops param_ops_str = {
+	.set = set_param_str,
+	.get = get_param_str,
+};
+
+module_param(ifnum_to_use, wdog_ifnum, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
+		 "timer.  Setting to -1 defaults to the first registered "
+		 "interface");
+
+module_param(timeout, timeout, 0644);
+MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
+
+module_param(pretimeout, timeout, 0644);
+MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
+
+module_param(panic_wdt_timeout, timeout, 0644);
+MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds.");
+
+module_param_cb(action, &param_ops_str, action_op, 0644);
+MODULE_PARM_DESC(action, "Timeout action. One of: "
+		 "reset, none, power_cycle, power_off.");
+
+module_param_cb(preaction, &param_ops_str, preaction_op, 0644);
+MODULE_PARM_DESC(preaction, "Pretimeout action.  One of: "
+		 "pre_none, pre_smi, pre_nmi, pre_int.");
+
+module_param_cb(preop, &param_ops_str, preop_op, 0644);
+MODULE_PARM_DESC(preop, "Pretimeout driver operation.  One of: "
+		 "preop_none, preop_panic, preop_give_data.");
+
+module_param(start_now, int, 0444);
+MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as "
+		 "soon as the driver is loaded.");
+
+module_param(nowayout, bool, 0644);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+		 "(default=CONFIG_WATCHDOG_NOWAYOUT)");
+
+/* Default state of the timer. */
+static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+
+/* Is someone using the watchdog?  Only one user is allowed. */
+static unsigned long ipmi_wdog_open;
+
+/*
+ * If set to 1, the heartbeat command will set the state to reset and
+ * start the timer.  The timer doesn't normally run when the driver is
+ * first opened until the first heartbeat is sent; this variable is
+ * used to accomplish that.
+ */
+static int ipmi_start_timer_on_heartbeat;
+
+/* IPMI version of the BMC. */
+static unsigned char ipmi_version_major;
+static unsigned char ipmi_version_minor;
+
+/* If a pretimeout occurs, this is used to allow only one panic to happen. */
+static atomic_t preop_panic_excl = ATOMIC_INIT(-1);
+
+#ifdef HAVE_DIE_NMI
+static int testing_nmi;
+static int nmi_handler_registered;
+#endif
+
+static int __ipmi_heartbeat(void);
+
+/*
+ * We use a mutex to make sure that only one thing can send a
+ * message at one time.  The mutex is claimed when a message is sent
+ * and freed when both the send and receive messages are free.
+ */
+static atomic_t msg_tofree = ATOMIC_INIT(0);
+static DECLARE_COMPLETION(msg_wait);
+static void msg_free_smi(struct ipmi_smi_msg *msg)
+{
+	if (atomic_dec_and_test(&msg_tofree))
+		complete(&msg_wait);
+}
+static void msg_free_recv(struct ipmi_recv_msg *msg)
+{
+	if (atomic_dec_and_test(&msg_tofree))
+		complete(&msg_wait);
+}
+static struct ipmi_smi_msg smi_msg = {
+	.done = msg_free_smi
+};
+static struct ipmi_recv_msg recv_msg = {
+	.done = msg_free_recv
+};
+
+static int __ipmi_set_timeout(struct ipmi_smi_msg  *smi_msg,
+			      struct ipmi_recv_msg *recv_msg,
+			      int                  *send_heartbeat_now)
+{
+	struct kernel_ipmi_msg            msg;
+	unsigned char                     data[6];
+	int                               rv;
+	struct ipmi_system_interface_addr addr;
+	int                               hbnow = 0;
+
+
+	data[0] = 0;
+	WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
+
+	if ((ipmi_version_major > 1)
+	    || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
+		/* This is an IPMI 1.5-only feature. */
+		data[0] |= WDOG_DONT_STOP_ON_SET;
+	} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+		/*
+		 * In ipmi 1.0, setting the timer stops the watchdog, we
+		 * need to start it back up again.
+		 */
+		hbnow = 1;
+	}
+
+	data[1] = 0;
+	WDOG_SET_TIMEOUT_ACT(data[1], ipmi_watchdog_state);
+	if ((pretimeout > 0) && (ipmi_watchdog_state != WDOG_TIMEOUT_NONE)) {
+	    WDOG_SET_PRETIMEOUT_ACT(data[1], preaction_val);
+	    data[2] = pretimeout;
+	} else {
+	    WDOG_SET_PRETIMEOUT_ACT(data[1], WDOG_PRETIMEOUT_NONE);
+	    data[2] = 0; /* No pretimeout. */
+	}
+	data[3] = 0;
+	WDOG_SET_TIMEOUT(data[4], data[5], timeout);
+
+	addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	addr.channel = IPMI_BMC_CHANNEL;
+	addr.lun = 0;
+
+	msg.netfn = 0x06;
+	msg.cmd = IPMI_WDOG_SET_TIMER;
+	msg.data = data;
+	msg.data_len = sizeof(data);
+	rv = ipmi_request_supply_msgs(watchdog_user,
+				      (struct ipmi_addr *) &addr,
+				      0,
+				      &msg,
+				      NULL,
+				      smi_msg,
+				      recv_msg,
+				      1);
+	if (rv)
+		pr_warn(PFX "set timeout error: %d\n", rv);
+	else if (send_heartbeat_now)
+		*send_heartbeat_now = hbnow;
+
+	return rv;
+}
+
+static int _ipmi_set_timeout(int do_heartbeat)
+{
+	int send_heartbeat_now;
+	int rv;
+
+	if (!watchdog_user)
+		return -ENODEV;
+
+	atomic_set(&msg_tofree, 2);
+
+	rv = __ipmi_set_timeout(&smi_msg,
+				&recv_msg,
+				&send_heartbeat_now);
+	if (rv)
+		return rv;
+
+	wait_for_completion(&msg_wait);
+
+	if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
+		|| ((send_heartbeat_now)
+		    && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
+		rv = __ipmi_heartbeat();
+
+	return rv;
+}
+
+static int ipmi_set_timeout(int do_heartbeat)
+{
+	int rv;
+
+	mutex_lock(&ipmi_watchdog_mutex);
+	rv = _ipmi_set_timeout(do_heartbeat);
+	mutex_unlock(&ipmi_watchdog_mutex);
+
+	return rv;
+}
+
+static atomic_t panic_done_count = ATOMIC_INIT(0);
+
+static void panic_smi_free(struct ipmi_smi_msg *msg)
+{
+	atomic_dec(&panic_done_count);
+}
+static void panic_recv_free(struct ipmi_recv_msg *msg)
+{
+	atomic_dec(&panic_done_count);
+}
+
+static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = {
+	.done = panic_smi_free
+};
+static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = {
+	.done = panic_recv_free
+};
+
+static void panic_halt_ipmi_heartbeat(void)
+{
+	struct kernel_ipmi_msg             msg;
+	struct ipmi_system_interface_addr addr;
+	int rv;
+
+	/*
+	 * Don't reset the timer if we have the timer turned off, that
+	 * re-enables the watchdog.
+	 */
+	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
+		return;
+
+	addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	addr.channel = IPMI_BMC_CHANNEL;
+	addr.lun = 0;
+
+	msg.netfn = 0x06;
+	msg.cmd = IPMI_WDOG_RESET_TIMER;
+	msg.data = NULL;
+	msg.data_len = 0;
+	atomic_add(1, &panic_done_count);
+	rv = ipmi_request_supply_msgs(watchdog_user,
+				      (struct ipmi_addr *) &addr,
+				      0,
+				      &msg,
+				      NULL,
+				      &panic_halt_heartbeat_smi_msg,
+				      &panic_halt_heartbeat_recv_msg,
+				      1);
+	if (rv)
+		atomic_sub(1, &panic_done_count);
+}
+
+static struct ipmi_smi_msg panic_halt_smi_msg = {
+	.done = panic_smi_free
+};
+static struct ipmi_recv_msg panic_halt_recv_msg = {
+	.done = panic_recv_free
+};
+
+/*
+ * Special call, doesn't claim any locks.  This is only to be called
+ * at panic or halt time, in run-to-completion mode, when the caller
+ * is the only CPU and the only thing that will be going is these IPMI
+ * calls.
+ */
+static void panic_halt_ipmi_set_timeout(void)
+{
+	int send_heartbeat_now;
+	int rv;
+
+	/* Wait for the messages to be free. */
+	while (atomic_read(&panic_done_count) != 0)
+		ipmi_poll_interface(watchdog_user);
+	atomic_add(1, &panic_done_count);
+	rv = __ipmi_set_timeout(&panic_halt_smi_msg,
+				&panic_halt_recv_msg,
+				&send_heartbeat_now);
+	if (rv) {
+		atomic_sub(1, &panic_done_count);
+		pr_warn(PFX "Unable to extend the watchdog timeout\n");
+	} else {
+		if (send_heartbeat_now)
+			panic_halt_ipmi_heartbeat();
+	}
+	while (atomic_read(&panic_done_count) != 0)
+		ipmi_poll_interface(watchdog_user);
+}
+
+static int __ipmi_heartbeat(void)
+{
+	struct kernel_ipmi_msg msg;
+	int rv;
+	struct ipmi_system_interface_addr addr;
+	int timeout_retries = 0;
+
+restart:
+	/*
+	 * Don't reset the timer if we have the timer turned off, that
+	 * re-enables the watchdog.
+	 */
+	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
+		return 0;
+
+	atomic_set(&msg_tofree, 2);
+
+	addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	addr.channel = IPMI_BMC_CHANNEL;
+	addr.lun = 0;
+
+	msg.netfn = 0x06;
+	msg.cmd = IPMI_WDOG_RESET_TIMER;
+	msg.data = NULL;
+	msg.data_len = 0;
+	rv = ipmi_request_supply_msgs(watchdog_user,
+				      (struct ipmi_addr *) &addr,
+				      0,
+				      &msg,
+				      NULL,
+				      &smi_msg,
+				      &recv_msg,
+				      1);
+	if (rv) {
+		pr_warn(PFX "heartbeat send failure: %d\n", rv);
+		return rv;
+	}
+
+	/* Wait for the heartbeat to be sent. */
+	wait_for_completion(&msg_wait);
+
+	if (recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)  {
+		timeout_retries++;
+		if (timeout_retries > 3) {
+			pr_err(PFX "Unable to restore the IPMI watchdog's settings, giving up\n");
+			rv = -EIO;
+			goto out;
+		}
+
+		/*
+		 * The timer was not initialized, which means the BMC was
+		 * probably reset and lost the watchdog information.  Attempt
+		 * to restore the timer's info.  Note that we still hold
+		 * the watchdog mutex, to keep a heartbeat from happening
+		 * in this process, so we must ask for no heartbeat here to
+		 * avoid deadlocking on that mutex.
+		 */
+		rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+		if (rv) {
+			pr_err(PFX "Unable to send the command to set the watchdog's settings, giving up\n");
+			goto out;
+		}
+
+		/* Might need a heartbeat send, go ahead and do it. */
+		goto restart;
+	} else if (recv_msg.msg.data[0] != 0) {
+		/*
+		 * Got an error in the heartbeat response.  It was already
+		 * reported in ipmi_wdog_msg_handler, but we should return
+		 * an error here.
+		 */
+		rv = -EINVAL;
+	}
+
+out:
+	return rv;
+}
+
+static int _ipmi_heartbeat(void)
+{
+	int rv;
+
+	if (!watchdog_user)
+		return -ENODEV;
+
+	if (ipmi_start_timer_on_heartbeat) {
+		ipmi_start_timer_on_heartbeat = 0;
+		ipmi_watchdog_state = action_val;
+		rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+	} else if (atomic_cmpxchg(&pretimeout_since_last_heartbeat, 1, 0)) {
+		/*
+		 * A pretimeout occurred, make sure we set the timeout.
+		 * We don't want to set the action, though, we want to
+		 * leave that alone (thus it can't be combined with the
+		 * above operation).
+		 */
+		rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+	} else {
+		rv = __ipmi_heartbeat();
+	}
+
+	return rv;
+}
+
+static int ipmi_heartbeat(void)
+{
+	int rv;
+
+	mutex_lock(&ipmi_watchdog_mutex);
+	rv = _ipmi_heartbeat();
+	mutex_unlock(&ipmi_watchdog_mutex);
+
+	return rv;
+}
+
+static struct watchdog_info ident = {
+	.options	= 0,	/* WDIOF_SETTIMEOUT, */
+	.firmware_version = 1,
+	.identity	= "IPMI"
+};
+
+static int ipmi_ioctl(struct file *file,
+		      unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	int i;
+	int val;
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		i = copy_to_user(argp, &ident, sizeof(ident));
+		return i ? -EFAULT : 0;
+
+	case WDIOC_SETTIMEOUT:
+		i = copy_from_user(&val, argp, sizeof(int));
+		if (i)
+			return -EFAULT;
+		timeout = val;
+		return _ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+	case WDIOC_GETTIMEOUT:
+		i = copy_to_user(argp, &timeout, sizeof(timeout));
+		if (i)
+			return -EFAULT;
+		return 0;
+
+	case WDIOC_SETPRETIMEOUT:
+		i = copy_from_user(&val, argp, sizeof(int));
+		if (i)
+			return -EFAULT;
+		pretimeout = val;
+		return _ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+	case WDIOC_GETPRETIMEOUT:
+		i = copy_to_user(argp, &pretimeout, sizeof(pretimeout));
+		if (i)
+			return -EFAULT;
+		return 0;
+
+	case WDIOC_KEEPALIVE:
+		return _ipmi_heartbeat();
+
+	case WDIOC_SETOPTIONS:
+		i = copy_from_user(&val, argp, sizeof(int));
+		if (i)
+			return -EFAULT;
+		if (val & WDIOS_DISABLECARD) {
+			ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+			_ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+			ipmi_start_timer_on_heartbeat = 0;
+		}
+
+		if (val & WDIOS_ENABLECARD) {
+			ipmi_watchdog_state = action_val;
+			_ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+		}
+		return 0;
+
+	case WDIOC_GETSTATUS:
+		val = 0;
+		i = copy_to_user(argp, &val, sizeof(val));
+		if (i)
+			return -EFAULT;
+		return 0;
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+static long ipmi_unlocked_ioctl(struct file *file,
+				unsigned int cmd,
+				unsigned long arg)
+{
+	int ret;
+
+	mutex_lock(&ipmi_watchdog_mutex);
+	ret = ipmi_ioctl(file, cmd, arg);
+	mutex_unlock(&ipmi_watchdog_mutex);
+
+	return ret;
+}
+
+static ssize_t ipmi_write(struct file *file,
+			  const char  __user *buf,
+			  size_t      len,
+			  loff_t      *ppos)
+{
+	int rv;
+
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			/* In case it was set long ago */
+			expect_close = 0;
+
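+			/*
+			 * Writing a 'V' arms the "magic close": a
+			 * following release of the device will then
+			 * actually stop the watchdog instead of
+			 * leaving it running.
+			 */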
+			for (i = 0; i != len; i++) {
+				char c;
+
+				if (get_user(c, buf + i))
+					return -EFAULT;
+				if (c == 'V')
+					expect_close = 42;
+			}
+		}
+		rv = ipmi_heartbeat();
+		if (rv)
+			return rv;
+	}
+	return len;
+}
+
+static ssize_t ipmi_read(struct file *file,
+			 char        __user *buf,
+			 size_t      count,
+			 loff_t      *ppos)
+{
+	int          rv = 0;
+	wait_queue_entry_t wait;
+
+	if (count <= 0)
+		return 0;
+
+	/*
+	 * A read returns when the pretimeout has gone off, and it does
+	 * so only once per pretimeout.
+	 */
+	spin_lock_irq(&ipmi_read_lock);
+	if (!data_to_read) {
+		if (file->f_flags & O_NONBLOCK) {
+			rv = -EAGAIN;
+			goto out;
+		}
+
+		init_waitqueue_entry(&wait, current);
+		add_wait_queue(&read_q, &wait);
+		while (!data_to_read) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			spin_unlock_irq(&ipmi_read_lock);
+			schedule();
+			spin_lock_irq(&ipmi_read_lock);
+		}
+		remove_wait_queue(&read_q, &wait);
+
+		if (signal_pending(current)) {
+			rv = -ERESTARTSYS;
+			goto out;
+		}
+	}
+	data_to_read = 0;
+
+ out:
+	spin_unlock_irq(&ipmi_read_lock);
+
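+	/*
+	 * The byte returned carries no information; the read
+	 * completing is itself the notification that a pretimeout
+	 * occurred.
+	 */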
+	if (rv == 0) {
+		if (copy_to_user(buf, &data_to_read, 1))
+			rv = -EFAULT;
+		else
+			rv = 1;
+	}
+
+	return rv;
+}
+
+static int ipmi_open(struct inode *ino, struct file *filep)
+{
+	switch (iminor(ino)) {
+	case WATCHDOG_MINOR:
+		if (test_and_set_bit(0, &ipmi_wdog_open))
+			return -EBUSY;
+
+		/*
+		 * Don't start the timer now, let it start on the
+		 * first heartbeat.
+		 */
+		ipmi_start_timer_on_heartbeat = 1;
+		return nonseekable_open(ino, filep);
+
+	default:
+		return -ENODEV;
+	}
+}
+
+static __poll_t ipmi_poll(struct file *file, poll_table *wait)
+{
+	__poll_t mask = 0;
+
+	poll_wait(file, &read_q, wait);
+
+	spin_lock_irq(&ipmi_read_lock);
+	if (data_to_read)
+		mask |= (EPOLLIN | EPOLLRDNORM);
+	spin_unlock_irq(&ipmi_read_lock);
+
+	return mask;
+}
+
+static int ipmi_fasync(int fd, struct file *file, int on)
+{
+	int result;
+
+	result = fasync_helper(fd, file, on, &fasync_q);
+
+	return result;
+}
+
+static int ipmi_close(struct inode *ino, struct file *filep)
+{
+	if (iminor(ino) == WATCHDOG_MINOR) {
+		if (expect_close == 42) {
+			mutex_lock(&ipmi_watchdog_mutex);
+			ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+			_ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+			mutex_unlock(&ipmi_watchdog_mutex);
+		} else {
+			pr_crit(PFX
+				"Unexpected close, not stopping watchdog!\n");
+			ipmi_heartbeat();
+		}
+		clear_bit(0, &ipmi_wdog_open);
+	}
+
+	expect_close = 0;
+
+	return 0;
+}
+
+static const struct file_operations ipmi_wdog_fops = {
+	.owner   = THIS_MODULE,
+	.read    = ipmi_read,
+	.poll    = ipmi_poll,
+	.write   = ipmi_write,
+	.unlocked_ioctl = ipmi_unlocked_ioctl,
+	.open    = ipmi_open,
+	.release = ipmi_close,
+	.fasync  = ipmi_fasync,
+	.llseek  = no_llseek,
+};
+
+static struct miscdevice ipmi_wdog_miscdev = {
+	.minor		= WATCHDOG_MINOR,
+	.name		= "watchdog",
+	.fops		= &ipmi_wdog_fops
+};
+
+static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
+				  void                 *handler_data)
+{
+	if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+			msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+		pr_info(PFX "response: The IPMI controller appears to have been reset, will attempt to reinitialize the watchdog timer\n");
+	else if (msg->msg.data[0] != 0)
+		pr_err(PFX "response: Error %x on cmd %x\n",
+		       msg->msg.data[0],
+		       msg->msg.cmd);
+
+	ipmi_free_recv_msg(msg);
+}
+
+static void ipmi_wdog_pretimeout_handler(void *handler_data)
+{
+	if (preaction_val != WDOG_PRETIMEOUT_NONE) {
+		if (preop_val == WDOG_PREOP_PANIC) {
+			if (atomic_inc_and_test(&preop_panic_excl))
+				panic("Watchdog pre-timeout");
+		} else if (preop_val == WDOG_PREOP_GIVE_DATA) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&ipmi_read_lock, flags);
+			data_to_read = 1;
+			wake_up_interruptible(&read_q);
+			kill_fasync(&fasync_q, SIGIO, POLL_IN);
+			spin_unlock_irqrestore(&ipmi_read_lock, flags);
+		}
+	}
+
+	/*
+	 * On some machines, the heartbeat will give an error and not
+	 * work unless we re-enable the timer.  So do so.
+	 */
+	atomic_set(&pretimeout_since_last_heartbeat, 1);
+}
+
+static void ipmi_wdog_panic_handler(void *user_data)
+{
+	static int panic_event_handled;
+
+	/*
+	 * On a panic, if we have a panic timeout, make sure to extend
+	 * the watchdog timer to a reasonable value to complete the
+	 * panic, if the watchdog timer is running.  Plus the
+	 * pretimeout is meaningless at panic time.
+	 */
+	if (watchdog_user && !panic_event_handled &&
+	    ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+		/* Make sure we do this only once. */
+		panic_event_handled = 1;
+
+		timeout = panic_wdt_timeout;
+		pretimeout = 0;
+		panic_halt_ipmi_set_timeout();
+	}
+}
+
+static const struct ipmi_user_hndl ipmi_hndlrs = {
+	.ipmi_recv_hndl           = ipmi_wdog_msg_handler,
+	.ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler,
+	.ipmi_panic_handler       = ipmi_wdog_panic_handler
+};
+
+static void ipmi_register_watchdog(int ipmi_intf)
+{
+	int rv = -EBUSY;
+
+	if (watchdog_user)
+		goto out;
+
+	if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf))
+		goto out;
+
+	watchdog_ifnum = ipmi_intf;
+
+	rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user);
+	if (rv < 0) {
+		pr_crit(PFX "Unable to register with ipmi\n");
+		goto out;
+	}
+
+	rv = ipmi_get_version(watchdog_user,
+			      &ipmi_version_major,
+			      &ipmi_version_minor);
+	if (rv) {
+		pr_warn(PFX "Unable to get IPMI version, assuming 1.0\n");
+		ipmi_version_major = 1;
+		ipmi_version_minor = 0;
+	}
+
+	rv = misc_register(&ipmi_wdog_miscdev);
+	if (rv < 0) {
+		ipmi_destroy_user(watchdog_user);
+		watchdog_user = NULL;
+		pr_crit(PFX "Unable to register misc device\n");
+	}
+
+#ifdef HAVE_DIE_NMI
+	if (nmi_handler_registered) {
+		int old_pretimeout = pretimeout;
+		int old_timeout = timeout;
+		int old_preop_val = preop_val;
+
+		/*
+		 * Set the pretimeout to go off in a second and give
+		 * ourselves plenty of time to stop the timer.
+		 */
+		ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+		preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */
+		pretimeout = 99;
+		timeout = 100;
+
+		testing_nmi = 1;
+
+		rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+		if (rv) {
+			pr_warn(PFX "Error starting timer to test NMI: 0x%x.  The NMI pretimeout will likely not work\n",
+				rv);
+			rv = 0;
+			goto out_restore;
+		}
+
+		msleep(1500);
+
+		if (testing_nmi != 2) {
+			pr_warn(PFX "IPMI NMI didn't seem to occur.  The NMI pretimeout will likely not work\n");
+		}
+ out_restore:
+		testing_nmi = 0;
+		preop_val = old_preop_val;
+		pretimeout = old_pretimeout;
+		timeout = old_timeout;
+	}
+#endif
+
+ out:
+	if (start_now && rv == 0) {
+		/* Run from startup, so start the timer now. */
+		start_now = 0; /* Disable this function after first startup. */
+		ipmi_watchdog_state = action_val;
+		ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+		pr_info(PFX "Starting now!\n");
+	} else {
+		/* Stop the timer now. */
+		ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+		ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+	}
+}
+
+static void ipmi_unregister_watchdog(int ipmi_intf)
+{
+	int rv;
+	struct ipmi_user *loc_user = watchdog_user;
+
+	if (!loc_user)
+		return;
+
+	if (watchdog_ifnum != ipmi_intf)
+		return;
+
+	/* Make sure no one can call us any more. */
+	misc_deregister(&ipmi_wdog_miscdev);
+
+	watchdog_user = NULL;
+
+	/*
+	 * Wait to make sure the message makes it out.  The lower layer has
+	 * pointers to our buffers, we want to make sure they are done before
+	 * we release our memory.
+	 */
+	while (atomic_read(&msg_tofree))
+		msg_free_smi(NULL);
+
+	mutex_lock(&ipmi_watchdog_mutex);
+
+	/* Disconnect from IPMI. */
+	rv = ipmi_destroy_user(loc_user);
+	if (rv)
+		pr_warn(PFX "error unlinking from IPMI: %d\n",  rv);
+
+	/* If it comes back, restart it properly. */
+	ipmi_start_timer_on_heartbeat = 1;
+
+	mutex_unlock(&ipmi_watchdog_mutex);
+}
+
+#ifdef HAVE_DIE_NMI
+static int
+ipmi_nmi(unsigned int val, struct pt_regs *regs)
+{
+	/*
+	 * If we get here, it's an NMI that's not a memory or I/O
+	 * error.  We can't truly tell if it's from IPMI or not
+	 * without sending a message, and sending a message is almost
+	 * impossible because of locking.
+	 */
+
+	if (testing_nmi) {
+		testing_nmi = 2;
+		return NMI_HANDLED;
+	}
+
+	/* If we are not expecting a timeout, ignore it. */
+	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
+		return NMI_DONE;
+
+	if (preaction_val != WDOG_PRETIMEOUT_NMI)
+		return NMI_DONE;
+
+	/*
+	 * If no one else handled the NMI, we assume it was the IPMI
+	 * watchdog.
+	 */
+	if (preop_val == WDOG_PREOP_PANIC) {
+		/*
+		 * On some machines, the heartbeat will give an error and
+		 * not work unless we re-enable the timer.  So do so.
+		 */
+		atomic_set(&pretimeout_since_last_heartbeat, 1);
+		if (atomic_inc_and_test(&preop_panic_excl))
+			nmi_panic(regs, PFX "pre-timeout");
+	}
+
+	return NMI_HANDLED;
+}
+#endif
+
+static int wdog_reboot_handler(struct notifier_block *this,
+			       unsigned long         code,
+			       void                  *unused)
+{
+	static int reboot_event_handled;
+
+	if (watchdog_user && !reboot_event_handled) {
+		/* Make sure we only do this once. */
+		reboot_event_handled = 1;
+
+		if (code == SYS_POWER_OFF || code == SYS_HALT) {
+			/* Disable the WDT if we are shutting down. */
+			ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+			ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+		} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+			/*
+			 * Set a long timer to let the reboot happen or
+			 * reset if it hangs, but only if the watchdog
+			 * timer was already running.
+			 */
+			if (timeout < 120)
+				timeout = 120;
+			pretimeout = 0;
+			ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+			ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+		}
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block wdog_reboot_notifier = {
+	.notifier_call	= wdog_reboot_handler,
+	.next		= NULL,
+	.priority	= 0
+};
+
+static void ipmi_new_smi(int if_num, struct device *device)
+{
+	ipmi_register_watchdog(if_num);
+}
+
+static void ipmi_smi_gone(int if_num)
+{
+	ipmi_unregister_watchdog(if_num);
+}
+
+static struct ipmi_smi_watcher smi_watcher = {
+	.owner    = THIS_MODULE,
+	.new_smi  = ipmi_new_smi,
+	.smi_gone = ipmi_smi_gone
+};
+
+static int action_op(const char *inval, char *outval)
+{
+	if (outval)
+		strcpy(outval, action);
+
+	if (!inval)
+		return 0;
+
+	if (strcmp(inval, "reset") == 0)
+		action_val = WDOG_TIMEOUT_RESET;
+	else if (strcmp(inval, "none") == 0)
+		action_val = WDOG_TIMEOUT_NONE;
+	else if (strcmp(inval, "power_cycle") == 0)
+		action_val = WDOG_TIMEOUT_POWER_CYCLE;
+	else if (strcmp(inval, "power_off") == 0)
+		action_val = WDOG_TIMEOUT_POWER_DOWN;
+	else
+		return -EINVAL;
+	strcpy(action, inval);
+	return 0;
+}
+
+static int preaction_op(const char *inval, char *outval)
+{
+	if (outval)
+		strcpy(outval, preaction);
+
+	if (!inval)
+		return 0;
+
+	if (strcmp(inval, "pre_none") == 0)
+		preaction_val = WDOG_PRETIMEOUT_NONE;
+	else if (strcmp(inval, "pre_smi") == 0)
+		preaction_val = WDOG_PRETIMEOUT_SMI;
+#ifdef HAVE_DIE_NMI
+	else if (strcmp(inval, "pre_nmi") == 0)
+		preaction_val = WDOG_PRETIMEOUT_NMI;
+#endif
+	else if (strcmp(inval, "pre_int") == 0)
+		preaction_val = WDOG_PRETIMEOUT_MSG_INT;
+	else
+		return -EINVAL;
+	strcpy(preaction, inval);
+	return 0;
+}
+
+static int preop_op(const char *inval, char *outval)
+{
+	if (outval)
+		strcpy(outval, preop);
+
+	if (!inval)
+		return 0;
+
+	if (strcmp(inval, "preop_none") == 0)
+		preop_val = WDOG_PREOP_NONE;
+	else if (strcmp(inval, "preop_panic") == 0)
+		preop_val = WDOG_PREOP_PANIC;
+	else if (strcmp(inval, "preop_give_data") == 0)
+		preop_val = WDOG_PREOP_GIVE_DATA;
+	else
+		return -EINVAL;
+	strcpy(preop, inval);
+	return 0;
+}
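+
+/*
+ * Editorial usage note (values are examples): these three parsers back
+ * the driver's "action", "preaction" and "preop" module parameters, so
+ * the usual way to exercise them is at load time, e.g.
+ *
+ *	modprobe ipmi_watchdog action=power_cycle preaction=pre_nmi \
+ *		preop=preop_panic timeout=20
+ *
+ * or, assuming the parameters are exported writable as is customary for
+ * this driver, at runtime via
+ *
+ *	echo reset > /sys/module/ipmi_watchdog/parameters/action
+ */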
+
+static void check_parms(void)
+{
+#ifdef HAVE_DIE_NMI
+	int do_nmi = 0;
+	int rv;
+
+	if (preaction_val == WDOG_PRETIMEOUT_NMI) {
+		do_nmi = 1;
+		if (preop_val == WDOG_PREOP_GIVE_DATA) {
+			pr_warn(PFX "Pretimeout op is to give data but NMI pretimeout is enabled, setting pretimeout op to none\n");
+			preop_op("preop_none", NULL);
+			do_nmi = 0;
+		}
+	}
+	if (do_nmi && !nmi_handler_registered) {
+		rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0,
+						"ipmi");
+		if (rv) {
+			pr_warn(PFX "Can't register nmi handler\n");
+			return;
+		}
+		nmi_handler_registered = 1;
+	} else if (!do_nmi && nmi_handler_registered) {
+		unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
+		nmi_handler_registered = 0;
+	}
+#endif
+}
+
+static int __init ipmi_wdog_init(void)
+{
+	int rv;
+
+	if (action_op(action, NULL)) {
+		action_op("reset", NULL);
+		pr_info(PFX "Unknown action '%s', defaulting to reset\n",
+			action);
+	}
+
+	if (preaction_op(preaction, NULL)) {
+		preaction_op("pre_none", NULL);
+		pr_info(PFX "Unknown preaction '%s', defaulting to none\n",
+			preaction);
+	}
+
+	if (preop_op(preop, NULL)) {
+		preop_op("preop_none", NULL);
+		pr_info(PFX "Unknown preop '%s', defaulting to none\n", preop);
+	}
+
+	check_parms();
+
+	register_reboot_notifier(&wdog_reboot_notifier);
+
+	rv = ipmi_smi_watcher_register(&smi_watcher);
+	if (rv) {
+#ifdef HAVE_DIE_NMI
+		if (nmi_handler_registered)
+			unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
+#endif
+		unregister_reboot_notifier(&wdog_reboot_notifier);
+		pr_warn(PFX "can't register smi watcher\n");
+		return rv;
+	}
+
+	pr_info(PFX "driver initialized\n");
+
+	return 0;
+}
+
+static void __exit ipmi_wdog_exit(void)
+{
+	ipmi_smi_watcher_unregister(&smi_watcher);
+	ipmi_unregister_watchdog(watchdog_ifnum);
+
+#ifdef HAVE_DIE_NMI
+	if (nmi_handler_registered)
+		unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
+#endif
+
+	unregister_reboot_notifier(&wdog_reboot_notifier);
+}
+module_exit(ipmi_wdog_exit);
+module_init(ipmi_wdog_init);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("watchdog timer based upon the IPMI interface.");
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
new file mode 100644
index 0000000..e6124bd
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, Intel Corporation.
+ */
+
+#define pr_fmt(fmt) "kcs-bmc: " fmt
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/ipmi_bmc.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "kcs_bmc.h"
+
+#define DEVICE_NAME "ipmi-kcs"
+
+#define KCS_MSG_BUFSIZ    1000
+
+#define KCS_ZERO_DATA     0
+
+
+/* IPMI 2.0 - Table 9-1, KCS Interface Status Register Bits */
+#define KCS_STATUS_STATE(state) ((state) << 6)
+#define KCS_STATUS_STATE_MASK   GENMASK(7, 6)
+#define KCS_STATUS_CMD_DAT      BIT(3)
+#define KCS_STATUS_SMS_ATN      BIT(2)
+#define KCS_STATUS_IBF          BIT(1)
+#define KCS_STATUS_OBF          BIT(0)
+
+/* IPMI 2.0 - Table 9-2, KCS Interface State Bits */
+enum kcs_states {
+	IDLE_STATE  = 0,
+	READ_STATE  = 1,
+	WRITE_STATE = 2,
+	ERROR_STATE = 3,
+};
+
+/* IPMI 2.0 - Table 9-3, KCS Interface Control Codes */
+#define KCS_CMD_GET_STATUS_ABORT  0x60
+#define KCS_CMD_WRITE_START       0x61
+#define KCS_CMD_WRITE_END         0x62
+#define KCS_CMD_READ_BYTE         0x68
+
+static inline u8 read_data(struct kcs_bmc *kcs_bmc)
+{
+	return kcs_bmc->io_inputb(kcs_bmc, kcs_bmc->ioreg.idr);
+}
+
+static inline void write_data(struct kcs_bmc *kcs_bmc, u8 data)
+{
+	kcs_bmc->io_outputb(kcs_bmc, kcs_bmc->ioreg.odr, data);
+}
+
+static inline u8 read_status(struct kcs_bmc *kcs_bmc)
+{
+	return kcs_bmc->io_inputb(kcs_bmc, kcs_bmc->ioreg.str);
+}
+
+static inline void write_status(struct kcs_bmc *kcs_bmc, u8 data)
+{
+	kcs_bmc->io_outputb(kcs_bmc, kcs_bmc->ioreg.str, data);
+}
+
+static void update_status_bits(struct kcs_bmc *kcs_bmc, u8 mask, u8 val)
+{
+	u8 tmp = read_status(kcs_bmc);
+
+	tmp &= ~mask;
+	tmp |= val & mask;
+
+	write_status(kcs_bmc, tmp);
+}
+
+static inline void set_state(struct kcs_bmc *kcs_bmc, u8 state)
+{
+	update_status_bits(kcs_bmc, KCS_STATUS_STATE_MASK,
+					KCS_STATUS_STATE(state));
+}
+
+static void kcs_force_abort(struct kcs_bmc *kcs_bmc)
+{
+	set_state(kcs_bmc, ERROR_STATE);
+	read_data(kcs_bmc);
+	write_data(kcs_bmc, KCS_ZERO_DATA);
+
+	kcs_bmc->phase = KCS_PHASE_ERROR;
+	kcs_bmc->data_in_avail = false;
+	kcs_bmc->data_in_idx = 0;
+}
+
+static void kcs_bmc_handle_data(struct kcs_bmc *kcs_bmc)
+{
+	u8 data;
+
+	switch (kcs_bmc->phase) {
+	case KCS_PHASE_WRITE_START:
+		kcs_bmc->phase = KCS_PHASE_WRITE_DATA;
+		/* fall through */
+
+	case KCS_PHASE_WRITE_DATA:
+		if (kcs_bmc->data_in_idx < KCS_MSG_BUFSIZ) {
+			set_state(kcs_bmc, WRITE_STATE);
+			write_data(kcs_bmc, KCS_ZERO_DATA);
+			kcs_bmc->data_in[kcs_bmc->data_in_idx++] =
+						read_data(kcs_bmc);
+		} else {
+			kcs_force_abort(kcs_bmc);
+			kcs_bmc->error = KCS_LENGTH_ERROR;
+		}
+		break;
+
+	case KCS_PHASE_WRITE_END_CMD:
+		if (kcs_bmc->data_in_idx < KCS_MSG_BUFSIZ) {
+			set_state(kcs_bmc, READ_STATE);
+			kcs_bmc->data_in[kcs_bmc->data_in_idx++] =
+						read_data(kcs_bmc);
+			kcs_bmc->phase = KCS_PHASE_WRITE_DONE;
+			kcs_bmc->data_in_avail = true;
+			wake_up_interruptible(&kcs_bmc->queue);
+		} else {
+			kcs_force_abort(kcs_bmc);
+			kcs_bmc->error = KCS_LENGTH_ERROR;
+		}
+		break;
+
+	case KCS_PHASE_READ:
+		if (kcs_bmc->data_out_idx == kcs_bmc->data_out_len)
+			set_state(kcs_bmc, IDLE_STATE);
+
+		data = read_data(kcs_bmc);
+		if (data != KCS_CMD_READ_BYTE) {
+			set_state(kcs_bmc, ERROR_STATE);
+			write_data(kcs_bmc, KCS_ZERO_DATA);
+			break;
+		}
+
+		if (kcs_bmc->data_out_idx == kcs_bmc->data_out_len) {
+			write_data(kcs_bmc, KCS_ZERO_DATA);
+			kcs_bmc->phase = KCS_PHASE_IDLE;
+			break;
+		}
+
+		write_data(kcs_bmc,
+			kcs_bmc->data_out[kcs_bmc->data_out_idx++]);
+		break;
+
+	case KCS_PHASE_ABORT_ERROR1:
+		set_state(kcs_bmc, READ_STATE);
+		read_data(kcs_bmc);
+		write_data(kcs_bmc, kcs_bmc->error);
+		kcs_bmc->phase = KCS_PHASE_ABORT_ERROR2;
+		break;
+
+	case KCS_PHASE_ABORT_ERROR2:
+		set_state(kcs_bmc, IDLE_STATE);
+		read_data(kcs_bmc);
+		write_data(kcs_bmc, KCS_ZERO_DATA);
+		kcs_bmc->phase = KCS_PHASE_IDLE;
+		break;
+
+	default:
+		kcs_force_abort(kcs_bmc);
+		break;
+	}
+}
+
+static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc)
+{
+	u8 cmd;
+
+	set_state(kcs_bmc, WRITE_STATE);
+	write_data(kcs_bmc, KCS_ZERO_DATA);
+
+	cmd = read_data(kcs_bmc);
+	switch (cmd) {
+	case KCS_CMD_WRITE_START:
+		kcs_bmc->phase = KCS_PHASE_WRITE_START;
+		kcs_bmc->error = KCS_NO_ERROR;
+		kcs_bmc->data_in_avail = false;
+		kcs_bmc->data_in_idx = 0;
+		break;
+
+	case KCS_CMD_WRITE_END:
+		if (kcs_bmc->phase != KCS_PHASE_WRITE_DATA) {
+			kcs_force_abort(kcs_bmc);
+			break;
+		}
+
+		kcs_bmc->phase = KCS_PHASE_WRITE_END_CMD;
+		break;
+
+	case KCS_CMD_GET_STATUS_ABORT:
+		if (kcs_bmc->error == KCS_NO_ERROR)
+			kcs_bmc->error = KCS_ABORTED_BY_COMMAND;
+
+		kcs_bmc->phase = KCS_PHASE_ABORT_ERROR1;
+		kcs_bmc->data_in_avail = false;
+		kcs_bmc->data_in_idx = 0;
+		break;
+
+	default:
+		kcs_force_abort(kcs_bmc);
+		kcs_bmc->error = KCS_ILLEGAL_CONTROL_CODE;
+		break;
+	}
+}
+
+int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc)
+{
+	unsigned long flags;
+	int ret = -ENODATA;
+	u8 status;
+
+	spin_lock_irqsave(&kcs_bmc->lock, flags);
+
+	status = read_status(kcs_bmc);
+	if (status & KCS_STATUS_IBF) {
+		if (!kcs_bmc->running)
+			kcs_force_abort(kcs_bmc);
+		else if (status & KCS_STATUS_CMD_DAT)
+			kcs_bmc_handle_cmd(kcs_bmc);
+		else
+			kcs_bmc_handle_data(kcs_bmc);
+
+		ret = 0;
+	}
+
+	spin_unlock_irqrestore(&kcs_bmc->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(kcs_bmc_handle_event);
+
+static inline struct kcs_bmc *to_kcs_bmc(struct file *filp)
+{
+	return container_of(filp->private_data, struct kcs_bmc, miscdev);
+}
+
+static int kcs_bmc_open(struct inode *inode, struct file *filp)
+{
+	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
+	int ret = 0;
+
+	spin_lock_irq(&kcs_bmc->lock);
+	if (!kcs_bmc->running)
+		kcs_bmc->running = 1;
+	else
+		ret = -EBUSY;
+	spin_unlock_irq(&kcs_bmc->lock);
+
+	return ret;
+}
+
+static __poll_t kcs_bmc_poll(struct file *filp, poll_table *wait)
+{
+	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
+	__poll_t mask = 0;
+
+	poll_wait(filp, &kcs_bmc->queue, wait);
+
+	spin_lock_irq(&kcs_bmc->lock);
+	if (kcs_bmc->data_in_avail)
+		mask |= EPOLLIN;
+	spin_unlock_irq(&kcs_bmc->lock);
+
+	return mask;
+}
+
+static ssize_t kcs_bmc_read(struct file *filp, char __user *buf,
+			    size_t count, loff_t *ppos)
+{
+	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
+	bool data_avail;
+	size_t data_len;
+	ssize_t ret;
+
+	if (!(filp->f_flags & O_NONBLOCK))
+		wait_event_interruptible(kcs_bmc->queue,
+					 kcs_bmc->data_in_avail);
+
+	mutex_lock(&kcs_bmc->mutex);
+
+	spin_lock_irq(&kcs_bmc->lock);
+	data_avail = kcs_bmc->data_in_avail;
+	if (data_avail) {
+		data_len = kcs_bmc->data_in_idx;
+		memcpy(kcs_bmc->kbuffer, kcs_bmc->data_in, data_len);
+	}
+	spin_unlock_irq(&kcs_bmc->lock);
+
+	if (!data_avail) {
+		ret = -EAGAIN;
+		goto out_unlock;
+	}
+
+	if (count < data_len) {
+		pr_err("channel=%u: request of %zu bytes exceeds read buffer\n",
+		       kcs_bmc->channel, data_len);
+
+		spin_lock_irq(&kcs_bmc->lock);
+		kcs_force_abort(kcs_bmc);
+		spin_unlock_irq(&kcs_bmc->lock);
+
+		ret = -EOVERFLOW;
+		goto out_unlock;
+	}
+
+	if (copy_to_user(buf, kcs_bmc->kbuffer, data_len)) {
+		ret = -EFAULT;
+		goto out_unlock;
+	}
+
+	ret = data_len;
+
+	spin_lock_irq(&kcs_bmc->lock);
+	if (kcs_bmc->phase == KCS_PHASE_WRITE_DONE) {
+		kcs_bmc->phase = KCS_PHASE_WAIT_READ;
+		kcs_bmc->data_in_avail = false;
+		kcs_bmc->data_in_idx = 0;
+	} else {
+		ret = -EAGAIN;
+	}
+	spin_unlock_irq(&kcs_bmc->lock);
+
+out_unlock:
+	mutex_unlock(&kcs_bmc->mutex);
+
+	return ret;
+}
+
+static ssize_t kcs_bmc_write(struct file *filp, const char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
+	ssize_t ret;
+
+	/* The minimum response size is 3 bytes: netfn + cmd + ccode. */
+	if (count < 3 || count > KCS_MSG_BUFSIZ)
+		return -EINVAL;
+
+	mutex_lock(&kcs_bmc->mutex);
+
+	if (copy_from_user(kcs_bmc->kbuffer, buf, count)) {
+		ret = -EFAULT;
+		goto out_unlock;
+	}
+
+	spin_lock_irq(&kcs_bmc->lock);
+	if (kcs_bmc->phase == KCS_PHASE_WAIT_READ) {
+		kcs_bmc->phase = KCS_PHASE_READ;
+		kcs_bmc->data_out_idx = 1;
+		kcs_bmc->data_out_len = count;
+		memcpy(kcs_bmc->data_out, kcs_bmc->kbuffer, count);
+		write_data(kcs_bmc, kcs_bmc->data_out[0]);
+		ret = count;
+	} else {
+		ret = -EINVAL;
+	}
+	spin_unlock_irq(&kcs_bmc->lock);
+
+out_unlock:
+	mutex_unlock(&kcs_bmc->mutex);
+
+	return ret;
+}
+
+static long kcs_bmc_ioctl(struct file *filp, unsigned int cmd,
+			  unsigned long arg)
+{
+	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
+	long ret = 0;
+
+	spin_lock_irq(&kcs_bmc->lock);
+
+	switch (cmd) {
+	case IPMI_BMC_IOCTL_SET_SMS_ATN:
+		update_status_bits(kcs_bmc, KCS_STATUS_SMS_ATN,
+				   KCS_STATUS_SMS_ATN);
+		break;
+
+	case IPMI_BMC_IOCTL_CLEAR_SMS_ATN:
+		update_status_bits(kcs_bmc, KCS_STATUS_SMS_ATN,
+				   0);
+		break;
+
+	case IPMI_BMC_IOCTL_FORCE_ABORT:
+		kcs_force_abort(kcs_bmc);
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	spin_unlock_irq(&kcs_bmc->lock);
+
+	return ret;
+}
+
+static int kcs_bmc_release(struct inode *inode, struct file *filp)
+{
+	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
+
+	spin_lock_irq(&kcs_bmc->lock);
+	kcs_bmc->running = 0;
+	kcs_force_abort(kcs_bmc);
+	spin_unlock_irq(&kcs_bmc->lock);
+
+	return 0;
+}
+
+static const struct file_operations kcs_bmc_fops = {
+	.owner          = THIS_MODULE,
+	.open           = kcs_bmc_open,
+	.read           = kcs_bmc_read,
+	.write          = kcs_bmc_write,
+	.release        = kcs_bmc_release,
+	.poll           = kcs_bmc_poll,
+	.unlocked_ioctl = kcs_bmc_ioctl,
+};
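+
+/*
+ * Editorial sketch, not part of the driver: a BMC-side IPMI daemon talks
+ * to this interface through the misc device set up in kcs_bmc_alloc()
+ * below (/dev/ipmi-kcs<chan>).  It polls for a complete request, builds
+ * a response of at least netfn + cmd + ccode, and writes it back; the
+ * device path and the hypothetical handle_request() helper are examples.
+ *
+ *	#include <poll.h>
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *
+ *	int fd = open("/dev/ipmi-kcs3", O_RDWR);
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *	unsigned char req[1000], rsp[1000];	// KCS_MSG_BUFSIZ-sized
+ *
+ *	for (;;) {
+ *		poll(&pfd, 1, -1);			// wait for a request
+ *		ssize_t n = read(fd, req, sizeof(req));
+ *		if (n < 3)
+ *			continue;
+ *		size_t len = handle_request(req, n, rsp); // hypothetical
+ *		write(fd, rsp, len);			// len >= 3
+ *	}
+ */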
+
+struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
+{
+	struct kcs_bmc *kcs_bmc;
+
+	kcs_bmc = devm_kzalloc(dev, sizeof(*kcs_bmc) + sizeof_priv, GFP_KERNEL);
+	if (!kcs_bmc)
+		return NULL;
+
+	spin_lock_init(&kcs_bmc->lock);
+	kcs_bmc->channel = channel;
+
+	mutex_init(&kcs_bmc->mutex);
+	init_waitqueue_head(&kcs_bmc->queue);
+
+	kcs_bmc->data_in = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+	kcs_bmc->data_out = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+	kcs_bmc->kbuffer = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+	if (!kcs_bmc->data_in || !kcs_bmc->data_out || !kcs_bmc->kbuffer)
+		return NULL;
+
+	kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
+	kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u",
+					       DEVICE_NAME, channel);
+	kcs_bmc->miscdev.fops = &kcs_bmc_fops;
+
+	return kcs_bmc;
+}
+EXPORT_SYMBOL(kcs_bmc_alloc);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_DESCRIPTION("KCS BMC to handle the IPMI request from system software");
diff --git a/drivers/char/ipmi/kcs_bmc.h b/drivers/char/ipmi/kcs_bmc.h
new file mode 100644
index 0000000..eb9ea4c
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2018, Intel Corporation.
+ */
+
+#ifndef __KCS_BMC_H__
+#define __KCS_BMC_H__
+
+#include <linux/miscdevice.h>
+
+/* Different phases of the KCS BMC module.
+ *  KCS_PHASE_IDLE:
+ *            BMC is neither expecting nor sending any data.
+ *  KCS_PHASE_WRITE_START:
+ *            BMC is receiving a WRITE_START command from system software.
+ *  KCS_PHASE_WRITE_DATA:
+ *            BMC is receiving a data byte from system software.
+ *  KCS_PHASE_WRITE_END_CMD:
+ *            BMC is waiting for the last data byte from system software.
+ *  KCS_PHASE_WRITE_DONE:
+ *            BMC has received the whole request from system software.
+ *  KCS_PHASE_WAIT_READ:
+ *            BMC is waiting for the response from the upper IPMI service.
+ *  KCS_PHASE_READ:
+ *            BMC is transferring the response to system software.
+ *  KCS_PHASE_ABORT_ERROR1:
+ *            BMC is waiting for an error status request from system software.
+ *  KCS_PHASE_ABORT_ERROR2:
+ *            BMC is waiting for the idle status after an error from system
+ *            software.
+ *  KCS_PHASE_ERROR:
+ *            BMC has detected a protocol violation at the interface level.
+ */
+enum kcs_phases {
+	KCS_PHASE_IDLE,
+
+	KCS_PHASE_WRITE_START,
+	KCS_PHASE_WRITE_DATA,
+	KCS_PHASE_WRITE_END_CMD,
+	KCS_PHASE_WRITE_DONE,
+
+	KCS_PHASE_WAIT_READ,
+	KCS_PHASE_READ,
+
+	KCS_PHASE_ABORT_ERROR1,
+	KCS_PHASE_ABORT_ERROR2,
+	KCS_PHASE_ERROR
+};
+
+/* IPMI 2.0 - Table 9-4, KCS Interface Status Codes */
+enum kcs_errors {
+	KCS_NO_ERROR                = 0x00,
+	KCS_ABORTED_BY_COMMAND      = 0x01,
+	KCS_ILLEGAL_CONTROL_CODE    = 0x02,
+	KCS_LENGTH_ERROR            = 0x06,
+	KCS_UNSPECIFIED_ERROR       = 0xFF
+};
+
+/* IPMI 2.0 - 9.5, KCS Interface Registers
+ * @idr: Input Data Register
+ * @odr: Output Data Register
+ * @str: Status Register
+ */
+struct kcs_ioreg {
+	u32 idr;
+	u32 odr;
+	u32 str;
+};
+
+struct kcs_bmc {
+	spinlock_t lock;
+
+	u32 channel;
+	int running;
+
+	/* Setup by BMC KCS controller driver */
+	struct kcs_ioreg ioreg;
+	u8 (*io_inputb)(struct kcs_bmc *kcs_bmc, u32 reg);
+	void (*io_outputb)(struct kcs_bmc *kcs_bmc, u32 reg, u8 b);
+
+	enum kcs_phases phase;
+	enum kcs_errors error;
+
+	wait_queue_head_t queue;
+	bool data_in_avail;
+	int  data_in_idx;
+	u8  *data_in;
+
+	int  data_out_idx;
+	int  data_out_len;
+	u8  *data_out;
+
+	struct mutex mutex;
+	u8 *kbuffer;
+
+	struct miscdevice miscdev;
+
+	unsigned long priv[];
+};
+
+static inline void *kcs_bmc_priv(struct kcs_bmc *kcs_bmc)
+{
+	return kcs_bmc->priv;
+}
+
+int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc);
+struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv,
+					u32 channel);
+#endif /* __KCS_BMC_H__ */
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
new file mode 100644
index 0000000..3c95594
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, Intel Corporation.
+ */
+
+#define pr_fmt(fmt) "aspeed-kcs-bmc: " fmt
+
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/regmap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include "kcs_bmc.h"
+
+
+#define DEVICE_NAME     "ast-kcs-bmc"
+
+#define KCS_CHANNEL_MAX     4
+
+/* mapped to lpc-bmc@0 IO space */
+#define LPC_HICR0            0x000
+#define     LPC_HICR0_LPC3E          BIT(7)
+#define     LPC_HICR0_LPC2E          BIT(6)
+#define     LPC_HICR0_LPC1E          BIT(5)
+#define LPC_HICR2            0x008
+#define     LPC_HICR2_IBFIF3         BIT(3)
+#define     LPC_HICR2_IBFIF2         BIT(2)
+#define     LPC_HICR2_IBFIF1         BIT(1)
+#define LPC_HICR4            0x010
+#define     LPC_HICR4_LADR12AS       BIT(7)
+#define     LPC_HICR4_KCSENBL        BIT(2)
+#define LPC_LADR3H           0x014
+#define LPC_LADR3L           0x018
+#define LPC_LADR12H          0x01C
+#define LPC_LADR12L          0x020
+#define LPC_IDR1             0x024
+#define LPC_IDR2             0x028
+#define LPC_IDR3             0x02C
+#define LPC_ODR1             0x030
+#define LPC_ODR2             0x034
+#define LPC_ODR3             0x038
+#define LPC_STR1             0x03C
+#define LPC_STR2             0x040
+#define LPC_STR3             0x044
+
+/* mapped to lpc-host@80 IO space */
+#define LPC_HICRB            0x080
+#define     LPC_HICRB_IBFIF4         BIT(1)
+#define     LPC_HICRB_LPC4E          BIT(0)
+#define LPC_LADR4            0x090
+#define LPC_IDR4             0x094
+#define LPC_ODR4             0x098
+#define LPC_STR4             0x09C
+
+struct aspeed_kcs_bmc {
+	struct regmap *map;
+};
+
+
+static u8 aspeed_kcs_inb(struct kcs_bmc *kcs_bmc, u32 reg)
+{
+	struct aspeed_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+	u32 val = 0;
+	int rc;
+
+	rc = regmap_read(priv->map, reg, &val);
+	WARN(rc != 0, "regmap_read() failed: %d\n", rc);
+
+	return rc == 0 ? (u8)val : 0;
+}
+
+static void aspeed_kcs_outb(struct kcs_bmc *kcs_bmc, u32 reg, u8 data)
+{
+	struct aspeed_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+	int rc;
+
+	rc = regmap_write(priv->map, reg, data);
+	WARN(rc != 0, "regmap_write() failed: %d\n", rc);
+}
+
+
+/*
+ * AST_usrGuide_KCS.pdf
+ * 2. Background:
+ *   We write D for Data and C for Cmd/Status; the default rules are:
+ *     A. KCS1 / KCS2 ( D / C:X / X+4 )
+ *        D / C : CA0h / CA4h
+ *        D / C : CA8h / CACh
+ *     B. KCS3 ( D / C:XX2h / XX3h )
+ *        D / C : CA2h / CA3h
+ *        D / C : CB2h / CB3h
+ *     C. KCS4
+ *        D / C : CA4h / CA5h
+ */
+static void aspeed_kcs_set_address(struct kcs_bmc *kcs_bmc, u16 addr)
+{
+	struct aspeed_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+
+	switch (kcs_bmc->channel) {
+	case 1:
+		regmap_update_bits(priv->map, LPC_HICR4,
+				LPC_HICR4_LADR12AS, 0);
+		regmap_write(priv->map, LPC_LADR12H, addr >> 8);
+		regmap_write(priv->map, LPC_LADR12L, addr & 0xFF);
+		break;
+
+	case 2:
+		regmap_update_bits(priv->map, LPC_HICR4,
+				LPC_HICR4_LADR12AS, LPC_HICR4_LADR12AS);
+		regmap_write(priv->map, LPC_LADR12H, addr >> 8);
+		regmap_write(priv->map, LPC_LADR12L, addr & 0xFF);
+		break;
+
+	case 3:
+		regmap_write(priv->map, LPC_LADR3H, addr >> 8);
+		regmap_write(priv->map, LPC_LADR3L, addr & 0xFF);
+		break;
+
+	case 4:
+		regmap_write(priv->map, LPC_LADR4, ((addr + 1) << 16) |
+			addr);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void aspeed_kcs_enable_channel(struct kcs_bmc *kcs_bmc, bool enable)
+{
+	struct aspeed_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+
+	switch (kcs_bmc->channel) {
+	case 1:
+		if (enable) {
+			regmap_update_bits(priv->map, LPC_HICR2,
+					LPC_HICR2_IBFIF1, LPC_HICR2_IBFIF1);
+			regmap_update_bits(priv->map, LPC_HICR0,
+					LPC_HICR0_LPC1E, LPC_HICR0_LPC1E);
+		} else {
+			regmap_update_bits(priv->map, LPC_HICR0,
+					LPC_HICR0_LPC1E, 0);
+			regmap_update_bits(priv->map, LPC_HICR2,
+					LPC_HICR2_IBFIF1, 0);
+		}
+		break;
+
+	case 2:
+		if (enable) {
+			regmap_update_bits(priv->map, LPC_HICR2,
+					LPC_HICR2_IBFIF2, LPC_HICR2_IBFIF2);
+			regmap_update_bits(priv->map, LPC_HICR0,
+					LPC_HICR0_LPC2E, LPC_HICR0_LPC2E);
+		} else {
+			regmap_update_bits(priv->map, LPC_HICR0,
+					LPC_HICR0_LPC2E, 0);
+			regmap_update_bits(priv->map, LPC_HICR2,
+					LPC_HICR2_IBFIF2, 0);
+		}
+		break;
+
+	case 3:
+		if (enable) {
+			regmap_update_bits(priv->map, LPC_HICR2,
+					LPC_HICR2_IBFIF3, LPC_HICR2_IBFIF3);
+			regmap_update_bits(priv->map, LPC_HICR0,
+					LPC_HICR0_LPC3E, LPC_HICR0_LPC3E);
+			regmap_update_bits(priv->map, LPC_HICR4,
+					LPC_HICR4_KCSENBL, LPC_HICR4_KCSENBL);
+		} else {
+			regmap_update_bits(priv->map, LPC_HICR0,
+					LPC_HICR0_LPC3E, 0);
+			regmap_update_bits(priv->map, LPC_HICR4,
+					LPC_HICR4_KCSENBL, 0);
+			regmap_update_bits(priv->map, LPC_HICR2,
+					LPC_HICR2_IBFIF3, 0);
+		}
+		break;
+
+	case 4:
+		if (enable)
+			regmap_update_bits(priv->map, LPC_HICRB,
+					LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E,
+					LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E);
+		else
+			regmap_update_bits(priv->map, LPC_HICRB,
+					LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E,
+					0);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static irqreturn_t aspeed_kcs_irq(int irq, void *arg)
+{
+	struct kcs_bmc *kcs_bmc = arg;
+
+	if (!kcs_bmc_handle_event(kcs_bmc))
+		return IRQ_HANDLED;
+
+	return IRQ_NONE;
+}
+
+static int aspeed_kcs_config_irq(struct kcs_bmc *kcs_bmc,
+			struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int irq;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	return devm_request_irq(dev, irq, aspeed_kcs_irq, IRQF_SHARED,
+				dev_name(dev), kcs_bmc);
+}
+
+static const struct kcs_ioreg ast_kcs_bmc_ioregs[KCS_CHANNEL_MAX] = {
+	{ .idr = LPC_IDR1, .odr = LPC_ODR1, .str = LPC_STR1 },
+	{ .idr = LPC_IDR2, .odr = LPC_ODR2, .str = LPC_STR2 },
+	{ .idr = LPC_IDR3, .odr = LPC_ODR3, .str = LPC_STR3 },
+	{ .idr = LPC_IDR4, .odr = LPC_ODR4, .str = LPC_STR4 },
+};
+
+static int aspeed_kcs_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct aspeed_kcs_bmc *priv;
+	struct kcs_bmc *kcs_bmc;
+	u32 chan, addr;
+	int rc;
+
+	rc = of_property_read_u32(dev->of_node, "kcs_chan", &chan);
+	if (rc != 0 || chan == 0 || chan > KCS_CHANNEL_MAX) {
+		dev_err(dev, "no valid 'kcs_chan' configured\n");
+		return -ENODEV;
+	}
+
+	rc = of_property_read_u32(dev->of_node, "kcs_addr", &addr);
+	if (rc) {
+		dev_err(dev, "no valid 'kcs_addr' configured\n");
+		return -ENODEV;
+	}
+
+	kcs_bmc = kcs_bmc_alloc(dev, sizeof(*priv), chan);
+	if (!kcs_bmc)
+		return -ENOMEM;
+
+	priv = kcs_bmc_priv(kcs_bmc);
+	priv->map = syscon_node_to_regmap(dev->parent->of_node);
+	if (IS_ERR(priv->map)) {
+		dev_err(dev, "Couldn't get regmap\n");
+		return -ENODEV;
+	}
+
+	kcs_bmc->ioreg = ast_kcs_bmc_ioregs[chan - 1];
+	kcs_bmc->io_inputb = aspeed_kcs_inb;
+	kcs_bmc->io_outputb = aspeed_kcs_outb;
+
+	dev_set_drvdata(dev, kcs_bmc);
+
+	aspeed_kcs_set_address(kcs_bmc, addr);
+	aspeed_kcs_enable_channel(kcs_bmc, true);
+	rc = aspeed_kcs_config_irq(kcs_bmc, pdev);
+	if (rc)
+		return rc;
+
+	rc = misc_register(&kcs_bmc->miscdev);
+	if (rc) {
+		dev_err(dev, "Unable to register device\n");
+		return rc;
+	}
+
+	pr_info("channel=%u addr=0x%x idr=0x%x odr=0x%x str=0x%x\n",
+		chan, addr,
+		kcs_bmc->ioreg.idr, kcs_bmc->ioreg.odr, kcs_bmc->ioreg.str);
+
+	return 0;
+}
+
+static int aspeed_kcs_remove(struct platform_device *pdev)
+{
+	struct kcs_bmc *kcs_bmc = dev_get_drvdata(&pdev->dev);
+
+	misc_deregister(&kcs_bmc->miscdev);
+
+	return 0;
+}
+
+static const struct of_device_id ast_kcs_bmc_match[] = {
+	{ .compatible = "aspeed,ast2400-kcs-bmc" },
+	{ .compatible = "aspeed,ast2500-kcs-bmc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ast_kcs_bmc_match);
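+
+/*
+ * Editorial devicetree sketch (node name, reg and interrupt numbers are
+ * placeholders): the probe routine above only consumes "kcs_chan" and
+ * "kcs_addr", and takes its regmap from the parent LPC syscon node.
+ *
+ *	kcs3: kcs3@0 {
+ *		compatible = "aspeed,ast2500-kcs-bmc";
+ *		reg = <0x0 0x80>;
+ *		interrupts = <8>;
+ *		kcs_chan = <3>;
+ *		kcs_addr = <0xca2>;
+ *		status = "okay";
+ *	};
+ */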
+
+static struct platform_driver ast_kcs_bmc_driver = {
+	.driver = {
+		.name           = DEVICE_NAME,
+		.of_match_table = ast_kcs_bmc_match,
+	},
+	.probe  = aspeed_kcs_probe,
+	.remove = aspeed_kcs_remove,
+};
+module_platform_driver(ast_kcs_bmc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_DESCRIPTION("Aspeed device interface to the KCS BMC device");
diff --git a/drivers/char/ipmi/kcs_bmc_npcm7xx.c b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
new file mode 100644
index 0000000..722f739
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, Nuvoton Corporation.
+ * Copyright (c) 2018, Intel Corporation.
+ */
+
+#define pr_fmt(fmt) "nuvoton-kcs-bmc: " fmt
+
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "kcs_bmc.h"
+
+#define DEVICE_NAME	"npcm-kcs-bmc"
+#define KCS_CHANNEL_MAX	3
+
+#define KCS1ST		0x0C
+#define KCS2ST		0x1E
+#define KCS3ST		0x30
+
+#define KCS1DO		0x0E
+#define KCS2DO		0x20
+#define KCS3DO		0x32
+
+#define KCS1DI		0x10
+#define KCS2DI		0x22
+#define KCS3DI		0x34
+
+#define KCS1CTL		0x18
+#define KCS2CTL		0x2A
+#define KCS3CTL		0x3C
+#define    KCS_CTL_IBFIE	BIT(0)
+
+#define KCS1IE		0x1C
+#define KCS2IE		0x2E
+#define KCS3IE		0x40
+#define    KCS_IE_IRQE          BIT(0)
+#define    KCS_IE_HIRQE         BIT(3)
+
+/*
+ * 7.2.4 Core KCS Registers
+ * Registers in this module are 8 bits. An 8-bit register must be accessed
+ * by an 8-bit read or write.
+ *
+ * sts: KCS Channel n Status Register (KCSnST).
+ * dob: KCS Channel n Data Out Buffer Register (KCSnDO).
+ * dib: KCS Channel n Data In Buffer Register (KCSnDI).
+ * ctl: KCS Channel n Control Register (KCSnCTL).
+ * ie : KCS Channel n Interrupt Enable Register (KCSnIE).
+ */
+struct npcm7xx_kcs_reg {
+	u32 sts;
+	u32 dob;
+	u32 dib;
+	u32 ctl;
+	u32 ie;
+};
+
+struct npcm7xx_kcs_bmc {
+	struct regmap *map;
+
+	const struct npcm7xx_kcs_reg *reg;
+};
+
+static const struct npcm7xx_kcs_reg npcm7xx_kcs_reg_tbl[KCS_CHANNEL_MAX] = {
+	{ .sts = KCS1ST, .dob = KCS1DO, .dib = KCS1DI, .ctl = KCS1CTL, .ie = KCS1IE },
+	{ .sts = KCS2ST, .dob = KCS2DO, .dib = KCS2DI, .ctl = KCS2CTL, .ie = KCS2IE },
+	{ .sts = KCS3ST, .dob = KCS3DO, .dib = KCS3DI, .ctl = KCS3CTL, .ie = KCS3IE },
+};
+
+static u8 npcm7xx_kcs_inb(struct kcs_bmc *kcs_bmc, u32 reg)
+{
+	struct npcm7xx_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+	u32 val = 0;
+	int rc;
+
+	rc = regmap_read(priv->map, reg, &val);
+	WARN(rc != 0, "regmap_read() failed: %d\n", rc);
+
+	return rc == 0 ? (u8)val : 0;
+}
+
+static void npcm7xx_kcs_outb(struct kcs_bmc *kcs_bmc, u32 reg, u8 data)
+{
+	struct npcm7xx_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+	int rc;
+
+	rc = regmap_write(priv->map, reg, data);
+	WARN(rc != 0, "regmap_write() failed: %d\n", rc);
+}
+
+static void npcm7xx_kcs_enable_channel(struct kcs_bmc *kcs_bmc, bool enable)
+{
+	struct npcm7xx_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+
+	regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_IBFIE,
+			   enable ? KCS_CTL_IBFIE : 0);
+
+	regmap_update_bits(priv->map, priv->reg->ie, KCS_IE_IRQE | KCS_IE_HIRQE,
+			   enable ? KCS_IE_IRQE | KCS_IE_HIRQE : 0);
+}
+
+static irqreturn_t npcm7xx_kcs_irq(int irq, void *arg)
+{
+	struct kcs_bmc *kcs_bmc = arg;
+
+	if (!kcs_bmc_handle_event(kcs_bmc))
+		return IRQ_HANDLED;
+
+	return IRQ_NONE;
+}
+
+static int npcm7xx_kcs_config_irq(struct kcs_bmc *kcs_bmc,
+				  struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int irq;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	return devm_request_irq(dev, irq, npcm7xx_kcs_irq, IRQF_SHARED,
+				dev_name(dev), kcs_bmc);
+}
+
+static int npcm7xx_kcs_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct npcm7xx_kcs_bmc *priv;
+	struct kcs_bmc *kcs_bmc;
+	u32 chan;
+	int rc;
+
+	rc = of_property_read_u32(dev->of_node, "kcs_chan", &chan);
+	if (rc != 0 || chan == 0 || chan > KCS_CHANNEL_MAX) {
+		dev_err(dev, "no valid 'kcs_chan' configured\n");
+		return -ENODEV;
+	}
+
+	kcs_bmc = kcs_bmc_alloc(dev, sizeof(*priv), chan);
+	if (!kcs_bmc)
+		return -ENOMEM;
+
+	priv = kcs_bmc_priv(kcs_bmc);
+	priv->map = syscon_node_to_regmap(dev->parent->of_node);
+	if (IS_ERR(priv->map)) {
+		dev_err(dev, "Couldn't get regmap\n");
+		return -ENODEV;
+	}
+	priv->reg = &npcm7xx_kcs_reg_tbl[chan - 1];
+
+	kcs_bmc->ioreg.idr = priv->reg->dib;
+	kcs_bmc->ioreg.odr = priv->reg->dob;
+	kcs_bmc->ioreg.str = priv->reg->sts;
+	kcs_bmc->io_inputb = npcm7xx_kcs_inb;
+	kcs_bmc->io_outputb = npcm7xx_kcs_outb;
+
+	dev_set_drvdata(dev, kcs_bmc);
+
+	npcm7xx_kcs_enable_channel(kcs_bmc, true);
+	rc = npcm7xx_kcs_config_irq(kcs_bmc, pdev);
+	if (rc)
+		return rc;
+
+	rc = misc_register(&kcs_bmc->miscdev);
+	if (rc) {
+		dev_err(dev, "Unable to register device\n");
+		return rc;
+	}
+
+	pr_info("channel=%u idr=0x%x odr=0x%x str=0x%x\n",
+		chan,
+		kcs_bmc->ioreg.idr, kcs_bmc->ioreg.odr, kcs_bmc->ioreg.str);
+
+	return 0;
+}
+
+static int npcm7xx_kcs_remove(struct platform_device *pdev)
+{
+	struct kcs_bmc *kcs_bmc = dev_get_drvdata(&pdev->dev);
+
+	misc_deregister(&kcs_bmc->miscdev);
+
+	return 0;
+}
+
+static const struct of_device_id npcm_kcs_bmc_match[] = {
+	{ .compatible = "nuvoton,npcm750-kcs-bmc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, npcm_kcs_bmc_match);
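+
+/*
+ * Editorial devicetree sketch (reg and interrupt values are
+ * placeholders): as with the Aspeed driver, only "kcs_chan" is consumed
+ * here, and the regmap comes from the parent LPC syscon node.
+ *
+ *	kcs1: kcs1@0 {
+ *		compatible = "nuvoton,npcm750-kcs-bmc";
+ *		reg = <0x0 0x40>;
+ *		interrupts = <0 9 4>;
+ *		kcs_chan = <1>;
+ *		status = "okay";
+ *	};
+ */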
+
+static struct platform_driver npcm_kcs_bmc_driver = {
+	.driver = {
+		.name		= DEVICE_NAME,
+		.of_match_table	= npcm_kcs_bmc_match,
+	},
+	.probe	= npcm7xx_kcs_probe,
+	.remove	= npcm7xx_kcs_remove,
+};
+module_platform_driver(npcm_kcs_bmc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Avi Fishman <avifishman70@gmail.com>");
+MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_DESCRIPTION("NPCM7xx device interface to the KCS BMC device");
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
new file mode 100644
index 0000000..8c4dd1a
--- /dev/null
+++ b/drivers/char/lp.c
@@ -0,0 +1,1102 @@
+/*
+ * Generic parallel printer driver
+ *
+ * Copyright (C) 1992 by Jim Weigand and Linus Torvalds
+ * Copyright (C) 1992,1993 by Michael K. Johnson
+ * - Thanks much to Gunter Windau for pointing out to me where the error
+ *   checking ought to be.
+ * Copyright (C) 1993 by Nigel Gamble (added interrupt code)
+ * Copyright (C) 1994 by Alan Cox (Modularised it)
+ * LPCAREFUL, LPABORT, LPGETSTATUS added by Chris Metcalf, metcalf@lcs.mit.edu
+ * Statistics and support for slow printers by Rob Janssen, rob@knoware.nl
+ * "lp=" command line parameters added by Grant Guenther, grant@torque.net
+ * lp_read (Status readback) support added by Carsten Gross,
+ *                                             carsten@sol.wohnheim.uni-ulm.de
+ * Support for parport by Philip Blundell <philb@gnu.org>
+ * Parport sharing hacking by Andrea Arcangeli
+ * Fixed kernel_(to/from)_user memory copy to check for errors
+ * 				by Riccardo Facchetti <fizban@tin.it>
+ * 22-JAN-1998  Added support for devfs  Richard Gooch <rgooch@atnf.csiro.au>
+ * Redesigned interrupt handling to handle printers with a buggy handshake
+ *				by Andrea Arcangeli, 11 May 1998
+ * Full, efficient handling of printers with a buggy irq handshake (now that
+ * I have understood the meaning of the strange handshake). This is done by
+ * sending new characters if the interrupt has just happened, even if the
+ * printer still claims to be BUSY. This is needed at least with the Epson
+ * Stylus Color. To enable the new TRUST_IRQ mode, read the `LP OPTIMIZATION'
+ * section below...
+ * Fixed the case of the irq arriving on the rising edge of the strobe.
+ * Obsoleted the CAREFUL flag, since a printer that doesn't work with
+ * CAREFUL will simply block a bit later, in lp_check_status().
+ *				Andrea Arcangeli, 15 Oct 1998
+ * Obsoleted and removed all the low-level stuff implemented over the last
+ * month in favour of the IEEE1284 functions (which handle the _new_
+ * compatibility mode fine).
+ */
+
+/* This driver should, in theory, work with any parallel port that has an
+ * appropriate low-level driver; all I/O is done through the parport
+ * abstraction layer.
+ *
+ * If this driver is built into the kernel, you can configure it using the
+ * kernel command-line.  For example:
+ *
+ *	lp=parport1,none,parport2	(bind lp0 to parport1, disable lp1 and
+ *					 bind lp2 to parport2)
+ *
+ *	lp=auto				(assign lp devices to all ports that
+ *				         have printers attached, as determined
+ *					 by the IEEE-1284 autoprobe)
+ * 
+ *	lp=reset			(reset the printer during 
+ *					 initialisation)
+ *
+ *	lp=off				(disable the printer driver entirely)
+ *
+ * If the driver is loaded as a module, similar functionality is available
+ * using module parameters.  The equivalent of the above commands would be:
+ *
+ *	# insmod lp.o parport=1,none,2
+ *
+ *	# insmod lp.o parport=auto
+ *
+ *	# insmod lp.o reset=1
+ */
+
+/* COMPATIBILITY WITH OLD KERNELS
+ *
+ * Under Linux 2.0 and previous versions, lp devices were bound to ports at
+ * particular I/O addresses, as follows:
+ *
+ *	lp0		0x3bc
+ *	lp1		0x378
+ *	lp2		0x278
+ *
+ * The new driver, by default, binds lp devices to parport devices as it
+ * finds them.  This means that if you only have one port, it will be bound
+ * to lp0 regardless of its I/O address.  If you need the old behaviour, you
+ * can force it using the parameters described above.
+ */
+
+/*
+ * The new interrupt handling code takes care of the buggy handshake
+ * of some HP and Epson printers:
+ * ___
+ * ACK    _______________    ___________
+ *                       |__|
+ * ____
+ * BUSY   _________              _______
+ *                 |____________|
+ *
+ * I discovered this using the printer scanner that you can find at:
+ *
+ *	ftp://e-mind.com/pub/linux/pscan/
+ *
+ *					11 May 98, Andrea Arcangeli
+ *
+ * A printer-scanner run on an Epson Stylus Color shows that such printers
+ * generate the irq on the _rising_ edge of STROBE. Now lp handles
+ * this case fine too.
+ *
+ *					15 Oct 1998, Andrea Arcangeli
+ *
+ * The so-called `buggy' handshake is really the well-documented
+ * IEEE1284 compatibility-mode handshake. They changed the well-known
+ * Centronics handshake to ack in the middle of busy, expecting not to
+ * break drivers or legacy applications, but they broke the Linux lp
+ * driver until I fixed it by reverse engineering the protocol by hand
+ * some months ago...
+ *
+ *                                     14 Dec 1998, Andrea Arcangeli
+ *
+ * Copyright (C) 2000 by Tim Waugh (added LPSETTIMEOUT ioctl)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/fcntl.h>
+#include <linux/delay.h>
+#include <linux/poll.h>
+#include <linux/console.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/compat.h>
+
+#include <linux/parport.h>
+#undef LP_STATS
+#include <linux/lp.h>
+
+#include <asm/irq.h>
+#include <linux/uaccess.h>
+
+/* if you have more than 8 printers, remember to increase LP_NO */
+#define LP_NO 8
+
+static DEFINE_MUTEX(lp_mutex);
+static struct lp_struct lp_table[LP_NO];
+
+static unsigned int lp_count;
+static struct class *lp_class;
+
+#ifdef CONFIG_LP_CONSOLE
+static struct parport *console_registered;
+#endif /* CONFIG_LP_CONSOLE */
+
+#undef LP_DEBUG
+
+/* Bits used to manage claiming the parport device */
+#define LP_PREEMPT_REQUEST 1
+#define LP_PARPORT_CLAIMED 2
+
+/* --- low-level port access ----------------------------------- */
+
+#define r_dtr(x)	(parport_read_data(lp_table[(x)].dev->port))
+#define r_str(x)	(parport_read_status(lp_table[(x)].dev->port))
+#define w_ctr(x,y)	do { parport_write_control(lp_table[(x)].dev->port, (y)); } while (0)
+#define w_dtr(x,y)	do { parport_write_data(lp_table[(x)].dev->port, (y)); } while (0)
+
+/* Claim the parport or block trying unless we've already claimed it */
+static void lp_claim_parport_or_block(struct lp_struct *this_lp)
+{
+	if (!test_and_set_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) {
+		parport_claim_or_block (this_lp->dev);
+	}
+}
+
+/* Release the parport, but only if we've claimed it */
+static void lp_release_parport(struct lp_struct *this_lp)
+{
+	if (test_and_clear_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) {
+		parport_release (this_lp->dev);
+	}
+}
+
+static int lp_preempt(void *handle)
+{
+	struct lp_struct *this_lp = (struct lp_struct *)handle;
+
+	set_bit(LP_PREEMPT_REQUEST, &this_lp->bits);
+	return 1;
+}
+
+
+/* 
+ * Try to negotiate to a new mode; if unsuccessful negotiate to
+ * compatibility mode.  Return the mode we ended up in.
+ */
+static int lp_negotiate(struct parport * port, int mode)
+{
+	if (parport_negotiate (port, mode) != 0) {
+		mode = IEEE1284_MODE_COMPAT;
+		parport_negotiate (port, mode);
+	}
+
+	return mode;
+}
+
+static int lp_reset(int minor)
+{
+	int retval;
+	lp_claim_parport_or_block (&lp_table[minor]);
+	w_ctr(minor, LP_PSELECP);
+	udelay (LP_DELAY);
+	w_ctr(minor, LP_PSELECP | LP_PINITP);
+	retval = r_str(minor);
+	lp_release_parport (&lp_table[minor]);
+	return retval;
+}
+
+static void lp_error (int minor)
+{
+	DEFINE_WAIT(wait);
+	int polling;
+
+	if (LP_F(minor) & LP_ABORT)
+		return;
+
+	polling = lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE;
+	if (polling)
+		lp_release_parport(&lp_table[minor]);
+	prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE);
+	schedule_timeout(LP_TIMEOUT_POLLED);
+	finish_wait(&lp_table[minor].waitq, &wait);
+	if (polling)
+		lp_claim_parport_or_block(&lp_table[minor]);
+	else
+		parport_yield_blocking(lp_table[minor].dev);
+}
+
+static int lp_check_status(int minor)
+{
+	int error = 0;
+	unsigned int last = lp_table[minor].last_error;
+	unsigned char status = r_str(minor);
+	if ((status & LP_PERRORP) && !(LP_F(minor) & LP_CAREFUL))
+		/* No error. */
+		last = 0;
+	else if ((status & LP_POUTPA)) {
+		if (last != LP_POUTPA) {
+			last = LP_POUTPA;
+			printk(KERN_INFO "lp%d out of paper\n", minor);
+		}
+		error = -ENOSPC;
+	} else if (!(status & LP_PSELECD)) {
+		if (last != LP_PSELECD) {
+			last = LP_PSELECD;
+			printk(KERN_INFO "lp%d off-line\n", minor);
+		}
+		error = -EIO;
+	} else if (!(status & LP_PERRORP)) {
+		if (last != LP_PERRORP) {
+			last = LP_PERRORP;
+			printk(KERN_INFO "lp%d on fire\n", minor);
+		}
+		error = -EIO;
+	} else {
+		last = 0; /* Come here if LP_CAREFUL is set and no
+                             errors are reported. */
+	}
+
+	lp_table[minor].last_error = last;
+
+	if (last != 0)
+		lp_error(minor);
+
+	return error;
+}
+
+static int lp_wait_ready(int minor, int nonblock)
+{
+	int error = 0;
+
+	/* If we're not in compatibility mode, we're ready now! */
+	if (lp_table[minor].current_mode != IEEE1284_MODE_COMPAT)
+		return 0;
+
+	do {
+		error = lp_check_status (minor);
+		if (error && (nonblock || (LP_F(minor) & LP_ABORT)))
+			break;
+		if (signal_pending (current)) {
+			error = -EINTR;
+			break;
+		}
+	} while (error);
+	return error;
+}
+
+static ssize_t lp_write(struct file * file, const char __user * buf,
+		        size_t count, loff_t *ppos)
+{
+	unsigned int minor = iminor(file_inode(file));
+	struct parport *port = lp_table[minor].dev->port;
+	char *kbuf = lp_table[minor].lp_buffer;
+	ssize_t retv = 0;
+	ssize_t written;
+	size_t copy_size = count;
+	int nonblock = ((file->f_flags & O_NONBLOCK) ||
+			(LP_F(minor) & LP_ABORT));
+
+#ifdef LP_STATS
+	if (time_after(jiffies, lp_table[minor].lastcall + LP_TIME(minor)))
+		lp_table[minor].runchars = 0;
+
+	lp_table[minor].lastcall = jiffies;
+#endif
+
+	/* Need to copy the data from user-space. */
+	if (copy_size > LP_BUFFER_SIZE)
+		copy_size = LP_BUFFER_SIZE;
+
+	if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
+		return -EINTR;
+
+	if (copy_from_user (kbuf, buf, copy_size)) {
+		retv = -EFAULT;
+		goto out_unlock;
+	}
+
+	/* Claim the parport or sleep until it becomes available. */
+	lp_claim_parport_or_block (&lp_table[minor]);
+	/* Go to the proper mode. */
+	lp_table[minor].current_mode = lp_negotiate (port, 
+						     lp_table[minor].best_mode);
+
+	parport_set_timeout (lp_table[minor].dev,
+			     (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK
+			      : lp_table[minor].timeout));
+
+	retv = lp_wait_ready(minor, nonblock);
+	if (retv == 0)
+	do {
+		/* Write the data. */
+		written = parport_write (port, kbuf, copy_size);
+		if (written > 0) {
+			copy_size -= written;
+			count -= written;
+			buf  += written;
+			retv += written;
+		}
+
+		if (signal_pending (current)) {
+			if (retv == 0)
+				retv = -EINTR;
+
+			break;
+		}
+
+		if (copy_size > 0) {
+			/* incomplete write -> check error ! */
+			int error;
+
+			parport_negotiate (lp_table[minor].dev->port, 
+					   IEEE1284_MODE_COMPAT);
+			lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
+
+			error = lp_wait_ready (minor, nonblock);
+
+			if (error) {
+				if (retv == 0)
+					retv = error;
+				break;
+			} else if (nonblock) {
+				if (retv == 0)
+					retv = -EAGAIN;
+				break;
+			}
+
+			parport_yield_blocking (lp_table[minor].dev);
+			lp_table[minor].current_mode 
+			  = lp_negotiate (port, 
+					  lp_table[minor].best_mode);
+
+		} else if (need_resched())
+			schedule ();
+
+		if (count) {
+			copy_size = count;
+			if (copy_size > LP_BUFFER_SIZE)
+				copy_size = LP_BUFFER_SIZE;
+
+			if (copy_from_user(kbuf, buf, copy_size)) {
+				if (retv == 0)
+					retv = -EFAULT;
+				break;
+			}
+		}	
+	} while (count > 0);
+
+	if (test_and_clear_bit(LP_PREEMPT_REQUEST, 
+			       &lp_table[minor].bits)) {
+		printk(KERN_INFO "lp%d releasing parport\n", minor);
+		parport_negotiate (lp_table[minor].dev->port, 
+				   IEEE1284_MODE_COMPAT);
+		lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
+		lp_release_parport (&lp_table[minor]);
+	}
+out_unlock:
+	mutex_unlock(&lp_table[minor].port_mutex);
+
+ 	return retv;
+}
+
+#ifdef CONFIG_PARPORT_1284
+
+/* Status readback conforming to ieee1284 */
+static ssize_t lp_read(struct file * file, char __user * buf,
+		       size_t count, loff_t *ppos)
+{
+	DEFINE_WAIT(wait);
+	unsigned int minor = iminor(file_inode(file));
+	struct parport *port = lp_table[minor].dev->port;
+	ssize_t retval = 0;
+	char *kbuf = lp_table[minor].lp_buffer;
+	int nonblock = ((file->f_flags & O_NONBLOCK) ||
+			(LP_F(minor) & LP_ABORT));
+
+	if (count > LP_BUFFER_SIZE)
+		count = LP_BUFFER_SIZE;
+
+	if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
+		return -EINTR;
+
+	lp_claim_parport_or_block (&lp_table[minor]);
+
+	parport_set_timeout (lp_table[minor].dev,
+			     (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK
+			      : lp_table[minor].timeout));
+
+	parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
+	if (parport_negotiate (lp_table[minor].dev->port,
+			       IEEE1284_MODE_NIBBLE)) {
+		retval = -EIO;
+		goto out;
+	}
+
+	while (retval == 0) {
+		retval = parport_read (port, kbuf, count);
+
+		if (retval > 0)
+			break;
+
+		if (nonblock) {
+			retval = -EAGAIN;
+			break;
+		}
+
+		/* Wait for data. */
+
+		if (lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE) {
+			parport_negotiate (lp_table[minor].dev->port,
+					   IEEE1284_MODE_COMPAT);
+			lp_error (minor);
+			if (parport_negotiate (lp_table[minor].dev->port,
+					       IEEE1284_MODE_NIBBLE)) {
+				retval = -EIO;
+				goto out;
+			}
+		} else {
+			prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE);
+			schedule_timeout(LP_TIMEOUT_POLLED);
+			finish_wait(&lp_table[minor].waitq, &wait);
+		}
+
+		if (signal_pending (current)) {
+			retval = -ERESTARTSYS;
+			break;
+		}
+
+		cond_resched ();
+	}
+	parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
+ out:
+	lp_release_parport (&lp_table[minor]);
+
+	if (retval > 0 && copy_to_user (buf, kbuf, retval))
+		retval = -EFAULT;
+
+	mutex_unlock(&lp_table[minor].port_mutex);
+
+	return retval;
+}
+
+#endif /* IEEE 1284 support */
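+
+/*
+ * Editorial sketch: with CONFIG_PARPORT_1284 enabled, device status can
+ * be read back with a plain read() on the same node (path and buffer
+ * size are examples):
+ *
+ *	int fd = open("/dev/lp0", O_RDWR);
+ *	unsigned char buf[64];
+ *	ssize_t n = read(fd, buf, sizeof(buf));	// nibble-mode data
+ */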
+
+static int lp_open(struct inode * inode, struct file * file)
+{
+	unsigned int minor = iminor(inode);
+	int ret = 0;
+
+	mutex_lock(&lp_mutex);
+	if (minor >= LP_NO) {
+		ret = -ENXIO;
+		goto out;
+	}
+	if ((LP_F(minor) & LP_EXIST) == 0) {
+		ret = -ENXIO;
+		goto out;
+	}
+	if (test_and_set_bit(LP_BUSY_BIT_POS, &LP_F(minor))) {
+		ret = -EBUSY;
+		goto out;
+	}
+	/* If ABORTOPEN is set and the printer is offline or out of paper,
+	   we may still want to open it to perform ioctl()s.  Therefore we
+	   have commandeered O_NONBLOCK, even though it is being used in
+	   a non-standard manner.  This is strictly a Linux hack, and
+	   should most likely only ever be used by the tunelp application. */
+	if ((LP_F(minor) & LP_ABORTOPEN) && !(file->f_flags & O_NONBLOCK)) {
+		int status;
+		lp_claim_parport_or_block (&lp_table[minor]);
+		status = r_str(minor);
+		lp_release_parport (&lp_table[minor]);
+		if (status & LP_POUTPA) {
+			printk(KERN_INFO "lp%d out of paper\n", minor);
+			LP_F(minor) &= ~LP_BUSY;
+			ret = -ENOSPC;
+			goto out;
+		} else if (!(status & LP_PSELECD)) {
+			printk(KERN_INFO "lp%d off-line\n", minor);
+			LP_F(minor) &= ~LP_BUSY;
+			ret = -EIO;
+			goto out;
+		} else if (!(status & LP_PERRORP)) {
+			printk(KERN_ERR "lp%d printer error\n", minor);
+			LP_F(minor) &= ~LP_BUSY;
+			ret = -EIO;
+			goto out;
+		}
+	}
+	lp_table[minor].lp_buffer = kmalloc(LP_BUFFER_SIZE, GFP_KERNEL);
+	if (!lp_table[minor].lp_buffer) {
+		LP_F(minor) &= ~LP_BUSY;
+		ret = -ENOMEM;
+		goto out;
+	}
+	/* Determine if the peripheral supports ECP mode */
+	lp_claim_parport_or_block (&lp_table[minor]);
+	if ((lp_table[minor].dev->port->modes & PARPORT_MODE_ECP) &&
+	    !parport_negotiate(lp_table[minor].dev->port,
+			       IEEE1284_MODE_ECP)) {
+		printk (KERN_INFO "lp%d: ECP mode\n", minor);
+		lp_table[minor].best_mode = IEEE1284_MODE_ECP;
+	} else {
+		lp_table[minor].best_mode = IEEE1284_MODE_COMPAT;
+	}
+	/* Leave peripheral in compatibility mode */
+	parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
+	lp_release_parport (&lp_table[minor]);
+	lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
+out:
+	mutex_unlock(&lp_mutex);
+	return ret;
+}
+
+static int lp_release(struct inode * inode, struct file * file)
+{
+	unsigned int minor = iminor(inode);
+
+	lp_claim_parport_or_block (&lp_table[minor]);
+	parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
+	lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
+	lp_release_parport (&lp_table[minor]);
+	kfree(lp_table[minor].lp_buffer);
+	lp_table[minor].lp_buffer = NULL;
+	LP_F(minor) &= ~LP_BUSY;
+	return 0;
+}
+
+static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
+	unsigned long arg, void __user *argp)
+{
+	int status;
+	int retval = 0;
+
+#ifdef LP_DEBUG
+	printk(KERN_DEBUG "lp%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg);
+#endif
+	if (minor >= LP_NO)
+		return -ENODEV;
+	if ((LP_F(minor) & LP_EXIST) == 0)
+		return -ENODEV;
+	switch (cmd) {
+		case LPTIME:
+			if (arg > UINT_MAX / HZ)
+				return -EINVAL;
+			LP_TIME(minor) = arg * HZ/100;
+			break;
+		case LPCHAR:
+			LP_CHAR(minor) = arg;
+			break;
+		case LPABORT:
+			if (arg)
+				LP_F(minor) |= LP_ABORT;
+			else
+				LP_F(minor) &= ~LP_ABORT;
+			break;
+		case LPABORTOPEN:
+			if (arg)
+				LP_F(minor) |= LP_ABORTOPEN;
+			else
+				LP_F(minor) &= ~LP_ABORTOPEN;
+			break;
+		case LPCAREFUL:
+			if (arg)
+				LP_F(minor) |= LP_CAREFUL;
+			else
+				LP_F(minor) &= ~LP_CAREFUL;
+			break;
+		case LPWAIT:
+			LP_WAIT(minor) = arg;
+			break;
+		case LPSETIRQ:
+			return -EINVAL;
+		case LPGETIRQ:
+			if (copy_to_user(argp, &LP_IRQ(minor),
+					sizeof(int)))
+				return -EFAULT;
+			break;
+		case LPGETSTATUS:
+			if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
+				return -EINTR;
+			lp_claim_parport_or_block (&lp_table[minor]);
+			status = r_str(minor);
+			lp_release_parport (&lp_table[minor]);
+			mutex_unlock(&lp_table[minor].port_mutex);
+
+			if (copy_to_user(argp, &status, sizeof(int)))
+				return -EFAULT;
+			break;
+		case LPRESET:
+			lp_reset(minor);
+			break;
+#ifdef LP_STATS
+		case LPGETSTATS:
+			if (copy_to_user(argp, &LP_STAT(minor),
+					sizeof(struct lp_stats)))
+				return -EFAULT;
+			if (capable(CAP_SYS_ADMIN))
+				memset(&LP_STAT(minor), 0,
+						sizeof(struct lp_stats));
+			break;
+#endif
+ 		case LPGETFLAGS:
+ 			status = LP_F(minor);
+			if (copy_to_user(argp, &status, sizeof(int)))
+				return -EFAULT;
+			break;
+
+		default:
+			retval = -EINVAL;
+	}
+	return retval;
+}
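+
+/*
+ * Editorial sketch: most of these ioctls are normally issued by the
+ * tunelp utility, but a direct call looks like this (device path is an
+ * example; LPGETSTATUS and LPABORT come from <linux/lp.h>):
+ *
+ *	int fd = open("/dev/lp0", O_WRONLY);
+ *	int status;
+ *
+ *	ioctl(fd, LPGETSTATUS, &status);	// raw parport status bits
+ *	ioctl(fd, LPABORT, 1);			// abort on error, don't retry
+ */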
+
+static int lp_set_timeout(unsigned int minor, s64 tv_sec, long tv_usec)
+{
+	long to_jiffies;
+
+	/* Convert to jiffies, place in lp_table */
+	if (tv_sec < 0 || tv_usec < 0)
+		return -EINVAL;
+
+	/*
+	 * we used to not check, so let's not make this fatal,
+	 * but deal with user space passing a 32-bit tv_nsec in
+	 * a 64-bit field, capping the timeout to 1 second
+	 * worth of microseconds, and capping the total at
+	 * MAX_JIFFY_OFFSET.
+	 */
+	if (tv_usec > 999999)
+		tv_usec = 999999;
+
+	if (tv_sec >= MAX_SEC_IN_JIFFIES - 1) {
+		to_jiffies = MAX_JIFFY_OFFSET;
+	} else {
+		to_jiffies = DIV_ROUND_UP(tv_usec, 1000000/HZ);
+		to_jiffies += tv_sec * (long) HZ;
+	}
+
+	if (to_jiffies <= 0) {
+		return -EINVAL;
+	}
+	lp_table[minor].timeout = to_jiffies;
+	return 0;
+}
+
+static int lp_set_timeout32(unsigned int minor, void __user *arg)
+{
+	s32 karg[2];
+
+	if (copy_from_user(karg, arg, sizeof(karg)))
+		return -EFAULT;
+
+	return lp_set_timeout(minor, karg[0], karg[1]);
+}
+
+static int lp_set_timeout64(unsigned int minor, void __user *arg)
+{
+	s64 karg[2];
+
+	if (copy_from_user(karg, arg, sizeof(karg)))
+		return -EFAULT;
+
+	return lp_set_timeout(minor, karg[0], karg[1]);
+}
+
+static long lp_ioctl(struct file *file, unsigned int cmd,
+			unsigned long arg)
+{
+	unsigned int minor;
+	int ret;
+
+	minor = iminor(file_inode(file));
+	mutex_lock(&lp_mutex);
+	switch (cmd) {
+	case LPSETTIMEOUT_OLD:
+		if (BITS_PER_LONG == 32) {
+			ret = lp_set_timeout32(minor, (void __user *)arg);
+			break;
+		}
+		/* fallthrough for 64-bit */
+	case LPSETTIMEOUT_NEW:
+		ret = lp_set_timeout64(minor, (void __user *)arg);
+		break;
+	default:
+		ret = lp_do_ioctl(minor, cmd, arg, (void __user *)arg);
+		break;
+	}
+	mutex_unlock(&lp_mutex);
+
+	return ret;
+}
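+
+/*
+ * Editorial sketch: userspace sets the inactivity timeout by passing a
+ * struct timeval through LPSETTIMEOUT; <linux/lp.h> maps that to the
+ * _OLD or _NEW command number depending on the size of time_t, and both
+ * land in lp_set_timeout() above.  Path and values are examples.
+ *
+ *	#include <sys/time.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/lp.h>
+ *
+ *	struct timeval tv = { .tv_sec = 10, .tv_usec = 0 };
+ *	int fd = open("/dev/lp0", O_WRONLY);
+ *
+ *	ioctl(fd, LPSETTIMEOUT, &tv);	// give up writes after 10s idle
+ */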
+
+#ifdef CONFIG_COMPAT
+static long lp_compat_ioctl(struct file *file, unsigned int cmd,
+			unsigned long arg)
+{
+	unsigned int minor;
+	int ret;
+
+	minor = iminor(file_inode(file));
+	mutex_lock(&lp_mutex);
+	switch (cmd) {
+	case LPSETTIMEOUT_OLD:
+		if (!COMPAT_USE_64BIT_TIME) {
+			ret = lp_set_timeout32(minor, (void __user *)arg);
+			break;
+		}
+		/* fallthrough for x32 mode */
+	case LPSETTIMEOUT_NEW:
+		ret = lp_set_timeout64(minor, (void __user *)arg);
+		break;
+#ifdef LP_STATS
+	case LPGETSTATS:
+		/* FIXME: add an implementation if you set LP_STATS */
+		ret = -EINVAL;
+		break;
+#endif
+	default:
+		ret = lp_do_ioctl(minor, cmd, arg, compat_ptr(arg));
+		break;
+	}
+	mutex_unlock(&lp_mutex);
+
+	return ret;
+}
+#endif
+
+static const struct file_operations lp_fops = {
+	.owner		= THIS_MODULE,
+	.write		= lp_write,
+	.unlocked_ioctl	= lp_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= lp_compat_ioctl,
+#endif
+	.open		= lp_open,
+	.release	= lp_release,
+#ifdef CONFIG_PARPORT_1284
+	.read		= lp_read,
+#endif
+	.llseek		= noop_llseek,
+};
+
+/* --- support for console on the line printer ----------------- */
+
+#ifdef CONFIG_LP_CONSOLE
+
+#define CONSOLE_LP 0
+
+/* If the printer is out of paper, we can either lose the messages or
+ * stall until the printer is happy again.  Define CONSOLE_LP_STRICT
+ * non-zero to get the latter behaviour. */
+#define CONSOLE_LP_STRICT 1
+
+/* The console must be locked when we get here. */
+
+static void lp_console_write (struct console *co, const char *s,
+			      unsigned count)
+{
+	struct pardevice *dev = lp_table[CONSOLE_LP].dev;
+	struct parport *port = dev->port;
+	ssize_t written;
+
+	if (parport_claim (dev))
+		/* Nothing we can do. */
+		return;
+
+	parport_set_timeout (dev, 0);
+
+	/* Go to compatibility mode. */
+	parport_negotiate (port, IEEE1284_MODE_COMPAT);
+
+	do {
+		/* Write the data, converting LF->CRLF as we go. */
+		ssize_t canwrite = count;
+		char *lf = memchr (s, '\n', count);
+		if (lf)
+			canwrite = lf - s;
+
+		if (canwrite > 0) {
+			written = parport_write (port, s, canwrite);
+
+			if (written <= 0)
+				continue;
+
+			s += written;
+			count -= written;
+			canwrite -= written;
+		}
+
+		if (lf && canwrite <= 0) {
+			const char *crlf = "\r\n";
+			int i = 2;
+
+			/* Dodge the original '\n', and put '\r\n' instead. */
+			s++;
+			count--;
+			do {
+				written = parport_write (port, crlf, i);
+				if (written > 0)
+					i -= written, crlf += written;
+			} while (i > 0 && (CONSOLE_LP_STRICT || written > 0));
+		}
+	} while (count > 0 && (CONSOLE_LP_STRICT || written > 0));
+
+	parport_release (dev);
+}
+
+static struct console lpcons = {
+	.name		= "lp",
+	.write		= lp_console_write,
+	.flags		= CON_PRINTBUFFER,
+};
+
+#endif /* console on line printer */
+
+/* --- initialisation code ------------------------------------- */
+
+static int parport_nr[LP_NO] = { [0 ... LP_NO-1] = LP_PARPORT_UNSPEC };
+static char *parport[LP_NO];
+static bool reset;
+
+module_param_array(parport, charp, NULL, 0);
+module_param(reset, bool, 0);
+
+#ifndef MODULE
+static int __init lp_setup (char *str)
+{
+	static int parport_ptr;
+	int x;
+
+	if (get_option(&str, &x)) {
+		if (x == 0) {
+			/* disable driver on "lp=" or "lp=0" */
+			parport_nr[0] = LP_PARPORT_OFF;
+		} else {
+			printk(KERN_WARNING "warning: 'lp=0x%x' is deprecated, ignored\n", x);
+			return 0;
+		}
+	} else if (!strncmp(str, "parport", 7)) {
+		int n = simple_strtoul(str+7, NULL, 10);
+		if (parport_ptr < LP_NO)
+			parport_nr[parport_ptr++] = n;
+		else
+			printk(KERN_INFO "lp: too many ports, %s ignored.\n",
+			       str);
+	} else if (!strcmp(str, "auto")) {
+		parport_nr[0] = LP_PARPORT_AUTO;
+	} else if (!strcmp(str, "none")) {
+		if (parport_ptr < LP_NO)
+			parport_nr[parport_ptr++] = LP_PARPORT_NONE;
+		else
+			printk(KERN_INFO "lp: too many ports, %s ignored.\n",
+			       str);
+	} else if (!strcmp(str, "reset")) {
+		reset = true;
+	}
+	return 1;
+}
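+
+/*
+ * Illustrative boot line (an example, not the only form): booting with
+ * "lp=parport0 lp=none lp=parport2" binds lp0 to parport0, leaves lp1
+ * unused and binds lp2 to parport2; "lp=0" disables the driver.
+ */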
+#endif
+
+static int lp_register(int nr, struct parport *port)
+{
+	lp_table[nr].dev = parport_register_device(port, "lp", 
+						   lp_preempt, NULL, NULL, 0,
+						   (void *) &lp_table[nr]);
+	if (lp_table[nr].dev == NULL)
+		return 1;
+	lp_table[nr].flags |= LP_EXIST;
+
+	if (reset)
+		lp_reset(nr);
+
+	device_create(lp_class, port->dev, MKDEV(LP_MAJOR, nr), NULL,
+		      "lp%d", nr);
+
+	printk(KERN_INFO "lp%d: using %s (%s).\n", nr, port->name,
+	       (port->irq == PARPORT_IRQ_NONE) ? "polling" : "interrupt-driven");
+
+#ifdef CONFIG_LP_CONSOLE
+	if (!nr) {
+		if (port->modes & PARPORT_MODE_SAFEININT) {
+			register_console(&lpcons);
+			console_registered = port;
+			printk (KERN_INFO "lp%d: console ready\n", CONSOLE_LP);
+		} else
+			printk (KERN_ERR "lp%d: cannot run console on %s\n",
+				CONSOLE_LP, port->name);
+	}
+#endif
+
+	return 0;
+}
+
+static void lp_attach (struct parport *port)
+{
+	unsigned int i;
+
+	switch (parport_nr[0]) {
+	case LP_PARPORT_UNSPEC:
+	case LP_PARPORT_AUTO:
+		if (parport_nr[0] == LP_PARPORT_AUTO &&
+		    port->probe_info[0].class != PARPORT_CLASS_PRINTER)
+			return;
+		if (lp_count == LP_NO) {
+			printk(KERN_INFO "lp: ignoring parallel port (max. %d)\n",LP_NO);
+			return;
+		}
+		if (!lp_register(lp_count, port))
+			lp_count++;
+		break;
+
+	default:
+		for (i = 0; i < LP_NO; i++) {
+			if (port->number == parport_nr[i]) {
+				if (!lp_register(i, port))
+					lp_count++;
+				break;
+			}
+		}
+		break;
+	}
+}
+
+static void lp_detach (struct parport *port)
+{
+	/* Write this some day. */
+#ifdef CONFIG_LP_CONSOLE
+	if (console_registered == port) {
+		unregister_console(&lpcons);
+		console_registered = NULL;
+	}
+#endif /* CONFIG_LP_CONSOLE */
+}
+
+static struct parport_driver lp_driver = {
+	.name = "lp",
+	.attach = lp_attach,
+	.detach = lp_detach,
+};
+
+static int __init lp_init (void)
+{
+	int i, err = 0;
+
+	if (parport_nr[0] == LP_PARPORT_OFF)
+		return 0;
+
+	for (i = 0; i < LP_NO; i++) {
+		lp_table[i].dev = NULL;
+		lp_table[i].flags = 0;
+		lp_table[i].chars = LP_INIT_CHAR;
+		lp_table[i].time = LP_INIT_TIME;
+		lp_table[i].wait = LP_INIT_WAIT;
+		lp_table[i].lp_buffer = NULL;
+#ifdef LP_STATS
+		lp_table[i].lastcall = 0;
+		lp_table[i].runchars = 0;
+		memset (&lp_table[i].stats, 0, sizeof (struct lp_stats));
+#endif
+		lp_table[i].last_error = 0;
+		init_waitqueue_head (&lp_table[i].waitq);
+		init_waitqueue_head (&lp_table[i].dataq);
+		mutex_init(&lp_table[i].port_mutex);
+		lp_table[i].timeout = 10 * HZ;
+	}
+
+	if (register_chrdev (LP_MAJOR, "lp", &lp_fops)) {
+		printk (KERN_ERR "lp: unable to get major %d\n", LP_MAJOR);
+		return -EIO;
+	}
+
+	lp_class = class_create(THIS_MODULE, "printer");
+	if (IS_ERR(lp_class)) {
+		err = PTR_ERR(lp_class);
+		goto out_reg;
+	}
+
+	if (parport_register_driver (&lp_driver)) {
+		printk (KERN_ERR "lp: unable to register with parport\n");
+		err = -EIO;
+		goto out_class;
+	}
+
+	if (!lp_count) {
+		printk (KERN_INFO "lp: driver loaded but no devices found\n");
+#ifndef CONFIG_PARPORT_1284
+		if (parport_nr[0] == LP_PARPORT_AUTO)
+			printk (KERN_INFO "lp: (is IEEE 1284 support enabled?)\n");
+#endif
+	}
+
+	return 0;
+
+out_class:
+	class_destroy(lp_class);
+out_reg:
+	unregister_chrdev(LP_MAJOR, "lp");
+	return err;
+}
+
+static int __init lp_init_module (void)
+{
+	if (parport[0]) {
+		/* The user gave some parameters.  Let's see what they were.  */
+		if (!strncmp(parport[0], "auto", 4))
+			parport_nr[0] = LP_PARPORT_AUTO;
+		else {
+			int n;
+			for (n = 0; n < LP_NO && parport[n]; n++) {
+				if (!strncmp(parport[n], "none", 4))
+					parport_nr[n] = LP_PARPORT_NONE;
+				else {
+					char *ep;
+					unsigned long r = simple_strtoul(parport[n], &ep, 0);
+					if (ep != parport[n]) 
+						parport_nr[n] = r;
+					else {
+						printk(KERN_ERR "lp: bad port specifier `%s'\n", parport[n]);
+						return -ENODEV;
+					}
+				}
+			}
+		}
+	}
+
+	return lp_init();
+}
+
+static void lp_cleanup_module (void)
+{
+	unsigned int offset;
+
+	parport_unregister_driver (&lp_driver);
+
+#ifdef CONFIG_LP_CONSOLE
+	unregister_console (&lpcons);
+#endif
+
+	unregister_chrdev(LP_MAJOR, "lp");
+	for (offset = 0; offset < LP_NO; offset++) {
+		if (lp_table[offset].dev == NULL)
+			continue;
+		parport_unregister_device(lp_table[offset].dev);
+		device_destroy(lp_class, MKDEV(LP_MAJOR, offset));
+	}
+	class_destroy(lp_class);
+}
+
+__setup("lp=", lp_setup);
+module_init(lp_init_module);
+module_exit(lp_cleanup_module);
+
+MODULE_ALIAS_CHARDEV_MAJOR(LP_MAJOR);
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
new file mode 100644
index 0000000..8c9216a
--- /dev/null
+++ b/drivers/char/mbcs.c
@@ -0,0 +1,830 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2005 Silicon Graphics, Inc.  All rights reserved.
+ */
+
+/*
+ *	MOATB Core Services driver.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/uio.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/tiocx.h>
+#include "mbcs.h"
+
+#define MBCS_DEBUG 0
+#if MBCS_DEBUG
+#define DBG(fmt...)    printk(KERN_ALERT fmt)
+#else
+#define DBG(fmt...)
+#endif
+static DEFINE_MUTEX(mbcs_mutex);
+static int mbcs_major;
+
+static LIST_HEAD(soft_list);
+
+/*
+ * file operations
+ */
+static const struct file_operations mbcs_ops = {
+	.open = mbcs_open,
+	.llseek = mbcs_sram_llseek,
+	.read = mbcs_sram_read,
+	.write = mbcs_sram_write,
+	.mmap = mbcs_gscr_mmap,
+};
+
+struct mbcs_callback_arg {
+	int minor;
+	struct cx_dev *cx_dev;
+};
+
+static inline void mbcs_getdma_init(struct getdma *gdma)
+{
+	memset(gdma, 0, sizeof(struct getdma));
+	gdma->DoneIntEnable = 1;
+}
+
+static inline void mbcs_putdma_init(struct putdma *pdma)
+{
+	memset(pdma, 0, sizeof(struct putdma));
+	pdma->DoneIntEnable = 1;
+}
+
+static inline void mbcs_algo_init(struct algoblock *algo_soft)
+{
+	memset(algo_soft, 0, sizeof(struct algoblock));
+}
+
+static inline void mbcs_getdma_set(void *mmr,
+		       uint64_t hostAddr,
+		       uint64_t localAddr,
+		       uint64_t localRamSel,
+		       uint64_t numPkts,
+		       uint64_t amoEnable,
+		       uint64_t intrEnable,
+		       uint64_t peerIO,
+		       uint64_t amoHostDest,
+		       uint64_t amoModType, uint64_t intrHostDest,
+		       uint64_t intrVector)
+{
+	union dma_control rdma_control;
+	union dma_amo_dest amo_dest;
+	union intr_dest intr_dest;
+	union dma_localaddr local_addr;
+	union dma_hostaddr host_addr;
+
+	rdma_control.dma_control_reg = 0;
+	amo_dest.dma_amo_dest_reg = 0;
+	intr_dest.intr_dest_reg = 0;
+	local_addr.dma_localaddr_reg = 0;
+	host_addr.dma_hostaddr_reg = 0;
+
+	host_addr.dma_sys_addr = hostAddr;
+	MBCS_MMR_SET(mmr, MBCS_RD_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);
+
+	local_addr.dma_ram_addr = localAddr;
+	local_addr.dma_ram_sel = localRamSel;
+	MBCS_MMR_SET(mmr, MBCS_RD_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);
+
+	rdma_control.dma_op_length = numPkts;
+	rdma_control.done_amo_en = amoEnable;
+	rdma_control.done_int_en = intrEnable;
+	rdma_control.pio_mem_n = peerIO;
+	MBCS_MMR_SET(mmr, MBCS_RD_DMA_CTRL, rdma_control.dma_control_reg);
+
+	amo_dest.dma_amo_sys_addr = amoHostDest;
+	amo_dest.dma_amo_mod_type = amoModType;
+	MBCS_MMR_SET(mmr, MBCS_RD_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);
+
+	intr_dest.address = intrHostDest;
+	intr_dest.int_vector = intrVector;
+	MBCS_MMR_SET(mmr, MBCS_RD_DMA_INT_DEST, intr_dest.intr_dest_reg);
+
+}
+
+static inline void mbcs_putdma_set(void *mmr,
+		       uint64_t hostAddr,
+		       uint64_t localAddr,
+		       uint64_t localRamSel,
+		       uint64_t numPkts,
+		       uint64_t amoEnable,
+		       uint64_t intrEnable,
+		       uint64_t peerIO,
+		       uint64_t amoHostDest,
+		       uint64_t amoModType,
+		       uint64_t intrHostDest, uint64_t intrVector)
+{
+	union dma_control wdma_control;
+	union dma_amo_dest amo_dest;
+	union intr_dest intr_dest;
+	union dma_localaddr local_addr;
+	union dma_hostaddr host_addr;
+
+	wdma_control.dma_control_reg = 0;
+	amo_dest.dma_amo_dest_reg = 0;
+	intr_dest.intr_dest_reg = 0;
+	local_addr.dma_localaddr_reg = 0;
+	host_addr.dma_hostaddr_reg = 0;
+
+	host_addr.dma_sys_addr = hostAddr;
+	MBCS_MMR_SET(mmr, MBCS_WR_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);
+
+	local_addr.dma_ram_addr = localAddr;
+	local_addr.dma_ram_sel = localRamSel;
+	MBCS_MMR_SET(mmr, MBCS_WR_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);
+
+	wdma_control.dma_op_length = numPkts;
+	wdma_control.done_amo_en = amoEnable;
+	wdma_control.done_int_en = intrEnable;
+	wdma_control.pio_mem_n = peerIO;
+	MBCS_MMR_SET(mmr, MBCS_WR_DMA_CTRL, wdma_control.dma_control_reg);
+
+	amo_dest.dma_amo_sys_addr = amoHostDest;
+	amo_dest.dma_amo_mod_type = amoModType;
+	MBCS_MMR_SET(mmr, MBCS_WR_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);
+
+	intr_dest.address = intrHostDest;
+	intr_dest.int_vector = intrVector;
+	MBCS_MMR_SET(mmr, MBCS_WR_DMA_INT_DEST, intr_dest.intr_dest_reg);
+
+}
+
+static inline void mbcs_algo_set(void *mmr,
+		     uint64_t amoHostDest,
+		     uint64_t amoModType,
+		     uint64_t intrHostDest,
+		     uint64_t intrVector, uint64_t algoStepCount)
+{
+	union dma_amo_dest amo_dest;
+	union intr_dest intr_dest;
+	union algo_step step;
+
+	step.algo_step_reg = 0;
+	intr_dest.intr_dest_reg = 0;
+	amo_dest.dma_amo_dest_reg = 0;
+
+	amo_dest.dma_amo_sys_addr = amoHostDest;
+	amo_dest.dma_amo_mod_type = amoModType;
+	MBCS_MMR_SET(mmr, MBCS_ALG_AMO_DEST, amo_dest.dma_amo_dest_reg);
+
+	intr_dest.address = intrHostDest;
+	intr_dest.int_vector = intrVector;
+	MBCS_MMR_SET(mmr, MBCS_ALG_INT_DEST, intr_dest.intr_dest_reg);
+
+	step.alg_step_cnt = algoStepCount;
+	MBCS_MMR_SET(mmr, MBCS_ALG_STEP, step.algo_step_reg);
+}
+
+static inline int mbcs_getdma_start(struct mbcs_soft *soft)
+{
+	void *mmr_base;
+	struct getdma *gdma;
+	uint64_t numPkts;
+	union cm_control cm_control;
+
+	mmr_base = soft->mmr_base;
+	gdma = &soft->getdma;
+
+	/* check that host address got setup */
+	if (!gdma->hostAddr)
+		return -1;
+
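+	/* DMA length is programmed in 128-byte cachelines, rounded up:
+	 * effectively DIV_ROUND_UP(gdma->bytes, MBCS_CACHELINE_SIZE). */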
+	numPkts =
+	    (gdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;
+
+	/* program engine */
+	mbcs_getdma_set(mmr_base, tiocx_dma_addr(gdma->hostAddr),
+		   gdma->localAddr,
+		   (gdma->localAddr < MB2) ? 0 :
+		   (gdma->localAddr < MB4) ? 1 :
+		   (gdma->localAddr < MB6) ? 2 : 3,
+		   numPkts,
+		   gdma->DoneAmoEnable,
+		   gdma->DoneIntEnable,
+		   gdma->peerIO,
+		   gdma->amoHostDest,
+		   gdma->amoModType,
+		   gdma->intrHostDest, gdma->intrVector);
+
+	/* start engine */
+	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
+	cm_control.rd_dma_go = 1;
+	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
+
+	return 0;
+
+}
+
+static inline int mbcs_putdma_start(struct mbcs_soft *soft)
+{
+	void *mmr_base;
+	struct putdma *pdma;
+	uint64_t numPkts;
+	union cm_control cm_control;
+
+	mmr_base = soft->mmr_base;
+	pdma = &soft->putdma;
+
+	/* check that host address got setup */
+	if (!pdma->hostAddr)
+		return -1;
+
+	numPkts =
+	    (pdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;
+
+	/* program engine */
+	mbcs_putdma_set(mmr_base, tiocx_dma_addr(pdma->hostAddr),
+		   pdma->localAddr,
+		   (pdma->localAddr < MB2) ? 0 :
+		   (pdma->localAddr < MB4) ? 1 :
+		   (pdma->localAddr < MB6) ? 2 : 3,
+		   numPkts,
+		   pdma->DoneAmoEnable,
+		   pdma->DoneIntEnable,
+		   pdma->peerIO,
+		   pdma->amoHostDest,
+		   pdma->amoModType,
+		   pdma->intrHostDest, pdma->intrVector);
+
+	/* start engine */
+	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
+	cm_control.wr_dma_go = 1;
+	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
+
+	return 0;
+
+}
+
+static inline int mbcs_algo_start(struct mbcs_soft *soft)
+{
+	struct algoblock *algo_soft = &soft->algo;
+	void *mmr_base = soft->mmr_base;
+	union cm_control cm_control;
+
+	if (mutex_lock_interruptible(&soft->algolock))
+		return -ERESTARTSYS;
+
+	atomic_set(&soft->algo_done, 0);
+
+	mbcs_algo_set(mmr_base,
+		 algo_soft->amoHostDest,
+		 algo_soft->amoModType,
+		 algo_soft->intrHostDest,
+		 algo_soft->intrVector, algo_soft->algoStepCount);
+
+	/* start algorithm */
+	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
+	cm_control.alg_done_int_en = 1;
+	cm_control.alg_go = 1;
+	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
+
+	mutex_unlock(&soft->algolock);
+
+	return 0;
+}
+
+static inline ssize_t
+do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr,
+		      size_t len, loff_t * off)
+{
+	int rv = 0;
+
+	if (mutex_lock_interruptible(&soft->dmawritelock))
+		return -ERESTARTSYS;
+
+	atomic_set(&soft->dmawrite_done, 0);
+
+	soft->putdma.hostAddr = hostAddr;
+	soft->putdma.localAddr = *off;
+	soft->putdma.bytes = len;
+
+	if (mbcs_putdma_start(soft) < 0) {
+		DBG(KERN_ALERT "do_mbcs_sram_dmawrite: "
+					"mbcs_putdma_start failed\n");
+		rv = -EAGAIN;
+		goto dmawrite_exit;
+	}
+
+	if (wait_event_interruptible(soft->dmawrite_queue,
+					atomic_read(&soft->dmawrite_done))) {
+		rv = -ERESTARTSYS;
+		goto dmawrite_exit;
+	}
+
+	rv = len;
+	*off += len;
+
+dmawrite_exit:
+	mutex_unlock(&soft->dmawritelock);
+
+	return rv;
+}
+
+static inline ssize_t
+do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr,
+		     size_t len, loff_t * off)
+{
+	int rv = 0;
+
+	if (mutex_lock_interruptible(&soft->dmareadlock))
+		return -ERESTARTSYS;
+
+	atomic_set(&soft->dmaread_done, 0);	/* the flag waited on below */
+
+	soft->getdma.hostAddr = hostAddr;
+	soft->getdma.localAddr = *off;
+	soft->getdma.bytes = len;
+
+	if (mbcs_getdma_start(soft) < 0) {
+		DBG(KERN_ALERT "do_mbcs_sram_dmaread: mbcs_getdma_start failed\n");
+		rv = -EAGAIN;
+		goto dmaread_exit;
+	}
+
+	if (wait_event_interruptible(soft->dmaread_queue,
+					atomic_read(&soft->dmaread_done))) {
+		rv = -ERESTARTSYS;
+		goto dmaread_exit;
+	}
+
+	rv = len;
+	*off += len;
+
+dmaread_exit:
+	mutex_unlock(&soft->dmareadlock);
+
+	return rv;
+}
+
+static int mbcs_open(struct inode *ip, struct file *fp)
+{
+	struct mbcs_soft *soft;
+	int minor;
+
+	mutex_lock(&mbcs_mutex);
+	minor = iminor(ip);
+
+	/*
+	 * mbcs_mutex serializes this walk, but list_add() in mbcs_probe()
+	 * runs without it, so the list is still not fully protected.
+	 */
+	list_for_each_entry(soft, &soft_list, list) {
+		if (soft->nasid == minor) {
+			fp->private_data = soft->cxdev;
+			mutex_unlock(&mbcs_mutex);
+			return 0;
+		}
+	}
+
+	mutex_unlock(&mbcs_mutex);
+	return -ENODEV;
+}
+
+static ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off)
+{
+	struct cx_dev *cx_dev = fp->private_data;
+	struct mbcs_soft *soft = cx_dev->soft;
+	uint64_t hostAddr;
+	int rv = 0;
+
+	hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
+	if (hostAddr == 0)
+		return -ENOMEM;
+
+	rv = do_mbcs_sram_dmawrite(soft, hostAddr, len, off);
+	if (rv < 0)
+		goto exit;
+
+	if (copy_to_user(buf, (void *)hostAddr, len))
+		rv = -EFAULT;
+
+exit:
+	free_pages(hostAddr, get_order(len));
+
+	return rv;
+}
+
+static ssize_t
+mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * off)
+{
+	struct cx_dev *cx_dev = fp->private_data;
+	struct mbcs_soft *soft = cx_dev->soft;
+	uint64_t hostAddr;
+	int rv = 0;
+
+	hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
+	if (hostAddr == 0)
+		return -ENOMEM;
+
+	if (copy_from_user((void *)hostAddr, buf, len)) {
+		rv = -EFAULT;
+		goto exit;
+	}
+
+	rv = do_mbcs_sram_dmaread(soft, hostAddr, len, off);
+
+exit:
+	free_pages(hostAddr, get_order(len));
+
+	return rv;
+}
+
+static loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence)
+{
+	return generic_file_llseek_size(filp, off, whence, MAX_LFS_FILESIZE,
+					MBCS_SRAM_SIZE);
+}
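+
+/*
+ * generic_file_llseek_size() above treats MBCS_SRAM_SIZE as the
+ * end-of-file position, so SEEK_END seeks are taken relative to the
+ * 1 MB SRAM window.
+ */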
+
+static uint64_t mbcs_pioaddr(struct mbcs_soft *soft, uint64_t offset)
+{
+	uint64_t mmr_base;
+
+	mmr_base = (uint64_t) (soft->mmr_base + offset);
+
+	return mmr_base;
+}
+
+static void mbcs_debug_pioaddr_set(struct mbcs_soft *soft)
+{
+	soft->debug_addr = mbcs_pioaddr(soft, MBCS_DEBUG_START);
+}
+
+static void mbcs_gscr_pioaddr_set(struct mbcs_soft *soft)
+{
+	soft->gscr_addr = mbcs_pioaddr(soft, MBCS_GSCR_START);
+}
+
+static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+	struct cx_dev *cx_dev = fp->private_data;
+	struct mbcs_soft *soft = cx_dev->soft;
+
+	if (vma->vm_pgoff != 0)
+		return -EINVAL;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	/* Remap-pfn-range will mark the range VM_IO */
+	if (remap_pfn_range(vma,
+			    vma->vm_start,
+			    __pa(soft->gscr_addr) >> PAGE_SHIFT,
+			    PAGE_SIZE,
+			    vma->vm_page_prot))
+		return -EAGAIN;
+
+	return 0;
+}
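+
+/*
+ * Illustrative user-space counterpart (an assumption, not shipped code):
+ * map the single GSCR page read/write and access a 64-bit register:
+ *
+ *	volatile uint64_t *gscr = mmap(NULL, getpagesize(),
+ *				       PROT_READ | PROT_WRITE,
+ *				       MAP_SHARED, fd, 0);
+ *	uint64_t id = gscr[0];
+ */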
+
+/**
+ * mbcs_completion_intr_handler - Primary completion handler.
+ * @irq: irq
+ * @arg: soft struct for device
+ *
+ */
+static irqreturn_t
+mbcs_completion_intr_handler(int irq, void *arg)
+{
+	struct mbcs_soft *soft = (struct mbcs_soft *)arg;
+	void *mmr_base;
+	union cm_status cm_status;
+	union cm_control cm_control;
+
+	mmr_base = soft->mmr_base;
+	cm_status.cm_status_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_STATUS);
+
+	if (cm_status.rd_dma_done) {
+		/* stop dma-read engine, clear status */
+		cm_control.cm_control_reg =
+		    MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
+		cm_control.rd_dma_clr = 1;
+		MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
+			     cm_control.cm_control_reg);
+		atomic_set(&soft->dmaread_done, 1);
+		wake_up(&soft->dmaread_queue);
+	}
+	if (cm_status.wr_dma_done) {
+		/* stop dma-write engine, clear status */
+		cm_control.cm_control_reg =
+		    MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
+		cm_control.wr_dma_clr = 1;
+		MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
+			     cm_control.cm_control_reg);
+		atomic_set(&soft->dmawrite_done, 1);
+		wake_up(&soft->dmawrite_queue);
+	}
+	if (cm_status.alg_done) {
+		/* clear status */
+		cm_control.cm_control_reg =
+		    MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
+		cm_control.alg_done_clr = 1;
+		MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
+			     cm_control.cm_control_reg);
+		atomic_set(&soft->algo_done, 1);
+		wake_up(&soft->algo_queue);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * mbcs_intr_alloc - Allocate interrupts.
+ * @dev: device pointer
+ *
+ */
+static int mbcs_intr_alloc(struct cx_dev *dev)
+{
+	struct sn_irq_info *sn_irq;
+	struct mbcs_soft *soft;
+	struct getdma *getdma;
+	struct putdma *putdma;
+	struct algoblock *algo;
+
+	soft = dev->soft;
+	getdma = &soft->getdma;
+	putdma = &soft->putdma;
+	algo = &soft->algo;
+
+	soft->get_sn_irq = NULL;
+	soft->put_sn_irq = NULL;
+	soft->algo_sn_irq = NULL;
+
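+	/*
+	 * Three interrupts are allocated (get-DMA, put-DMA, algorithm);
+	 * each failure path below unwinds whatever the earlier steps set up.
+	 */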
+	sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
+	if (sn_irq == NULL)
+		return -EAGAIN;
+	soft->get_sn_irq = sn_irq;
+	getdma->intrHostDest = sn_irq->irq_xtalkaddr;
+	getdma->intrVector = sn_irq->irq_irq;
+	if (request_irq(sn_irq->irq_irq,
+			(void *)mbcs_completion_intr_handler, IRQF_SHARED,
+			"MBCS get intr", (void *)soft)) {
+		tiocx_irq_free(soft->get_sn_irq);
+		return -EAGAIN;
+	}
+
+	sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
+	if (sn_irq == NULL) {
+		free_irq(soft->get_sn_irq->irq_irq, soft);
+		tiocx_irq_free(soft->get_sn_irq);
+		return -EAGAIN;
+	}
+	soft->put_sn_irq = sn_irq;
+	putdma->intrHostDest = sn_irq->irq_xtalkaddr;
+	putdma->intrVector = sn_irq->irq_irq;
+	if (request_irq(sn_irq->irq_irq,
+			(void *)mbcs_completion_intr_handler, IRQF_SHARED,
+			"MBCS put intr", (void *)soft)) {
+		tiocx_irq_free(soft->put_sn_irq);
+		free_irq(soft->get_sn_irq->irq_irq, soft);
+		tiocx_irq_free(soft->get_sn_irq);
+		return -EAGAIN;
+	}
+
+	sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
+	if (sn_irq == NULL) {
+		free_irq(soft->put_sn_irq->irq_irq, soft);
+		tiocx_irq_free(soft->put_sn_irq);
+		free_irq(soft->get_sn_irq->irq_irq, soft);
+		tiocx_irq_free(soft->get_sn_irq);
+		return -EAGAIN;
+	}
+	soft->algo_sn_irq = sn_irq;
+	algo->intrHostDest = sn_irq->irq_xtalkaddr;
+	algo->intrVector = sn_irq->irq_irq;
+	if (request_irq(sn_irq->irq_irq,
+			(void *)mbcs_completion_intr_handler, IRQF_SHARED,
+			"MBCS algo intr", (void *)soft)) {
+		tiocx_irq_free(soft->algo_sn_irq);
+		free_irq(soft->put_sn_irq->irq_irq, soft);
+		tiocx_irq_free(soft->put_sn_irq);
+		free_irq(soft->get_sn_irq->irq_irq, soft);
+		tiocx_irq_free(soft->get_sn_irq);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+/**
+ * mbcs_intr_dealloc - Remove interrupts.
+ * @dev: device pointer
+ *
+ */
+static void mbcs_intr_dealloc(struct cx_dev *dev)
+{
+	struct mbcs_soft *soft;
+
+	soft = dev->soft;
+
+	free_irq(soft->get_sn_irq->irq_irq, soft);
+	tiocx_irq_free(soft->get_sn_irq);
+	free_irq(soft->put_sn_irq->irq_irq, soft);
+	tiocx_irq_free(soft->put_sn_irq);
+	free_irq(soft->algo_sn_irq->irq_irq, soft);
+	tiocx_irq_free(soft->algo_sn_irq);
+}
+
+static inline int mbcs_hw_init(struct mbcs_soft *soft)
+{
+	void *mmr_base = soft->mmr_base;
+	union cm_control cm_control;
+	union cm_req_timeout cm_req_timeout;
+	uint64_t err_stat;
+
+	cm_req_timeout.cm_req_timeout_reg =
+	    MBCS_MMR_GET(mmr_base, MBCS_CM_REQ_TOUT);
+
+	cm_req_timeout.time_out = MBCS_CM_CONTROL_REQ_TOUT_MASK;
+	MBCS_MMR_SET(mmr_base, MBCS_CM_REQ_TOUT,
+		     cm_req_timeout.cm_req_timeout_reg);
+
+	mbcs_gscr_pioaddr_set(soft);
+	mbcs_debug_pioaddr_set(soft);
+
+	/* clear errors */
+	err_stat = MBCS_MMR_GET(mmr_base, MBCS_CM_ERR_STAT);
+	MBCS_MMR_SET(mmr_base, MBCS_CM_CLR_ERR_STAT, err_stat);
+	MBCS_MMR_ZERO(mmr_base, MBCS_CM_ERROR_DETAIL1);
+
+	/* enable interrupts */
+	/* turn off 2^23 (INT_EN_PIO_REQ_ADDR_INV) */
+	MBCS_MMR_SET(mmr_base, MBCS_CM_ERR_INT_EN, 0x3ffffff7e00ffUL);
+
+	/* arm status regs and clear engines */
+	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
+	cm_control.rearm_stat_regs = 1;
+	cm_control.alg_clr = 1;
+	cm_control.wr_dma_clr = 1;
+	cm_control.rd_dma_clr = 1;
+
+	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
+
+	return 0;
+}
+
+static ssize_t show_algo(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct cx_dev *cx_dev = to_cx_dev(dev);
+	struct mbcs_soft *soft = cx_dev->soft;
+	uint64_t debug0;
+
+	/*
+	 * By convention, the first debug register contains the
+	 * algorithm number and revision.
+	 */
+	debug0 = *(uint64_t *) soft->debug_addr;
+
+	return sprintf(buf, "0x%x 0x%x\n",
+		       upper_32_bits(debug0), lower_32_bits(debug0));
+}
+
+static ssize_t store_algo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	int n;
+	struct cx_dev *cx_dev = to_cx_dev(dev);
+	struct mbcs_soft *soft = cx_dev->soft;
+
+	if (count == 0)		/* count is a size_t and cannot be negative */
+		return 0;
+
+	n = simple_strtoul(buf, NULL, 0);
+
+	if (n == 1) {
+		mbcs_algo_start(soft);
+		if (wait_event_interruptible(soft->algo_queue,
+					atomic_read(&soft->algo_done)))
+			return -ERESTARTSYS;
+	}
+
+	return count;
+}
+
+DEVICE_ATTR(algo, 0644, show_algo, store_algo);
+
+/**
+ * mbcs_probe - Initialize for device
+ * @dev: device pointer
+ * @device_id: id table pointer
+ *
+ */
+static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id)
+{
+	struct mbcs_soft *soft;
+	int err;
+
+	dev->soft = NULL;
+
+	soft = kzalloc(sizeof(struct mbcs_soft), GFP_KERNEL);
+	if (soft == NULL)
+		return -ENOMEM;
+
+	soft->nasid = dev->cx_id.nasid;
+	list_add(&soft->list, &soft_list);
+	soft->mmr_base = (void *)tiocx_swin_base(dev->cx_id.nasid);
+	dev->soft = soft;
+	soft->cxdev = dev;
+
+	init_waitqueue_head(&soft->dmawrite_queue);
+	init_waitqueue_head(&soft->dmaread_queue);
+	init_waitqueue_head(&soft->algo_queue);
+
+	mutex_init(&soft->dmawritelock);
+	mutex_init(&soft->dmareadlock);
+	mutex_init(&soft->algolock);
+
+	mbcs_getdma_init(&soft->getdma);
+	mbcs_putdma_init(&soft->putdma);
+	mbcs_algo_init(&soft->algo);
+
+	mbcs_hw_init(soft);
+
+	/* Allocate interrupts; unwind the probe if that fails. */
+	err = mbcs_intr_alloc(dev);
+	if (err) {
+		list_del(&soft->list);
+		dev->soft = NULL;
+		kfree(soft);
+		return err;
+	}
+
+	device_create_file(&dev->dev, &dev_attr_algo);
+
+	return 0;
+}
+
+static int mbcs_remove(struct cx_dev *dev)
+{
+	if (dev->soft) {
+		mbcs_intr_dealloc(dev);
+		kfree(dev->soft);
+	}
+
+	device_remove_file(&dev->dev, &dev_attr_algo);
+
+	return 0;
+}
+
+static const struct cx_device_id mbcs_id_table[] = {
+	{
+	 .part_num = MBCS_PART_NUM,
+	 .mfg_num = MBCS_MFG_NUM,
+	 },
+	{
+	 .part_num = MBCS_PART_NUM_ALG0,
+	 .mfg_num = MBCS_MFG_NUM,
+	 },
+	{0, 0}
+};
+
+MODULE_DEVICE_TABLE(cx, mbcs_id_table);
+
+static struct cx_drv mbcs_driver = {
+	.name = DEVICE_NAME,
+	.id_table = mbcs_id_table,
+	.probe = mbcs_probe,
+	.remove = mbcs_remove,
+};
+
+static void __exit mbcs_exit(void)
+{
+	unregister_chrdev(mbcs_major, DEVICE_NAME);
+	cx_driver_unregister(&mbcs_driver);
+}
+
+static int __init mbcs_init(void)
+{
+	int rv;
+
+	if (!ia64_platform_is("sn2"))
+		return -ENODEV;
+
+	// Put driver into chrdevs[].  Get major number.
+	rv = register_chrdev(mbcs_major, DEVICE_NAME, &mbcs_ops);
+	if (rv < 0) {
+		DBG(KERN_ALERT "mbcs_init: can't get major number. %d\n", rv);
+		return rv;
+	}
+	mbcs_major = rv;
+
+	return cx_driver_register(&mbcs_driver);
+}
+
+module_init(mbcs_init);
+module_exit(mbcs_exit);
+
+MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>");
+MODULE_DESCRIPTION("Driver for MOATB Core Services");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/mbcs.h b/drivers/char/mbcs.h
new file mode 100644
index 0000000..1a36884
--- /dev/null
+++ b/drivers/char/mbcs.h
@@ -0,0 +1,553 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2005 Silicon Graphics, Inc.  All rights reserved.
+ */
+
+#ifndef __MBCS_H__
+#define __MBCS_H__
+
+/*
+ * General macros
+ */
+#define MB	(1024*1024)
+#define MB2	(2*MB)
+#define MB4	(4*MB)
+#define MB6	(6*MB)
+
+/*
+ * Offsets and masks
+ */
+#define MBCS_CM_ID		0x0000	/* Identification */
+#define MBCS_CM_STATUS		0x0008	/* Status */
+#define MBCS_CM_ERROR_DETAIL1	0x0010	/* Error Detail1 */
+#define MBCS_CM_ERROR_DETAIL2	0x0018	/* Error Detail2 */
+#define MBCS_CM_CONTROL		0x0020	/* Control */
+#define MBCS_CM_REQ_TOUT	0x0028	/* Request Time-out */
+#define MBCS_CM_ERR_INT_DEST	0x0038	/* Error Interrupt Destination */
+#define MBCS_CM_TARG_FL		0x0050	/* Target Flush */
+#define MBCS_CM_ERR_STAT	0x0060	/* Error Status */
+#define MBCS_CM_CLR_ERR_STAT	0x0068	/* Clear Error Status */
+#define MBCS_CM_ERR_INT_EN	0x0070	/* Error Interrupt Enable */
+#define MBCS_RD_DMA_SYS_ADDR	0x0100	/* Read DMA System Address */
+#define MBCS_RD_DMA_LOC_ADDR	0x0108	/* Read DMA Local Address */
+#define MBCS_RD_DMA_CTRL	0x0110	/* Read DMA Control */
+#define MBCS_RD_DMA_AMO_DEST	0x0118	/* Read DMA AMO Destination */
+#define MBCS_RD_DMA_INT_DEST	0x0120	/* Read DMA Interrupt Destination */
+#define MBCS_RD_DMA_AUX_STAT	0x0130	/* Read DMA Auxiliary Status */
+#define MBCS_WR_DMA_SYS_ADDR	0x0200	/* Write DMA System Address */
+#define MBCS_WR_DMA_LOC_ADDR	0x0208	/* Write DMA Local Address */
+#define MBCS_WR_DMA_CTRL	0x0210	/* Write DMA Control */
+#define MBCS_WR_DMA_AMO_DEST	0x0218	/* Write DMA AMO Destination */
+#define MBCS_WR_DMA_INT_DEST	0x0220	/* Write DMA Interrupt Destination */
+#define MBCS_WR_DMA_AUX_STAT	0x0230	/* Write DMA Auxiliary Status */
+#define MBCS_ALG_AMO_DEST	0x0300	/* Algorithm AMO Destination */
+#define MBCS_ALG_INT_DEST	0x0308	/* Algorithm Interrupt Destination */
+#define MBCS_ALG_OFFSETS	0x0310
+#define MBCS_ALG_STEP		0x0318	/* Algorithm Step */
+
+#define MBCS_GSCR_START		0x0000000
+#define MBCS_DEBUG_START	0x0100000
+#define MBCS_RAM0_START		0x0200000
+#define MBCS_RAM1_START		0x0400000
+#define MBCS_RAM2_START		0x0600000
+
+#define MBCS_CM_CONTROL_REQ_TOUT_MASK 0x0000000000ffffffUL
+//#define PIO_BASE_ADDR_BASE_OFFSET_MASK 0x00fffffffff00000UL
+
+#define MBCS_SRAM_SIZE		(1024*1024)
+#define MBCS_CACHELINE_SIZE	128
+
+/*
+ * MMR gets and puts.  MBCS_MMR_SET reads the value back after writing so
+ * that the posted MMR write has reached the device before the macro
+ * returns.
+ */
+#define MBCS_MMR_ADDR(mmr_base, offset) ((uint64_t *)(mmr_base + offset))
+#define MBCS_MMR_SET(mmr_base, offset, value) {			\
+	uint64_t *mbcs_mmr_set_u64p, readback;				\
+	mbcs_mmr_set_u64p = (uint64_t *)(mmr_base + offset);	\
+	*mbcs_mmr_set_u64p = value;					\
+	readback = *mbcs_mmr_set_u64p; \
+}
+#define MBCS_MMR_GET(mmr_base, offset) *(uint64_t *)(mmr_base + offset)
+#define MBCS_MMR_ZERO(mmr_base, offset) MBCS_MMR_SET(mmr_base, offset, 0)
+
+/*
+ * MBCS mmr structures
+ */
+union cm_id {
+	uint64_t cm_id_reg;
+	struct {
+		uint64_t always_one:1,	// 0
+		 mfg_id:11,	// 11:1
+		 part_num:16,	// 27:12
+		 bitstream_rev:8,	// 35:28
+		:28;		// 63:36
+	};
+};
+
+union cm_status {
+	uint64_t cm_status_reg;
+	struct {
+		uint64_t pending_reads:8,	// 7:0
+		 pending_writes:8,	// 15:8
+		 ice_rsp_credits:8,	// 23:16
+		 ice_req_credits:8,	// 31:24
+		 cm_req_credits:8,	// 39:32
+		:1,		// 40
+		 rd_dma_in_progress:1,	// 41
+		 rd_dma_done:1,	// 42
+		:1,		// 43
+		 wr_dma_in_progress:1,	// 44
+		 wr_dma_done:1,	// 45
+		 alg_waiting:1,	// 46
+		 alg_pipe_running:1,	// 47
+		 alg_done:1,	// 48
+		:3,		// 51:49
+		 pending_int_reqs:8,	// 59:52
+		:3,		// 62:60
+		 alg_half_speed_sel:1;	// 63
+	};
+};
+
+union cm_error_detail1 {
+	uint64_t cm_error_detail1_reg;
+	struct {
+		uint64_t packet_type:4,	// 3:0
+		 source_id:2,	// 5:4
+		 data_size:2,	// 7:6
+		 tnum:8,	// 15:8
+		 byte_enable:8,	// 23:16
+		 gfx_cred:8,	// 31:24
+		 read_type:2,	// 33:32
+		 pio_or_memory:1,	// 34
+		 head_cw_error:1,	// 35
+		:12,		// 47:36
+		 head_error_bit:1,	// 48
+		 data_error_bit:1,	// 49
+		:13,		// 62:50
+		 valid:1;	// 63
+	};
+};
+
+union cm_error_detail2 {
+	uint64_t cm_error_detail2_reg;
+	struct {
+		uint64_t address:56,	// 55:0
+		:8;		// 63:56
+	};
+};
+
+union cm_control {
+	uint64_t cm_control_reg;
+	struct {
+		uint64_t cm_id:2,	// 1:0
+		:2,		// 3:2
+		 max_trans:5,	// 8:4
+		:3,		// 11:9
+		 address_mode:1,	// 12
+		:7,		// 19:13
+		 credit_limit:8,	// 27:20
+		:5,		// 32:28
+		 rearm_stat_regs:1,	// 33
+		 prescalar_byp:1,	// 34
+		 force_gap_war:1,	// 35
+		 rd_dma_go:1,	// 36
+		 wr_dma_go:1,	// 37
+		 alg_go:1,	// 38
+		 rd_dma_clr:1,	// 39
+		 wr_dma_clr:1,	// 40
+		 alg_clr:1,	// 41
+		:2,		// 43:42
+		 alg_wait_step:1,	// 44
+		 alg_done_amo_en:1,	// 45
+		 alg_done_int_en:1,	// 46
+		:1,		// 47
+		 alg_sram0_locked:1,	// 48
+		 alg_sram1_locked:1,	// 49
+		 alg_sram2_locked:1,	// 50
+		 alg_done_clr:1,	// 51
+		:12;		// 63:52
+	};
+};
+
+union cm_req_timeout {
+	uint64_t cm_req_timeout_reg;
+	struct {
+		uint64_t time_out:24,	// 23:0
+		:40;		// 63:24
+	};
+};
+
+union intr_dest {
+	uint64_t intr_dest_reg;
+	struct {
+		uint64_t address:56,	// 55:0
+		 int_vector:8;	// 63:56
+	};
+};
+
+union cm_error_status {
+	uint64_t cm_error_status_reg;
+	struct {
+		uint64_t ecc_sbe:1,	// 0
+		 ecc_mbe:1,	// 1
+		 unsupported_req:1,	// 2
+		 unexpected_rsp:1,	// 3
+		 bad_length:1,	// 4
+		 bad_datavalid:1,	// 5
+		 buffer_overflow:1,	// 6
+		 request_timeout:1,	// 7
+		:8,		// 15:8
+		 head_inv_data_size:1,	// 16
+		 rsp_pactype_inv:1,	// 17
+		 head_sb_err:1,	// 18
+		 missing_head:1,	// 19
+		 head_inv_rd_type:1,	// 20
+		 head_cmd_err_bit:1,	// 21
+		 req_addr_align_inv:1,	// 22
+		 pio_req_addr_inv:1,	// 23
+		 req_range_dsize_inv:1,	// 24
+		 early_term:1,	// 25
+		 early_tail:1,	// 26
+		 missing_tail:1,	// 27
+		 data_flit_sb_err:1,	// 28
+		 cm2hcm_req_cred_of:1,	// 29
+		 cm2hcm_rsp_cred_of:1,	// 30
+		 rx_bad_didn:1,	// 31
+		 rd_dma_err_rsp:1,	// 32
+		 rd_dma_tnum_tout:1,	// 33
+		 rd_dma_multi_tnum_tou:1,	// 34
+		 wr_dma_err_rsp:1,	// 35
+		 wr_dma_tnum_tout:1,	// 36
+		 wr_dma_multi_tnum_tou:1,	// 37
+		 alg_data_overflow:1,	// 38
+		 alg_data_underflow:1,	// 39
+		 ram0_access_conflict:1,	// 40
+		 ram1_access_conflict:1,	// 41
+		 ram2_access_conflict:1,	// 42
+		 ram0_perr:1,	// 43
+		 ram1_perr:1,	// 44
+		 ram2_perr:1,	// 45
+		 int_gen_rsp_err:1,	// 46
+		 int_gen_tnum_tout:1,	// 47
+		 rd_dma_prog_err:1,	// 48
+		 wr_dma_prog_err:1,	// 49
+		:14;		// 63:50
+	};
+};
+
+union cm_clr_error_status {
+	uint64_t cm_clr_error_status_reg;
+	struct {
+		uint64_t clr_ecc_sbe:1,	// 0
+		 clr_ecc_mbe:1,	// 1
+		 clr_unsupported_req:1,	// 2
+		 clr_unexpected_rsp:1,	// 3
+		 clr_bad_length:1,	// 4
+		 clr_bad_datavalid:1,	// 5
+		 clr_buffer_overflow:1,	// 6
+		 clr_request_timeout:1,	// 7
+		:8,		// 15:8
+		 clr_head_inv_data_siz:1,	// 16
+		 clr_rsp_pactype_inv:1,	// 17
+		 clr_head_sb_err:1,	// 18
+		 clr_missing_head:1,	// 19
+		 clr_head_inv_rd_type:1,	// 20
+		 clr_head_cmd_err_bit:1,	// 21
+		 clr_req_addr_align_in:1,	// 22
+		 clr_pio_req_addr_inv:1,	// 23
+		 clr_req_range_dsize_i:1,	// 24
+		 clr_early_term:1,	// 25
+		 clr_early_tail:1,	// 26
+		 clr_missing_tail:1,	// 27
+		 clr_data_flit_sb_err:1,	// 28
+		 clr_cm2hcm_req_cred_o:1,	// 29
+		 clr_cm2hcm_rsp_cred_o:1,	// 30
+		 clr_rx_bad_didn:1,	// 31
+		 clr_rd_dma_err_rsp:1,	// 32
+		 clr_rd_dma_tnum_tout:1,	// 33
+		 clr_rd_dma_multi_tnum:1,	// 34
+		 clr_wr_dma_err_rsp:1,	// 35
+		 clr_wr_dma_tnum_tout:1,	// 36
+		 clr_wr_dma_multi_tnum:1,	// 37
+		 clr_alg_data_overflow:1,	// 38
+		 clr_alg_data_underflo:1,	// 39
+		 clr_ram0_access_confl:1,	// 40
+		 clr_ram1_access_confl:1,	// 41
+		 clr_ram2_access_confl:1,	// 42
+		 clr_ram0_perr:1,	// 43
+		 clr_ram1_perr:1,	// 44
+		 clr_ram2_perr:1,	// 45
+		 clr_int_gen_rsp_err:1,	// 46
+		 clr_int_gen_tnum_tout:1,	// 47
+		 clr_rd_dma_prog_err:1,	// 48
+		 clr_wr_dma_prog_err:1,	// 49
+		:14;		// 63:50
+	};
+};
+
+union cm_error_intr_enable {
+	uint64_t cm_error_intr_enable_reg;
+	struct {
+		uint64_t int_en_ecc_sbe:1,	// 0
+		 int_en_ecc_mbe:1,	// 1
+		 int_en_unsupported_re:1,	// 2
+		 int_en_unexpected_rsp:1,	// 3
+		 int_en_bad_length:1,	// 4
+		 int_en_bad_datavalid:1,	// 5
+		 int_en_buffer_overflo:1,	// 6
+		 int_en_request_timeou:1,	// 7
+		:8,		// 15:8
+		 int_en_head_inv_data_:1,	// 16
+		 int_en_rsp_pactype_in:1,	// 17
+		 int_en_head_sb_err:1,	// 18
+		 int_en_missing_head:1,	// 19
+		 int_en_head_inv_rd_ty:1,	// 20
+		 int_en_head_cmd_err_b:1,	// 21
+		 int_en_req_addr_align:1,	// 22
+		 int_en_pio_req_addr_i:1,	// 23
+		 int_en_req_range_dsiz:1,	// 24
+		 int_en_early_term:1,	// 25
+		 int_en_early_tail:1,	// 26
+		 int_en_missing_tail:1,	// 27
+		 int_en_data_flit_sb_e:1,	// 28
+		 int_en_cm2hcm_req_cre:1,	// 29
+		 int_en_cm2hcm_rsp_cre:1,	// 30
+		 int_en_rx_bad_didn:1,	// 31
+		 int_en_rd_dma_err_rsp:1,	// 32
+		 int_en_rd_dma_tnum_to:1,	// 33
+		 int_en_rd_dma_multi_t:1,	// 34
+		 int_en_wr_dma_err_rsp:1,	// 35
+		 int_en_wr_dma_tnum_to:1,	// 36
+		 int_en_wr_dma_multi_t:1,	// 37
+		 int_en_alg_data_overf:1,	// 38
+		 int_en_alg_data_under:1,	// 39
+		 int_en_ram0_access_co:1,	// 40
+		 int_en_ram1_access_co:1,	// 41
+		 int_en_ram2_access_co:1,	// 42
+		 int_en_ram0_perr:1,	// 43
+		 int_en_ram1_perr:1,	// 44
+		 int_en_ram2_perr:1,	// 45
+		 int_en_int_gen_rsp_er:1,	// 46
+		 int_en_int_gen_tnum_t:1,	// 47
+		 int_en_rd_dma_prog_er:1,	// 48
+		 int_en_wr_dma_prog_er:1,	// 49
+		:14;		// 63:50
+	};
+};
+
+struct cm_mmr {
+	union cm_id id;
+	union cm_status status;
+	union cm_error_detail1 err_detail1;
+	union cm_error_detail2 err_detail2;
+	union cm_control control;
+	union cm_req_timeout req_timeout;
+	uint64_t reserved1[1];
+	union intr_dest int_dest;
+	uint64_t reserved2[2];
+	uint64_t targ_flush;
+	uint64_t reserved3[1];
+	union cm_error_status err_status;
+	union cm_clr_error_status clr_err_status;
+	union cm_error_intr_enable int_enable;
+};
+
+union dma_hostaddr {
+	uint64_t dma_hostaddr_reg;
+	struct {
+		uint64_t dma_sys_addr:56,	// 55:0
+		:8;		// 63:56
+	};
+};
+
+union dma_localaddr {
+	uint64_t dma_localaddr_reg;
+	struct {
+		uint64_t dma_ram_addr:21,	// 20:0
+		 dma_ram_sel:2,	// 22:21
+		:41;		// 63:23
+	};
+};
+
+union dma_control {
+	uint64_t dma_control_reg;
+	struct {
+		uint64_t dma_op_length:16,	// 15:0
+		:18,		// 33:16
+		 done_amo_en:1,	// 34
+		 done_int_en:1,	// 35
+		:1,		// 36
+		 pio_mem_n:1,	// 37
+		:26;		// 63:38
+	};
+};
+
+union dma_amo_dest {
+	uint64_t dma_amo_dest_reg;
+	struct {
+		uint64_t dma_amo_sys_addr:56,	// 55:0
+		 dma_amo_mod_type:3,	// 58:56
+		:5;		// 63:59
+	};
+};
+
+union rdma_aux_status {
+	uint64_t rdma_aux_status_reg;
+	struct {
+		uint64_t op_num_pacs_left:17,	// 16:0
+		:5,		// 21:17
+		 lrsp_buff_empty:1,	// 22
+		:17,		// 39:23
+		 pending_reqs_left:6,	// 45:40
+		:18;		// 63:46
+	};
+};
+
+struct rdma_mmr {
+	union dma_hostaddr host_addr;
+	union dma_localaddr local_addr;
+	union dma_control control;
+	union dma_amo_dest amo_dest;
+	union intr_dest intr_dest;
+	union rdma_aux_status aux_status;
+};
+
+union wdma_aux_status {
+	uint64_t wdma_aux_status_reg;
+	struct {
+		uint64_t op_num_pacs_left:17,	// 16:0
+		:4,		// 20:17
+		 lreq_buff_empty:1,	// 21
+		:18,		// 39:22
+		 pending_reqs_left:6,	// 45:40
+		:18;		// 63:46
+	};
+};
+
+struct wdma_mmr {
+	union dma_hostaddr host_addr;
+	union dma_localaddr local_addr;
+	union dma_control control;
+	union dma_amo_dest amo_dest;
+	union intr_dest intr_dest;
+	union wdma_aux_status aux_status;
+};
+
+union algo_step {
+	uint64_t algo_step_reg;
+	struct {
+		uint64_t alg_step_cnt:16,	// 15:0
+		:48;		// 63:16
+	};
+};
+
+struct algo_mmr {
+	union dma_amo_dest amo_dest;
+	union intr_dest intr_dest;
+	union {
+		uint64_t algo_offset_reg;
+		struct {
+			uint64_t sram0_offset:7,	// 6:0
+			reserved0:1,	// 7
+			sram1_offset:7,	// 14:8
+			reserved1:1,	// 15
+			sram2_offset:7,	// 22:16
+			reserved2:14;	// 63:23
+		};
+	} sram_offset;
+	union algo_step step;
+};
+
+struct mbcs_mmr {
+	struct cm_mmr cm;
+	uint64_t reserved1[17];
+	struct rdma_mmr rdDma;
+	uint64_t reserved2[25];
+	struct wdma_mmr wrDma;
+	uint64_t reserved3[25];
+	struct algo_mmr algo;
+	uint64_t reserved4[156];
+};
+
+/*
+ * defines
+ */
+#define DEVICE_NAME "mbcs"
+#define MBCS_PART_NUM 0xfff0
+#define MBCS_PART_NUM_ALG0 0xf001
+#define MBCS_MFG_NUM  0x1
+
+struct algoblock {
+	uint64_t amoHostDest;
+	uint64_t amoModType;
+	uint64_t intrHostDest;
+	uint64_t intrVector;
+	uint64_t algoStepCount;
+};
+
+struct getdma {
+	uint64_t hostAddr;
+	uint64_t localAddr;
+	uint64_t bytes;
+	uint64_t DoneAmoEnable;
+	uint64_t DoneIntEnable;
+	uint64_t peerIO;
+	uint64_t amoHostDest;
+	uint64_t amoModType;
+	uint64_t intrHostDest;
+	uint64_t intrVector;
+};
+
+struct putdma {
+	uint64_t hostAddr;
+	uint64_t localAddr;
+	uint64_t bytes;
+	uint64_t DoneAmoEnable;
+	uint64_t DoneIntEnable;
+	uint64_t peerIO;
+	uint64_t amoHostDest;
+	uint64_t amoModType;
+	uint64_t intrHostDest;
+	uint64_t intrVector;
+};
+
+struct mbcs_soft {
+	struct list_head list;
+	struct cx_dev *cxdev;
+	int major;
+	int nasid;
+	void *mmr_base;
+	wait_queue_head_t dmawrite_queue;
+	wait_queue_head_t dmaread_queue;
+	wait_queue_head_t algo_queue;
+	struct sn_irq_info *get_sn_irq;
+	struct sn_irq_info *put_sn_irq;
+	struct sn_irq_info *algo_sn_irq;
+	struct getdma getdma;
+	struct putdma putdma;
+	struct algoblock algo;
+	uint64_t gscr_addr;	// pio addr
+	uint64_t ram0_addr;	// pio addr
+	uint64_t ram1_addr;	// pio addr
+	uint64_t ram2_addr;	// pio addr
+	uint64_t debug_addr;	// pio addr
+	atomic_t dmawrite_done;
+	atomic_t dmaread_done;
+	atomic_t algo_done;
+	struct mutex dmawritelock;
+	struct mutex dmareadlock;
+	struct mutex algolock;
+};
+
+static int mbcs_open(struct inode *ip, struct file *fp);
+static ssize_t mbcs_sram_read(struct file *fp, char __user *buf, size_t len,
+			      loff_t * off);
+static ssize_t mbcs_sram_write(struct file *fp, const char __user *buf, size_t len,
+			       loff_t * off);
+static loff_t mbcs_sram_llseek(struct file *filp, loff_t off, int whence);
+static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma);
+
+#endif				// __MBCS_H__
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
new file mode 100644
index 0000000..7b4e4de
--- /dev/null
+++ b/drivers/char/mem.c
@@ -0,0 +1,947 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  linux/drivers/char/mem.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Added devfs support.
+ *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
+ *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/raw.h>
+#include <linux/tty.h>
+#include <linux/capability.h>
+#include <linux/ptrace.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/backing-dev.h>
+#include <linux/shmem_fs.h>
+#include <linux/splice.h>
+#include <linux/pfn.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_IA64
+# include <linux/efi.h>
+#endif
+
+#define DEVPORT_MINOR	4
+
+static inline unsigned long size_inside_page(unsigned long start,
+					     unsigned long size)
+{
+	unsigned long sz;
+
+	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
+
+	return min(sz, size);
+}
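+
+/*
+ * Example: with 4 KiB pages, start = 0x12f0 and size = 0x2000 gives
+ * sz = 0x1000 - 0x2f0 = 0xd10, so the chunk stops at the page boundary;
+ * a size smaller than sz is returned unchanged.
+ */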
+
+#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
+static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
+{
+	return addr + count <= __pa(high_memory);
+}
+
+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
+{
+	return 1;
+}
+#endif
+
+#ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return devmem_is_allowed(pfn);
+}
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+	u64 from = ((u64)pfn) << PAGE_SHIFT;
+	u64 to = from + size;
+	u64 cursor = from;
+
+	while (cursor < to) {
+		if (!devmem_is_allowed(pfn))
+			return 0;
+		cursor += PAGE_SIZE;
+		pfn++;
+	}
+	return 1;
+}
+#else
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return 1;
+}
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+	return 1;
+}
+#endif
+
+#ifndef unxlate_dev_mem_ptr
+#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
+{
+}
+#endif
+
+/*
+ * This function reads the *physical* memory. The f_pos points directly to the
+ * memory location.
+ */
+static ssize_t read_mem(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	phys_addr_t p = *ppos;
+	ssize_t read, sz;
+	void *ptr;
+	char *bounce;
+	int err;
+
+	if (p != *ppos)
+		return 0;
+
+	if (!valid_phys_addr_range(p, count))
+		return -EFAULT;
+	read = 0;
+#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
+	/* we don't have page 0 mapped on sparc and m68k.. */
+	if (p < PAGE_SIZE) {
+		sz = size_inside_page(p, count);
+		if (sz > 0) {
+			if (clear_user(buf, sz))
+				return -EFAULT;
+			buf += sz;
+			p += sz;
+			count -= sz;
+			read += sz;
+		}
+	}
+#endif
+
+	bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!bounce)
+		return -ENOMEM;
+
+	while (count > 0) {
+		unsigned long remaining;
+		int allowed, probe;
+
+		sz = size_inside_page(p, count);
+
+		err = -EPERM;
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
+			goto failed;
+
+		err = -EFAULT;
+		if (allowed == 2) {
+			/* Show zeros for restricted memory. */
+			remaining = clear_user(buf, sz);
+		} else {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr)
+				goto failed;
+
+			probe = probe_kernel_read(bounce, ptr, sz);
+			unxlate_dev_mem_ptr(p, ptr);
+			if (probe)
+				goto failed;
+
+			remaining = copy_to_user(buf, bounce, sz);
+		}
+
+		if (remaining)
+			goto failed;
+
+		buf += sz;
+		p += sz;
+		count -= sz;
+		read += sz;
+	}
+	kfree(bounce);
+
+	*ppos += read;
+	return read;
+
+failed:
+	kfree(bounce);
+	return err;
+}
+
+static ssize_t write_mem(struct file *file, const char __user *buf,
+			 size_t count, loff_t *ppos)
+{
+	phys_addr_t p = *ppos;
+	ssize_t written, sz;
+	unsigned long copied;
+	void *ptr;
+
+	if (p != *ppos)
+		return -EFBIG;
+
+	if (!valid_phys_addr_range(p, count))
+		return -EFAULT;
+
+	written = 0;
+
+#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
+	/* we don't have page 0 mapped on sparc and m68k.. */
+	if (p < PAGE_SIZE) {
+		sz = size_inside_page(p, count);
+		/* Hmm. Do something? */
+		buf += sz;
+		p += sz;
+		count -= sz;
+		written += sz;
+	}
+#endif
+
+	while (count > 0) {
+		int allowed;
+
+		sz = size_inside_page(p, count);
+
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
+			return -EPERM;
+
+		/* Skip actual writing when a page is marked as restricted. */
+		if (allowed == 1) {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr) {
+				if (written)
+					break;
+				return -EFAULT;
+			}
+
+			copied = copy_from_user(ptr, buf, sz);
+			unxlate_dev_mem_ptr(p, ptr);
+			if (copied) {
+				written += sz - copied;
+				if (written)
+					break;
+				return -EFAULT;
+			}
+		}
+
+		buf += sz;
+		p += sz;
+		count -= sz;
+		written += sz;
+	}
+
+	*ppos += written;
+	return written;
+}
+
+int __weak phys_mem_access_prot_allowed(struct file *file,
+	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
+{
+	return 1;
+}
+
+#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
+
+/*
+ * Architectures vary in how they handle caching for addresses
+ * outside of main memory.
+ */
+#ifdef pgprot_noncached
+static int uncached_access(struct file *file, phys_addr_t addr)
+{
+#if defined(CONFIG_IA64)
+	/*
+	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
+	 * attribute aliases.
+	 */
+	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
+#elif defined(CONFIG_MIPS)
+	{
+		extern int __uncached_access(struct file *file,
+					     unsigned long addr);
+
+		return __uncached_access(file, addr);
+	}
+#else
+	/*
+	 * Accessing memory above the top of what the kernel knows about, or
+	 * through a file pointer that was marked O_DSYNC, will be done
+	 * non-cached.
+	 */
+	if (file->f_flags & O_DSYNC)
+		return 1;
+	return addr >= __pa(high_memory);
+#endif
+}
+#endif
+
+static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				     unsigned long size, pgprot_t vma_prot)
+{
+#ifdef pgprot_noncached
+	phys_addr_t offset = pfn << PAGE_SHIFT;
+
+	if (uncached_access(file, offset))
+		return pgprot_noncached(vma_prot);
+#endif
+	return vma_prot;
+}
+#endif
+
+#ifndef CONFIG_MMU
+static unsigned long get_unmapped_area_mem(struct file *file,
+					   unsigned long addr,
+					   unsigned long len,
+					   unsigned long pgoff,
+					   unsigned long flags)
+{
+	if (!valid_mmap_phys_addr_range(pgoff, len))
+		return (unsigned long) -EINVAL;
+	return pgoff << PAGE_SHIFT;
+}
+
+/* permit direct mmap, for read, write or exec */
+static unsigned memory_mmap_capabilities(struct file *file)
+{
+	return NOMMU_MAP_DIRECT |
+		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
+}
+
+static unsigned zero_mmap_capabilities(struct file *file)
+{
+	return NOMMU_MAP_COPY;
+}
+
+/* can't do an in-place private mapping if there's no MMU */
+static inline int private_mapping_ok(struct vm_area_struct *vma)
+{
+	return vma->vm_flags & VM_MAYSHARE;
+}
+#else
+
+static inline int private_mapping_ok(struct vm_area_struct *vma)
+{
+	return 1;
+}
+#endif
+
+static const struct vm_operations_struct mmap_mem_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+	.access = generic_access_phys
+#endif
+};
+
+static int mmap_mem(struct file *file, struct vm_area_struct *vma)
+{
+	size_t size = vma->vm_end - vma->vm_start;
+	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+
+	/* Does it even fit in phys_addr_t? */
+	if (offset >> PAGE_SHIFT != vma->vm_pgoff)
+		return -EINVAL;
+
+	/* It's illegal to wrap around the end of the physical address space. */
+	if (offset + (phys_addr_t)size - 1 < offset)
+		return -EINVAL;
+
+	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
+		return -EINVAL;
+
+	if (!private_mapping_ok(vma))
+		return -ENOSYS;
+
+	if (!range_is_allowed(vma->vm_pgoff, size))
+		return -EPERM;
+
+	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
+						&vma->vm_page_prot))
+		return -EINVAL;
+
+	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
+						 size,
+						 vma->vm_page_prot);
+
+	vma->vm_ops = &mmap_mem_ops;
+
+	/* Remap-pfn-range will mark the range VM_IO */
+	if (remap_pfn_range(vma,
+			    vma->vm_start,
+			    vma->vm_pgoff,
+			    size,
+			    vma->vm_page_prot)) {
+		return -EAGAIN;
+	}
+	return 0;
+}
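+
+/*
+ * Typical user-space use (illustrative; requires privileges and, with
+ * CONFIG_STRICT_DEVMEM, an allowed range): map one page of physical
+ * address space at a page-aligned phys_addr:
+ *
+ *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
+ *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+ *		       MAP_SHARED, fd, phys_addr);
+ */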
+
+static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long pfn;
+
+	/* Turn a kernel-virtual address into a physical page frame */
+	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
+
+	/*
+	 * RED-PEN: on some architectures there is more mapped memory than
+	 * available in mem_map which pfn_valid checks for. Perhaps should add a
+	 * new macro here.
+	 *
+	 * RED-PEN: vmalloc is not supported right now.
+	 */
+	if (!pfn_valid(pfn))
+		return -EIO;
+
+	vma->vm_pgoff = pfn;
+	return mmap_mem(file, vma);
+}
+
+/*
+ * This function reads the *virtual* memory as seen by the kernel.
+ */
+static ssize_t read_kmem(struct file *file, char __user *buf,
+			 size_t count, loff_t *ppos)
+{
+	unsigned long p = *ppos;
+	ssize_t low_count, read, sz;
+	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+	int err = 0;
+
+	read = 0;
+	if (p < (unsigned long) high_memory) {
+		low_count = count;
+		if (count > (unsigned long)high_memory - p)
+			low_count = (unsigned long)high_memory - p;
+
+#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
+		/* we don't have page 0 mapped on sparc and m68k.. */
+		if (p < PAGE_SIZE && low_count > 0) {
+			sz = size_inside_page(p, low_count);
+			if (clear_user(buf, sz))
+				return -EFAULT;
+			buf += sz;
+			p += sz;
+			read += sz;
+			low_count -= sz;
+			count -= sz;
+		}
+#endif
+		while (low_count > 0) {
+			sz = size_inside_page(p, low_count);
+
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur
+			 */
+			kbuf = xlate_dev_kmem_ptr((void *)p);
+			if (!virt_addr_valid(kbuf))
+				return -ENXIO;
+
+			if (copy_to_user(buf, kbuf, sz))
+				return -EFAULT;
+			buf += sz;
+			p += sz;
+			read += sz;
+			low_count -= sz;
+			count -= sz;
+		}
+	}
+
+	if (count > 0) {
+		kbuf = (char *)__get_free_page(GFP_KERNEL);
+		if (!kbuf)
+			return -ENOMEM;
+		while (count > 0) {
+			sz = size_inside_page(p, count);
+			if (!is_vmalloc_or_module_addr((void *)p)) {
+				err = -ENXIO;
+				break;
+			}
+			sz = vread(kbuf, (char *)p, sz);
+			if (!sz)
+				break;
+			if (copy_to_user(buf, kbuf, sz)) {
+				err = -EFAULT;
+				break;
+			}
+			count -= sz;
+			buf += sz;
+			read += sz;
+			p += sz;
+		}
+		free_page((unsigned long)kbuf);
+	}
+	*ppos = p;
+	return read ? read : err;
+}
+
+
+static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	ssize_t written, sz;
+	unsigned long copied;
+
+	written = 0;
+#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
+	/* we don't have page 0 mapped on sparc and m68k.. */
+	if (p < PAGE_SIZE) {
+		sz = size_inside_page(p, count);
+		/* Hmm. Do something? */
+		buf += sz;
+		p += sz;
+		count -= sz;
+		written += sz;
+	}
+#endif
+
+	while (count > 0) {
+		void *ptr;
+
+		sz = size_inside_page(p, count);
+
+		/*
+		 * On ia64 if a page has been mapped somewhere as uncached, then
+		 * it must also be accessed uncached by the kernel or data
+		 * corruption may occur.
+		 */
+		ptr = xlate_dev_kmem_ptr((void *)p);
+		if (!virt_addr_valid(ptr))
+			return -ENXIO;
+
+		copied = copy_from_user(ptr, buf, sz);
+		if (copied) {
+			written += sz - copied;
+			if (written)
+				break;
+			return -EFAULT;
+		}
+		buf += sz;
+		p += sz;
+		count -= sz;
+		written += sz;
+	}
+
+	*ppos += written;
+	return written;
+}
+
+/*
+ * This function writes to the *virtual* memory as seen by the kernel.
+ */
+static ssize_t write_kmem(struct file *file, const char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	unsigned long p = *ppos;
+	ssize_t wrote = 0;
+	ssize_t virtr = 0;
+	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+	int err = 0;
+
+	if (p < (unsigned long) high_memory) {
+		unsigned long to_write = min_t(unsigned long, count,
+					       (unsigned long)high_memory - p);
+		wrote = do_write_kmem(p, buf, to_write, ppos);
+		if (wrote != to_write)
+			return wrote;
+		p += wrote;
+		buf += wrote;
+		count -= wrote;
+	}
+
+	if (count > 0) {
+		kbuf = (char *)__get_free_page(GFP_KERNEL);
+		if (!kbuf)
+			return wrote ? wrote : -ENOMEM;
+		while (count > 0) {
+			unsigned long sz = size_inside_page(p, count);
+			unsigned long n;
+
+			if (!is_vmalloc_or_module_addr((void *)p)) {
+				err = -ENXIO;
+				break;
+			}
+			n = copy_from_user(kbuf, buf, sz);
+			if (n) {
+				err = -EFAULT;
+				break;
+			}
+			vwrite(kbuf, (char *)p, sz);
+			count -= sz;
+			buf += sz;
+			virtr += sz;
+			p += sz;
+		}
+		free_page((unsigned long)kbuf);
+	}
+
+	*ppos = p;
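+	/* GNU "?:" extension: return the total byte count if non-zero, else err */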
+	return virtr + wrote ? : err;
+}
+
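+/*
+ * /dev/port: the file offset selects an x86-style I/O port (0..65535);
+ * transfers are done one byte at a time with inb()/outb().
+ */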
+static ssize_t read_port(struct file *file, char __user *buf,
+			 size_t count, loff_t *ppos)
+{
+	unsigned long i = *ppos;
+	char __user *tmp = buf;
+
+	if (!access_ok(VERIFY_WRITE, buf, count))
+		return -EFAULT;
+	while (count-- > 0 && i < 65536) {
+		if (__put_user(inb(i), tmp) < 0)
+			return -EFAULT;
+		i++;
+		tmp++;
+	}
+	*ppos = i;
+	return tmp-buf;
+}
+
+static ssize_t write_port(struct file *file, const char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	unsigned long i = *ppos;
+	const char __user *tmp = buf;
+
+	if (!access_ok(VERIFY_READ, buf, count))
+		return -EFAULT;
+	while (count-- > 0 && i < 65536) {
+		char c;
+
+		if (__get_user(c, tmp)) {
+			if (tmp > buf)
+				break;
+			return -EFAULT;
+		}
+		outb(c, i);
+		i++;
+		tmp++;
+	}
+	*ppos = i;
+	return tmp-buf;
+}
+
+static ssize_t read_null(struct file *file, char __user *buf,
+			 size_t count, loff_t *ppos)
+{
+	return 0;
+}
+
+static ssize_t write_null(struct file *file, const char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	return count;
+}
+
+static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
+{
+	return 0;
+}
+
+static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
+{
+	size_t count = iov_iter_count(from);
+	iov_iter_advance(from, count);
+	return count;
+}
+
+static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
+			struct splice_desc *sd)
+{
+	return sd->len;
+}
+
+static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
+				 loff_t *ppos, size_t len, unsigned int flags)
+{
+	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
+}
+
+static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
+{
+	size_t written = 0;
+
+	while (iov_iter_count(iter)) {
+		size_t chunk = iov_iter_count(iter), n;
+
+		if (chunk > PAGE_SIZE)
+			chunk = PAGE_SIZE;	/* Just for latency reasons */
+		n = iov_iter_zero(chunk, iter);
+		if (!n && iov_iter_count(iter))
+			return written ? written : -EFAULT;
+		written += n;
+		if (signal_pending(current))
+			return written ? written : -ERESTARTSYS;
+		cond_resched();
+	}
+	return written;
+}
+
+static int mmap_zero(struct file *file, struct vm_area_struct *vma)
+{
+#ifndef CONFIG_MMU
+	return -ENOSYS;
+#endif
+	if (vma->vm_flags & VM_SHARED)
+		return shmem_zero_setup(vma);
+	vma_set_anonymous(vma);
+	return 0;
+}
+
+static unsigned long get_unmapped_area_zero(struct file *file,
+				unsigned long addr, unsigned long len,
+				unsigned long pgoff, unsigned long flags)
+{
+#ifdef CONFIG_MMU
+	if (flags & MAP_SHARED) {
+		/*
+		 * mmap_zero() will call shmem_zero_setup() to create a file,
+		 * so use shmem's get_unmapped_area in case it can be huge;
+		 * and pass NULL for file as in mmap.c's get_unmapped_area(),
+		 * so as not to confuse shmem with our handle on "/dev/zero".
+		 */
+		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
+	}
+
+	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
+	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+#else
+	return -ENOSYS;
+#endif
+}
+
+static ssize_t write_full(struct file *file, const char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	return -ENOSPC;
+}
+
+/*
+ * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
+ * can fopen() both devices with "a" now.  This was previously impossible.
+ * -- SRB.
+ */
+static loff_t null_lseek(struct file *file, loff_t offset, int orig)
+{
+	return file->f_pos = 0;
+}
+
+/*
+ * The memory devices use the full 32/64 bits of the offset, and so we cannot
+ * check against negative addresses: they are ok. The return value is weird,
+ * though, in that case (0).
+ *
+ * Also note that seeking relative to the "end of file" isn't supported:
+ * it has no meaning, so it returns -EINVAL.
+ */
+static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
+{
+	loff_t ret;
+
+	inode_lock(file_inode(file));
+	switch (orig) {
+	case SEEK_CUR:
+		offset += file->f_pos;
+		/* fall through */
+	case SEEK_SET:
+		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
+		if ((unsigned long long)offset >= -MAX_ERRNO) {
+			ret = -EOVERFLOW;
+			break;
+		}
+		file->f_pos = offset;
+		ret = file->f_pos;
+		force_successful_syscall_return();
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	inode_unlock(file_inode(file));
+	return ret;
+}
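+
+/*
+ * Illustrative userspace sketch (an assumption, not part of this file):
+ * for the memory devices the file offset is interpreted as the physical
+ * address, so a reader seeks directly to the region of interest:
+ *
+ *	int fd = open("/dev/mem", O_RDONLY);
+ *	lseek(fd, 0x000f0000L, SEEK_SET);	(legacy BIOS area, for example)
+ *	read(fd, buf, 4096);
+ */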
+
+static int open_port(struct inode *inode, struct file *filp)
+{
+	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+}
+
+#define zero_lseek	null_lseek
+#define full_lseek      null_lseek
+#define write_zero	write_null
+#define write_iter_zero	write_iter_null
+#define open_mem	open_port
+#define open_kmem	open_mem
+
+static const struct file_operations __maybe_unused mem_fops = {
+	.llseek		= memory_lseek,
+	.read		= read_mem,
+	.write		= write_mem,
+	.mmap		= mmap_mem,
+	.open		= open_mem,
+#ifndef CONFIG_MMU
+	.get_unmapped_area = get_unmapped_area_mem,
+	.mmap_capabilities = memory_mmap_capabilities,
+#endif
+};
+
+static const struct file_operations __maybe_unused kmem_fops = {
+	.llseek		= memory_lseek,
+	.read		= read_kmem,
+	.write		= write_kmem,
+	.mmap		= mmap_kmem,
+	.open		= open_kmem,
+#ifndef CONFIG_MMU
+	.get_unmapped_area = get_unmapped_area_mem,
+	.mmap_capabilities = memory_mmap_capabilities,
+#endif
+};
+
+static const struct file_operations null_fops = {
+	.llseek		= null_lseek,
+	.read		= read_null,
+	.write		= write_null,
+	.read_iter	= read_iter_null,
+	.write_iter	= write_iter_null,
+	.splice_write	= splice_write_null,
+};
+
+static const struct file_operations __maybe_unused port_fops = {
+	.llseek		= memory_lseek,
+	.read		= read_port,
+	.write		= write_port,
+	.open		= open_port,
+};
+
+static const struct file_operations zero_fops = {
+	.llseek		= zero_lseek,
+	.write		= write_zero,
+	.read_iter	= read_iter_zero,
+	.write_iter	= write_iter_zero,
+	.mmap		= mmap_zero,
+	.get_unmapped_area = get_unmapped_area_zero,
+#ifndef CONFIG_MMU
+	.mmap_capabilities = zero_mmap_capabilities,
+#endif
+};
+
+static const struct file_operations full_fops = {
+	.llseek		= full_lseek,
+	.read_iter	= read_iter_zero,
+	.write		= write_full,
+};
+
+static const struct memdev {
+	const char *name;
+	umode_t mode;
+	const struct file_operations *fops;
+	fmode_t fmode;
+} devlist[] = {
+#ifdef CONFIG_DEVMEM
+	 [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
+#endif
+#ifdef CONFIG_DEVKMEM
+	 [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
+#endif
+	 [3] = { "null", 0666, &null_fops, 0 },
+#ifdef CONFIG_DEVPORT
+	 [4] = { "port", 0, &port_fops, 0 },
+#endif
+	 [5] = { "zero", 0666, &zero_fops, 0 },
+	 [7] = { "full", 0666, &full_fops, 0 },
+	 [8] = { "random", 0666, &random_fops, 0 },
+	 [9] = { "urandom", 0666, &urandom_fops, 0 },
+#ifdef CONFIG_PRINTK
+	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
+#endif
+};
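+
+/*
+ * Note: the array index doubles as the device minor under MEM_MAJOR (1),
+ * e.g. the "null" entry above becomes character device 1:3, the
+ * traditional /dev/null.
+ */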
+
+static int memory_open(struct inode *inode, struct file *filp)
+{
+	int minor;
+	const struct memdev *dev;
+
+	minor = iminor(inode);
+	if (minor >= ARRAY_SIZE(devlist))
+		return -ENXIO;
+
+	dev = &devlist[minor];
+	if (!dev->fops)
+		return -ENXIO;
+
+	filp->f_op = dev->fops;
+	filp->f_mode |= dev->fmode;
+
+	if (dev->fops->open)
+		return dev->fops->open(inode, filp);
+
+	return 0;
+}
+
+static const struct file_operations memory_fops = {
+	.open = memory_open,
+	.llseek = noop_llseek,
+};
+
+static char *mem_devnode(struct device *dev, umode_t *mode)
+{
+	if (mode && devlist[MINOR(dev->devt)].mode)
+		*mode = devlist[MINOR(dev->devt)].mode;
+	return NULL;
+}
+
+static struct class *mem_class;
+
+static int __init chr_dev_init(void)
+{
+	int minor;
+
+	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
+		printk("unable to get major %d for memory devs\n", MEM_MAJOR);
+
+	mem_class = class_create(THIS_MODULE, "mem");
+	if (IS_ERR(mem_class))
+		return PTR_ERR(mem_class);
+
+	mem_class->devnode = mem_devnode;
+	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
+		if (!devlist[minor].name)
+			continue;
+
+		/*
+		 * Create /dev/port?
+		 */
+		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
+			continue;
+
+		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
+			      NULL, devlist[minor].name);
+	}
+
+	return tty_init();
+}
+
+fs_initcall(chr_dev_init);
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
new file mode 100644
index 0000000..53cfe57
--- /dev/null
+++ b/drivers/char/misc.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/char/misc.c
+ *
+ * Generic misc open routine by Johan Myreen
+ *
+ * Based on code from Linus
+ *
+ * Teemu Rantanen's Microsoft Busmouse support and Derrick Cole's
+ *   changes incorporated into 0.97pl4
+ *   by Peter Cervasio (pete%q106fm.uucp@wupost.wustl.edu) (08SEP92)
+ *   See busmouse.c for particulars.
+ *
+ * Made things a lot more modular - easy to compile in just one or two
+ * of the misc drivers, as they are now completely independent. Linus.
+ *
+ * Support for loadable modules. 8-Sep-95 Philip Blundell <pjb27@cam.ac.uk>
+ *
+ * Fixed a failing symbol register to free the device registration
+ *		Alan Cox <alan@lxorguk.ukuu.org.uk> 21-Jan-96
+ *
+ * Dynamic minors and /proc/mice by Alessandro Rubini. 26-Mar-96
+ *
+ * Renamed to misc and miscdevice to be more accurate. Alan Cox 26-Mar-96
+ *
+ * Handling of mouse minor numbers for kerneld:
+ *  Idea by Jacques Gelinas <jack@solucorp.qc.ca>,
+ *  adapted by Bjorn Ekwall <bj0rn@blox.se>
+ *  corrected by Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * Changes for kmod (from kerneld):
+ *	Cyrus Durgin <cider@speakeasy.org>
+ *
+ * Added devfs support. Richard Gooch <rgooch@atnf.csiro.au>  10-Jan-1998
+ */
+
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/mutex.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/stat.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/tty.h>
+#include <linux/kmod.h>
+#include <linux/gfp.h>
+
+/*
+ * Head entry for the doubly linked miscdevice list
+ */
+static LIST_HEAD(misc_list);
+static DEFINE_MUTEX(misc_mtx);
+
+/*
+ * Assigned numbers, used for dynamic minors
+ */
+#define DYNAMIC_MINORS 64 /* like dynamic majors */
+static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS);
+
+#ifdef CONFIG_PROC_FS
+static void *misc_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	mutex_lock(&misc_mtx);
+	return seq_list_start(&misc_list, *pos);
+}
+
+static void *misc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return seq_list_next(v, &misc_list, pos);
+}
+
+static void misc_seq_stop(struct seq_file *seq, void *v)
+{
+	mutex_unlock(&misc_mtx);
+}
+
+static int misc_seq_show(struct seq_file *seq, void *v)
+{
+	const struct miscdevice *p = list_entry(v, struct miscdevice, list);
+
+	seq_printf(seq, "%3i %s\n", p->minor, p->name ? p->name : "");
+	return 0;
+}
+
+
+static const struct seq_operations misc_seq_ops = {
+	.start = misc_seq_start,
+	.next  = misc_seq_next,
+	.stop  = misc_seq_stop,
+	.show  = misc_seq_show,
+};
+#endif
+
+static int misc_open(struct inode *inode, struct file *file)
+{
+	int minor = iminor(inode);
+	struct miscdevice *c;
+	int err = -ENODEV;
+	const struct file_operations *new_fops = NULL;
+
+	mutex_lock(&misc_mtx);
+
+	list_for_each_entry(c, &misc_list, list) {
+		if (c->minor == minor) {
+			new_fops = fops_get(c->fops);
+			break;
+		}
+	}
+
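+	/*
+	 * No driver has registered this minor yet: drop the mutex, ask
+	 * kmod to load "char-major-10-<minor>", then rescan the list.
+	 */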
+	if (!new_fops) {
+		mutex_unlock(&misc_mtx);
+		request_module("char-major-%d-%d", MISC_MAJOR, minor);
+		mutex_lock(&misc_mtx);
+
+		list_for_each_entry(c, &misc_list, list) {
+			if (c->minor == minor) {
+				new_fops = fops_get(c->fops);
+				break;
+			}
+		}
+		if (!new_fops)
+			goto fail;
+	}
+
+	/*
+	 * Place the miscdevice in the file's
+	 * private_data so it can be used by the
+	 * file operations, including f_op->open below
+	 */
+	file->private_data = c;
+
+	err = 0;
+	replace_fops(file, new_fops);
+	if (file->f_op->open)
+		err = file->f_op->open(inode, file);
+fail:
+	mutex_unlock(&misc_mtx);
+	return err;
+}
+
+static struct class *misc_class;
+
+static const struct file_operations misc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= misc_open,
+	.llseek		= noop_llseek,
+};
+
+/**
+ *	misc_register	-	register a miscellaneous device
+ *	@misc: device structure
+ *
+ *	Register a miscellaneous device with the kernel. If the minor
+ *	number is set to %MISC_DYNAMIC_MINOR, a minor number is assigned
+ *	and placed in the minor field of the structure. For other cases
+ *	the minor number requested is used.
+ *
+ *	The structure passed is linked into the kernel and may not be
+ *	destroyed until it has been unregistered. By default, an open()
+ *	syscall to the device sets file->private_data to point to the
+ *	structure. Drivers don't need open in fops for this.
+ *
+ *	A zero is returned on success and a negative errno code for
+ *	failure.
+ */
+
+int misc_register(struct miscdevice *misc)
+{
+	dev_t dev;
+	int err = 0;
+	bool is_dynamic = (misc->minor == MISC_DYNAMIC_MINOR);
+
+	INIT_LIST_HEAD(&misc->list);
+
+	mutex_lock(&misc_mtx);
+
+	if (is_dynamic) {
+		int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS);
+
+		if (i >= DYNAMIC_MINORS) {
+			err = -EBUSY;
+			goto out;
+		}
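+		/*
+		 * Bit i maps to minor (DYNAMIC_MINORS - i - 1), so dynamic
+		 * minors are handed out downward starting at 63.
+		 */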
+		misc->minor = DYNAMIC_MINORS - i - 1;
+		set_bit(i, misc_minors);
+	} else {
+		struct miscdevice *c;
+
+		list_for_each_entry(c, &misc_list, list) {
+			if (c->minor == misc->minor) {
+				err = -EBUSY;
+				goto out;
+			}
+		}
+	}
+
+	dev = MKDEV(MISC_MAJOR, misc->minor);
+
+	misc->this_device =
+		device_create_with_groups(misc_class, misc->parent, dev,
+					  misc, misc->groups, "%s", misc->name);
+	if (IS_ERR(misc->this_device)) {
+		if (is_dynamic) {
+			int i = DYNAMIC_MINORS - misc->minor - 1;
+
+			if (i < DYNAMIC_MINORS && i >= 0)
+				clear_bit(i, misc_minors);
+			misc->minor = MISC_DYNAMIC_MINOR;
+		}
+		err = PTR_ERR(misc->this_device);
+		goto out;
+	}
+
+	/*
+	 * Add it to the front, so that later devices can "override"
+	 * earlier defaults
+	 */
+	list_add(&misc->list, &misc_list);
+ out:
+	mutex_unlock(&misc_mtx);
+	return err;
+}
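+
+/*
+ * Illustrative sketch (hypothetical driver, not part of this file): a
+ * typical caller lets the misc core pick the minor and supplies its own
+ * fops:
+ *
+ *	static struct miscdevice foo_miscdev = {
+ *		.minor	= MISC_DYNAMIC_MINOR,
+ *		.name	= "foo",
+ *		.fops	= &foo_fops,
+ *	};
+ *	...
+ *	int err = misc_register(&foo_miscdev);
+ */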
+
+/**
+ *	misc_deregister - unregister a miscellaneous device
+ *	@misc: device to unregister
+ *
+ *	Unregister a miscellaneous device that was previously
+ *	successfully registered with misc_register().
+ */
+
+void misc_deregister(struct miscdevice *misc)
+{
+	int i = DYNAMIC_MINORS - misc->minor - 1;
+
+	if (WARN_ON(list_empty(&misc->list)))
+		return;
+
+	mutex_lock(&misc_mtx);
+	list_del(&misc->list);
+	device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor));
+	if (i < DYNAMIC_MINORS && i >= 0)
+		clear_bit(i, misc_minors);
+	mutex_unlock(&misc_mtx);
+}
+
+EXPORT_SYMBOL(misc_register);
+EXPORT_SYMBOL(misc_deregister);
+
+static char *misc_devnode(struct device *dev, umode_t *mode)
+{
+	struct miscdevice *c = dev_get_drvdata(dev);
+
+	if (mode && c->mode)
+		*mode = c->mode;
+	if (c->nodename)
+		return kstrdup(c->nodename, GFP_KERNEL);
+	return NULL;
+}
+
+static int __init misc_init(void)
+{
+	int err;
+	struct proc_dir_entry *ret;
+
+	ret = proc_create_seq("misc", 0, NULL, &misc_seq_ops);
+	misc_class = class_create(THIS_MODULE, "misc");
+	err = PTR_ERR(misc_class);
+	if (IS_ERR(misc_class))
+		goto fail_remove;
+
+	err = -EIO;
+	if (register_chrdev(MISC_MAJOR, "misc", &misc_fops))
+		goto fail_printk;
+	misc_class->devnode = misc_devnode;
+	return 0;
+
+fail_printk:
+	pr_err("unable to get major %d for misc devices\n", MISC_MAJOR);
+	class_destroy(misc_class);
+fail_remove:
+	if (ret)
+		remove_proc_entry("misc", NULL);
+	return err;
+}
+subsys_initcall(misc_init);
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
new file mode 100644
index 0000000..058876b
--- /dev/null
+++ b/drivers/char/mspec.c
@@ -0,0 +1,435 @@
+/*
+ * Copyright (C) 2001-2006 Silicon Graphics, Inc.  All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+/*
+ * SN Platform Special Memory (mspec) Support
+ *
+ * This driver exports the SN special memory (mspec) facility to user
+ * processes.
+ * There are three types of memory made available through this driver:
+ * fetchops, uncached and cached.
+ *
+ * Fetchops are atomic memory operations that are implemented in the
+ * memory controller on SGI SN hardware.
+ *
+ * Uncached regions are used for the memory write-combining feature of
+ * the ia64 cpu.
+ *
+ * Cached regions are used for areas of memory that are accessed as cached
+ * addresses on our partition and as uncached addresses from other
+ * partitions.  Due to a design constraint of the SN2 Shub, processors on
+ * the same FSB cannot perform both a cached and an uncached reference to the
+ * same cache line.  These special memory cached regions prevent the
+ * kernel from ever dropping in a TLB entry and therefore prevent the
+ * processor from ever speculating a cache line from this page.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/numa.h>
+#include <linux/refcount.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <linux/atomic.h>
+#include <asm/tlbflush.h>
+#include <asm/uncached.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/mspec.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/io.h>
+#include <asm/sn/bte.h>
+#include <asm/sn/shubio.h>
+
+
+#define FETCHOP_ID	"SGI Fetchop,"
+#define CACHED_ID	"Cached,"
+#define UNCACHED_ID	"Uncached"
+#define REVISION	"4.0"
+#define MSPEC_BASENAME	"mspec"
+
+/*
+ * Page types allocated by the device.
+ */
+enum mspec_page_type {
+	MSPEC_FETCHOP = 1,
+	MSPEC_CACHED,
+	MSPEC_UNCACHED
+};
+
+#ifdef CONFIG_SGI_SN
+static int is_sn2;
+#else
+#define is_sn2		0
+#endif
+
+/*
+ * One of these structures is allocated when an mspec region is mmapped. The
+ * structure is pointed to by the vma->vm_private_data field in the vma struct.
+ * This structure is used to record the addresses of the mspec pages.
+ * This structure is shared by all vmas that are split off from the
+ * original vma by split_vma().
+ *
+ * The refcnt is incremented atomically because mm->mmap_sem does not
+ * protect it in the fork case, where multiple tasks share the vma_data.
+ */
+struct vma_data {
+	refcount_t refcnt;	/* Number of vmas sharing the data. */
+	spinlock_t lock;	/* Serialize access to this structure. */
+	int count;		/* Number of pages allocated. */
+	enum mspec_page_type type; /* Type of pages allocated. */
+	unsigned long vm_start;	/* Original (unsplit) base. */
+	unsigned long vm_end;	/* Original (unsplit) end. */
+	unsigned long maddr[0];	/* Array of MSPEC addresses. */
+};
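+
+/*
+ * Note: maddr[0] is the old-style (pre-C99) flexible-array idiom;
+ * mspec_mmap() sizes the allocation for one entry per mapped page.
+ */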
+
+/* used on shub2 to clear FOP cache in the HUB */
+static unsigned long scratch_page[MAX_NUMNODES];
+#define SH2_AMO_CACHE_ENTRIES	4
+
+static inline int
+mspec_zero_block(unsigned long addr, int len)
+{
+	int status;
+
+	if (is_sn2) {
+		if (is_shub2()) {
+			int nid;
+			void *p;
+			int i;
+
+			nid = nasid_to_cnodeid(get_node_number(__pa(addr)));
+			p = (void *)TO_AMO(scratch_page[nid]);
+
+			for (i = 0; i < SH2_AMO_CACHE_ENTRIES; i++) {
+				FETCHOP_LOAD_OP(p, FETCHOP_LOAD);
+				p += FETCHOP_VAR_SIZE;
+			}
+		}
+
+		status = bte_copy(0, addr & ~__IA64_UNCACHED_OFFSET, len,
+				  BTE_WACQUIRE | BTE_ZERO_FILL, NULL);
+	} else {
+		memset((char *) addr, 0, len);
+		status = 0;
+	}
+	return status;
+}
+
+/*
+ * mspec_open
+ *
+ * Called when a device mapping is created by a means other than mmap
+ * (e.g. fork, or a partial munmap that splits the vma).  Increments the
+ * reference count on the underlying mspec data so it is not freed
+ * prematurely.
+ */
+static void
+mspec_open(struct vm_area_struct *vma)
+{
+	struct vma_data *vdata;
+
+	vdata = vma->vm_private_data;
+	refcount_inc(&vdata->refcnt);
+}
+
+/*
+ * mspec_close
+ *
+ * Called when unmapping a device mapping. Frees all mspec pages
+ * belonging to all the vma's sharing this vma_data structure.
+ */
+static void
+mspec_close(struct vm_area_struct *vma)
+{
+	struct vma_data *vdata;
+	int index, last_index;
+	unsigned long my_page;
+
+	vdata = vma->vm_private_data;
+
+	if (!refcount_dec_and_test(&vdata->refcnt))
+		return;
+
+	last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+	for (index = 0; index < last_index; index++) {
+		if (vdata->maddr[index] == 0)
+			continue;
+		/*
+		 * Clear the page before sticking it back
+		 * into the pool.
+		 */
+		my_page = vdata->maddr[index];
+		vdata->maddr[index] = 0;
+		if (!mspec_zero_block(my_page, PAGE_SIZE))
+			uncached_free_page(my_page, 1);
+		else
+			printk(KERN_WARNING "mspec_close(): "
+			       "failed to zero page %ld\n", my_page);
+	}
+
+	kvfree(vdata);
+}
+
+/*
+ * mspec_fault
+ *
+ * Creates an mspec page and maps it into user space.
+ */
+static vm_fault_t
+mspec_fault(struct vm_fault *vmf)
+{
+	unsigned long paddr, maddr;
+	unsigned long pfn;
+	pgoff_t index = vmf->pgoff;
+	struct vma_data *vdata = vmf->vma->vm_private_data;
+
+	maddr = (volatile unsigned long) vdata->maddr[index];
+	if (maddr == 0) {
+		maddr = uncached_alloc_page(numa_node_id(), 1);
+		if (maddr == 0)
+			return VM_FAULT_OOM;
+
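+		/*
+		 * Recheck under the lock: a concurrent fault may have
+		 * installed a page already; if so, free ours and use the
+		 * existing one.
+		 */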
+		spin_lock(&vdata->lock);
+		if (vdata->maddr[index] == 0) {
+			vdata->count++;
+			vdata->maddr[index] = maddr;
+		} else {
+			uncached_free_page(maddr, 1);
+			maddr = vdata->maddr[index];
+		}
+		spin_unlock(&vdata->lock);
+	}
+
+	if (vdata->type == MSPEC_FETCHOP)
+		paddr = TO_AMO(maddr);
+	else
+		paddr = maddr & ~__IA64_UNCACHED_OFFSET;
+
+	pfn = paddr >> PAGE_SHIFT;
+
+	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
+}
+
+static const struct vm_operations_struct mspec_vm_ops = {
+	.open = mspec_open,
+	.close = mspec_close,
+	.fault = mspec_fault,
+};
+
+/*
+ * mspec_mmap
+ *
+ * Called when mmapping the device.  Initializes the vma with a fault handler
+ * and private data structure necessary to allocate, track, and free the
+ * underlying pages.
+ */
+static int
+mspec_mmap(struct file *file, struct vm_area_struct *vma,
+					enum mspec_page_type type)
+{
+	struct vma_data *vdata;
+	int pages, vdata_size;
+
+	if (vma->vm_pgoff != 0)
+		return -EINVAL;
+
+	if ((vma->vm_flags & VM_SHARED) == 0)
+		return -EINVAL;
+
+	if ((vma->vm_flags & VM_WRITE) == 0)
+		return -EPERM;
+
+	pages = vma_pages(vma);
+	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
+	if (vdata_size <= PAGE_SIZE)
+		vdata = kzalloc(vdata_size, GFP_KERNEL);
+	else
+		vdata = vzalloc(vdata_size);
+	if (!vdata)
+		return -ENOMEM;
+
+	vdata->vm_start = vma->vm_start;
+	vdata->vm_end = vma->vm_end;
+	vdata->type = type;
+	spin_lock_init(&vdata->lock);
+	refcount_set(&vdata->refcnt, 1);
+	vma->vm_private_data = vdata;
+
+	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_ops = &mspec_vm_ops;
+
+	return 0;
+}
+
+static int
+fetchop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	return mspec_mmap(file, vma, MSPEC_FETCHOP);
+}
+
+static int
+cached_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	return mspec_mmap(file, vma, MSPEC_CACHED);
+}
+
+static int
+uncached_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	return mspec_mmap(file, vma, MSPEC_UNCACHED);
+}
+
+static const struct file_operations fetchop_fops = {
+	.owner = THIS_MODULE,
+	.mmap = fetchop_mmap,
+	.llseek = noop_llseek,
+};
+
+static struct miscdevice fetchop_miscdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "sgi_fetchop",
+	.fops = &fetchop_fops
+};
+
+static const struct file_operations cached_fops = {
+	.owner = THIS_MODULE,
+	.mmap = cached_mmap,
+	.llseek = noop_llseek,
+};
+
+static struct miscdevice cached_miscdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "mspec_cached",
+	.fops = &cached_fops
+};
+
+static const struct file_operations uncached_fops = {
+	.owner = THIS_MODULE,
+	.mmap = uncached_mmap,
+	.llseek = noop_llseek,
+};
+
+static struct miscdevice uncached_miscdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "mspec_uncached",
+	.fops = &uncached_fops
+};
+
+/*
+ * mspec_init
+ *
+ * Called at boot time to initialize the mspec facility.
+ */
+static int __init
+mspec_init(void)
+{
+	int ret;
+	int nid;
+
+	/*
+	 * The fetchop device only works on SN2 hardware; the uncached and
+	 * cached memory drivers should both be valid on all ia64 hardware.
+	 */
+#ifdef CONFIG_SGI_SN
+	if (ia64_platform_is("sn2")) {
+		is_sn2 = 1;
+		if (is_shub2()) {
+			ret = -ENOMEM;
+			for_each_node_state(nid, N_ONLINE) {
+				int actual_nid;
+				int nasid;
+				unsigned long phys;
+
+				scratch_page[nid] = uncached_alloc_page(nid, 1);
+				if (scratch_page[nid] == 0)
+					goto free_scratch_pages;
+				phys = __pa(scratch_page[nid]);
+				nasid = get_node_number(phys);
+				actual_nid = nasid_to_cnodeid(nasid);
+				if (actual_nid != nid)
+					goto free_scratch_pages;
+			}
+		}
+
+		ret = misc_register(&fetchop_miscdev);
+		if (ret) {
+			printk(KERN_ERR
+			       "%s: failed to register device %i\n",
+			       FETCHOP_ID, ret);
+			goto free_scratch_pages;
+		}
+	}
+#endif
+	ret = misc_register(&cached_miscdev);
+	if (ret) {
+		printk(KERN_ERR "%s: failed to register device %i\n",
+		       CACHED_ID, ret);
+		if (is_sn2)
+			misc_deregister(&fetchop_miscdev);
+		goto free_scratch_pages;
+	}
+	ret = misc_register(&uncached_miscdev);
+	if (ret) {
+		printk(KERN_ERR "%s: failed to register device %i\n",
+		       UNCACHED_ID, ret);
+		misc_deregister(&cached_miscdev);
+		if (is_sn2)
+			misc_deregister(&fetchop_miscdev);
+		goto free_scratch_pages;
+	}
+
+	printk(KERN_INFO "%s %s initialized devices: %s %s %s\n",
+	       MSPEC_BASENAME, REVISION, is_sn2 ? FETCHOP_ID : "",
+	       CACHED_ID, UNCACHED_ID);
+
+	return 0;
+
+ free_scratch_pages:
+	for_each_node(nid) {
+		if (scratch_page[nid] != 0)
+			uncached_free_page(scratch_page[nid], 1);
+	}
+	return ret;
+}
+
+static void __exit
+mspec_exit(void)
+{
+	int nid;
+
+	misc_deregister(&uncached_miscdev);
+	misc_deregister(&cached_miscdev);
+	if (is_sn2) {
+		misc_deregister(&fetchop_miscdev);
+
+		for_each_node(nid) {
+			if (scratch_page[nid] != 0)
+				uncached_free_page(scratch_page[nid], 1);
+		}
+	}
+}
+
+module_init(mspec_init);
+module_exit(mspec_exit);
+
+MODULE_AUTHOR("Silicon Graphics, Inc. <linux-altix@sgi.com>");
+MODULE_DESCRIPTION("Driver for SGI SN special memory operations");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/mwave/3780i.c b/drivers/char/mwave/3780i.c
new file mode 100644
index 0000000..4a8937f
--- /dev/null
+++ b/drivers/char/mwave/3780i.c
@@ -0,0 +1,738 @@
+/*
+*
+* 3780i.c -- helper routines for the 3780i DSP
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*
+*
+* 10/23/2000 - Alpha Release
+*	First release to the public
+*/
+
+#include <linux/kernel.h>
+#include <linux/unistd.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+#include <linux/sched.h>	/* cond_resched() */
+
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <asm/irq.h>
+#include "smapi.h"
+#include "mwavedd.h"
+#include "3780i.h"
+
+static DEFINE_SPINLOCK(dsp_lock);
+
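+/*
+ * Pace back-to-back MSA accesses: yield the CPU and delay ~100us,
+ * presumably so the slow DSP-side bus can keep up with the host.
+ */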
+static void PaceMsaAccess(unsigned short usDspBaseIO)
+{
+	cond_resched();
+	udelay(100);
+	cond_resched();
+}
+
+unsigned short dsp3780I_ReadMsaCfg(unsigned short usDspBaseIO,
+                                   unsigned long ulMsaAddr)
+{
+	unsigned long flags;
+	unsigned short val;
+
+	PRINTK_3(TRACE_3780I,
+		"3780i::dsp3780I_ReadMsaCfg entry usDspBaseIO %x ulMsaAddr %lx\n",
+		usDspBaseIO, ulMsaAddr);
+
+	spin_lock_irqsave(&dsp_lock, flags);
+	OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr);
+	OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16));
+	val = InWordDsp(DSP_MsaDataDSISHigh);
+	spin_unlock_irqrestore(&dsp_lock, flags);
+
+	PRINTK_2(TRACE_3780I, "3780i::dsp3780I_ReadMsaCfg exit val %x\n", val);
+
+	return val;
+}
+
+void dsp3780I_WriteMsaCfg(unsigned short usDspBaseIO,
+                          unsigned long ulMsaAddr, unsigned short usValue)
+{
+	unsigned long flags;
+
+	PRINTK_4(TRACE_3780I,
+		"3780i::dsp3780i_WriteMsaCfg entry usDspBaseIO %x ulMsaAddr %lx usValue %x\n",
+		usDspBaseIO, ulMsaAddr, usValue);
+
+	spin_lock_irqsave(&dsp_lock, flags);
+	OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr);
+	OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16));
+	OutWordDsp(DSP_MsaDataDSISHigh, usValue);
+	spin_unlock_irqrestore(&dsp_lock, flags);
+}
+
+static void dsp3780I_WriteGenCfg(unsigned short usDspBaseIO, unsigned uIndex,
+				 unsigned char ucValue)
+{
+	DSP_ISA_SLAVE_CONTROL rSlaveControl;
+	DSP_ISA_SLAVE_CONTROL rSlaveControl_Save;
+
+
+	PRINTK_4(TRACE_3780I,
+		"3780i::dsp3780i_WriteGenCfg entry usDspBaseIO %x uIndex %x ucValue %x\n",
+		usDspBaseIO, uIndex, ucValue);
+
+	MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl);
+
+	PRINTK_2(TRACE_3780I,
+		"3780i::dsp3780i_WriteGenCfg rSlaveControl %x\n",
+		MKBYTE(rSlaveControl));
+
+	rSlaveControl_Save = rSlaveControl;
+	rSlaveControl.ConfigMode = true;
+
+	PRINTK_2(TRACE_3780I,
+		"3780i::dsp3780i_WriteGenCfg entry rSlaveControl+ConfigMode %x\n",
+		MKBYTE(rSlaveControl));
+
+	OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl));
+	OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex);
+	OutByteDsp(DSP_ConfigData, ucValue);
+	OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save));
+
+	PRINTK_1(TRACE_3780I, "3780i::dsp3780i_WriteGenCfg exit\n");
+
+
+}
+
+#if 0
+unsigned char dsp3780I_ReadGenCfg(unsigned short usDspBaseIO,
+                                  unsigned uIndex)
+{
+	DSP_ISA_SLAVE_CONTROL rSlaveControl;
+	DSP_ISA_SLAVE_CONTROL rSlaveControl_Save;
+	unsigned char ucValue;
+
+
+	PRINTK_3(TRACE_3780I,
+		"3780i::dsp3780i_ReadGenCfg entry usDspBaseIO %x uIndex %x\n",
+		usDspBaseIO, uIndex);
+
+	MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl);
+	rSlaveControl_Save = rSlaveControl;
+	rSlaveControl.ConfigMode = true;
+	OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl));
+	OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex);
+	ucValue = InByteDsp(DSP_ConfigData);
+	OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save));
+
+	PRINTK_2(TRACE_3780I,
+		"3780i::dsp3780i_ReadGenCfg exit ucValue %x\n", ucValue);
+
+
+	return ucValue;
+}
+#endif  /*  0  */
+
+int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS *pSettings,
+                       unsigned short *pIrqMap,
+                       unsigned short *pDmaMap)
+{
+	unsigned long flags;
+	unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+	int i;
+	DSP_UART_CFG_1 rUartCfg1;
+	DSP_UART_CFG_2 rUartCfg2;
+	DSP_HBRIDGE_CFG_1 rHBridgeCfg1;
+	DSP_HBRIDGE_CFG_2 rHBridgeCfg2;
+	DSP_BUSMASTER_CFG_1 rBusmasterCfg1;
+	DSP_BUSMASTER_CFG_2 rBusmasterCfg2;
+	DSP_ISA_PROT_CFG rIsaProtCfg;
+	DSP_POWER_MGMT_CFG rPowerMgmtCfg;
+	DSP_HBUS_TIMER_CFG rHBusTimerCfg;
+	DSP_LBUS_TIMEOUT_DISABLE rLBusTimeoutDisable;
+	DSP_CHIP_RESET rChipReset;
+	DSP_CLOCK_CONTROL_1 rClockControl1;
+	DSP_CLOCK_CONTROL_2 rClockControl2;
+	DSP_ISA_SLAVE_CONTROL rSlaveControl;
+	DSP_HBRIDGE_CONTROL rHBridgeControl;
+	unsigned short ChipID = 0;
+	unsigned short tval;
+
+
+	PRINTK_2(TRACE_3780I,
+		"3780i::dsp3780I_EnableDSP entry pSettings->bDSPEnabled %x\n",
+		pSettings->bDSPEnabled);
+
+
+	if (!pSettings->bDSPEnabled) {
+		PRINTK_ERROR( KERN_ERR "3780i::dsp3780I_EnableDSP: Error: DSP not enabled. Aborting.\n" );
+		return -EIO;
+	}
+
+
+	PRINTK_2(TRACE_3780I,
+		"3780i::dsp3780i_EnableDSP entry pSettings->bModemEnabled %x\n",
+		pSettings->bModemEnabled);
+
+	if (pSettings->bModemEnabled) {
+		rUartCfg1.Reserved = rUartCfg2.Reserved = 0;
+		rUartCfg1.IrqActiveLow = pSettings->bUartIrqActiveLow;
+		rUartCfg1.IrqPulse = pSettings->bUartIrqPulse;
+		rUartCfg1.Irq =
+			(unsigned char) pIrqMap[pSettings->usUartIrq];
+		switch (pSettings->usUartBaseIO) {
+		case 0x03F8:
+			rUartCfg1.BaseIO = 0;
+			break;
+		case 0x02F8:
+			rUartCfg1.BaseIO = 1;
+			break;
+		case 0x03E8:
+			rUartCfg1.BaseIO = 2;
+			break;
+		case 0x02E8:
+			rUartCfg1.BaseIO = 3;
+			break;
+		}
+		rUartCfg2.Enable = true;
+	}
+
+	rHBridgeCfg1.Reserved = rHBridgeCfg2.Reserved = 0;
+	rHBridgeCfg1.IrqActiveLow = pSettings->bDspIrqActiveLow;
+	rHBridgeCfg1.IrqPulse = pSettings->bDspIrqPulse;
+	rHBridgeCfg1.Irq = (unsigned char) pIrqMap[pSettings->usDspIrq];
+	rHBridgeCfg1.AccessMode = 1;
+	rHBridgeCfg2.Enable = true;
+
+
+	rBusmasterCfg2.Reserved = 0;
+	rBusmasterCfg1.Dma = (unsigned char) pDmaMap[pSettings->usDspDma];
+	rBusmasterCfg1.NumTransfers =
+		(unsigned char) pSettings->usNumTransfers;
+	rBusmasterCfg1.ReRequest = (unsigned char) pSettings->usReRequest;
+	rBusmasterCfg1.MEMCS16 = pSettings->bEnableMEMCS16;
+	rBusmasterCfg2.IsaMemCmdWidth =
+		(unsigned char) pSettings->usIsaMemCmdWidth;
+
+
+	rIsaProtCfg.Reserved = 0;
+	rIsaProtCfg.GateIOCHRDY = pSettings->bGateIOCHRDY;
+
+	rPowerMgmtCfg.Reserved = 0;
+	rPowerMgmtCfg.Enable = pSettings->bEnablePwrMgmt;
+
+	rHBusTimerCfg.LoadValue =
+		(unsigned char) pSettings->usHBusTimerLoadValue;
+
+	rLBusTimeoutDisable.Reserved = 0;
+	rLBusTimeoutDisable.DisableTimeout =
+		pSettings->bDisableLBusTimeout;
+
+	MKWORD(rChipReset) = ~pSettings->usChipletEnable;
+
+	rClockControl1.Reserved1 = rClockControl1.Reserved2 = 0;
+	rClockControl1.N_Divisor = pSettings->usN_Divisor;
+	rClockControl1.M_Multiplier = pSettings->usM_Multiplier;
+
+	rClockControl2.Reserved = 0;
+	rClockControl2.PllBypass = pSettings->bPllBypass;
+
+	/* Issue a soft reset to the chip */
+	/* Note: Since we may be coming in with 3780i clocks suspended, we must keep
+	* soft-reset active for 10ms.
+	*/
+	rSlaveControl.ClockControl = 0;
+	rSlaveControl.SoftReset = true;
+	rSlaveControl.ConfigMode = false;
+	rSlaveControl.Reserved = 0;
+
+	PRINTK_4(TRACE_3780I,
+		"3780i::dsp3780i_EnableDSP usDspBaseIO %x index %x taddr %x\n",
+		usDspBaseIO, DSP_IsaSlaveControl,
+		usDspBaseIO + DSP_IsaSlaveControl);
+
+	PRINTK_2(TRACE_3780I,
+		"3780i::dsp3780i_EnableDSP rSlaveContrl %x\n",
+		MKWORD(rSlaveControl));
+
+	spin_lock_irqsave(&dsp_lock, flags);
+	OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
+	MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl);
+
+	PRINTK_2(TRACE_3780I,
+		"3780i::dsp3780i_EnableDSP rSlaveControl 2 %x\n", tval);
+
+
+	for (i = 0; i < 11; i++)
+		udelay(2000);
+
+	rSlaveControl.SoftReset = false;
+	OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
+
+	MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl);
+
+	PRINTK_2(TRACE_3780I,
+		"3780i::dsp3780i_EnableDSP rSlaveControl 3 %x\n", tval);
+
+
+	/* Program our general configuration registers */
+	WriteGenCfg(DSP_HBridgeCfg1Index, MKBYTE(rHBridgeCfg1));
+	WriteGenCfg(DSP_HBridgeCfg2Index, MKBYTE(rHBridgeCfg2));
+	WriteGenCfg(DSP_BusMasterCfg1Index, MKBYTE(rBusmasterCfg1));
+	WriteGenCfg(DSP_BusMasterCfg2Index, MKBYTE(rBusmasterCfg2));
+	WriteGenCfg(DSP_IsaProtCfgIndex, MKBYTE(rIsaProtCfg));
+	WriteGenCfg(DSP_PowerMgCfgIndex, MKBYTE(rPowerMgmtCfg));
+	WriteGenCfg(DSP_HBusTimerCfgIndex, MKBYTE(rHBusTimerCfg));
+
+	if (pSettings->bModemEnabled) {
+		WriteGenCfg(DSP_UartCfg1Index, MKBYTE(rUartCfg1));
+		WriteGenCfg(DSP_UartCfg2Index, MKBYTE(rUartCfg2));
+	}
+
+
+	rHBridgeControl.EnableDspInt = false;
+	rHBridgeControl.MemAutoInc = true;
+	rHBridgeControl.IoAutoInc = false;
+	rHBridgeControl.DiagnosticMode = false;
+
+	PRINTK_3(TRACE_3780I,
+		"3780i::dsp3780i_EnableDSP DSP_HBridgeControl %x rHBridgeControl %x\n",
+		DSP_HBridgeControl, MKWORD(rHBridgeControl));
+
+	OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+	spin_unlock_irqrestore(&dsp_lock, flags);
+	WriteMsaCfg(DSP_LBusTimeoutDisable, MKWORD(rLBusTimeoutDisable));
+	WriteMsaCfg(DSP_ClockControl_1, MKWORD(rClockControl1));
+	WriteMsaCfg(DSP_ClockControl_2, MKWORD(rClockControl2));
+	WriteMsaCfg(DSP_ChipReset, MKWORD(rChipReset));
+
+	ChipID = ReadMsaCfg(DSP_ChipID);
+
+	PRINTK_2(TRACE_3780I,
+		"3780i::dsp3780I_EnableDSP exiting bRC=true, ChipID %x\n",
+		ChipID);
+
+	return 0;
+}
+
+int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS *pSettings)
+{
+	unsigned long flags;
+	unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+	DSP_ISA_SLAVE_CONTROL rSlaveControl;
+
+
+	PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP entry\n");
+
+	rSlaveControl.ClockControl = 0;
+	rSlaveControl.SoftReset = true;
+	rSlaveControl.ConfigMode = false;
+	rSlaveControl.Reserved = 0;
+	spin_lock_irqsave(&dsp_lock, flags);
+	OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
+
+	udelay(5);
+
+	rSlaveControl.ClockControl = 1;
+	OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
+	spin_unlock_irqrestore(&dsp_lock, flags);
+
+	udelay(5);
+
+
+	PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP exit\n");
+
+	return 0;
+}
+
+int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS *pSettings)
+{
+	unsigned long flags;
+	unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+	DSP_BOOT_DOMAIN rBootDomain;
+	DSP_HBRIDGE_CONTROL rHBridgeControl;
+
+
+	PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset entry\n");
+
+	spin_lock_irqsave(&dsp_lock, flags);
+	/* Mask DSP to PC interrupt */
+	MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
+
+	PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rHBridgeControl %x\n",
+		MKWORD(rHBridgeControl));
+
+	rHBridgeControl.EnableDspInt = false;
+	OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+	spin_unlock_irqrestore(&dsp_lock, flags);
+
+	/* Reset the core via the boot domain register */
+	rBootDomain.ResetCore = true;
+	rBootDomain.Halt = true;
+	rBootDomain.NMI = true;
+	rBootDomain.Reserved = 0;
+
+	PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rBootDomain %x\n",
+		MKWORD(rBootDomain));
+
+	WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
+
+	/* Reset all the chiplets and then reactivate them */
+	WriteMsaCfg(DSP_ChipReset, 0xFFFF);
+	udelay(5);
+	WriteMsaCfg(DSP_ChipReset,
+			(unsigned short) (~pSettings->usChipletEnable));
+
+
+	PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset exit bRC=0\n");
+
+	return 0;
+}
+
+
+int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS *pSettings)
+{
+	unsigned long flags;
+	unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+	DSP_BOOT_DOMAIN rBootDomain;
+	DSP_HBRIDGE_CONTROL rHBridgeControl;
+
+
+	PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run entry\n");
+
+
+	/* Transition the core to a running state */
+	rBootDomain.ResetCore = true;
+	rBootDomain.Halt = false;
+	rBootDomain.NMI = true;
+	rBootDomain.Reserved = 0;
+	WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
+
+	udelay(5);
+
+	rBootDomain.ResetCore = false;
+	WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
+	udelay(5);
+
+	rBootDomain.NMI = false;
+	WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
+	udelay(5);
+
+	/* Enable DSP to PC interrupt */
+	spin_lock_irqsave(&dsp_lock, flags);
+	MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
+	rHBridgeControl.EnableDspInt = true;
+
+	PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Run rHBridgeControl %x\n",
+		MKWORD(rHBridgeControl));
+
+	OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+	spin_unlock_irqrestore(&dsp_lock, flags);
+
+
+	PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run exit bRC=true\n");
+
+	return 0;
+}
+
+
+int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+                        unsigned uCount, unsigned long ulDSPAddr)
+{
+	unsigned long flags;
+	unsigned short __user *pusBuffer = pvBuffer;
+	unsigned short val;
+
+
+	PRINTK_5(TRACE_3780I,
+		"3780i::dsp3780I_ReadDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
+		usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
+
+
+	/* Set the initial MSA address. No adjustments need to be made to data store addresses */
+	spin_lock_irqsave(&dsp_lock, flags);
+	OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
+	OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16));
+	spin_unlock_irqrestore(&dsp_lock, flags);
+
+	/* Transfer the memory block */
+	while (uCount-- != 0) {
+		spin_lock_irqsave(&dsp_lock, flags);
+		val = InWordDsp(DSP_MsaDataDSISHigh);
+		spin_unlock_irqrestore(&dsp_lock, flags);
+		if (put_user(val, pusBuffer++))
+			return -EFAULT;
+
+		PRINTK_3(TRACE_3780I,
+			"3780I::dsp3780I_ReadDStore uCount %x val %x\n",
+			uCount, val);
+
+		PaceMsaAccess(usDspBaseIO);
+	}
+
+
+	PRINTK_1(TRACE_3780I,
+		"3780I::dsp3780I_ReadDStore exit bRC=true\n");
+
+	return 0;
+}
+
+int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
+                                void __user *pvBuffer, unsigned uCount,
+                                unsigned long ulDSPAddr)
+{
+	unsigned long flags;
+	unsigned short __user *pusBuffer = pvBuffer;
+	unsigned short val;
+
+
+	PRINTK_5(TRACE_3780I,
+		"3780i::dsp3780I_ReadAndDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
+		usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
+
+
+	/* Set the initial MSA address. No adjustments need to be made to data store addresses */
+	spin_lock_irqsave(&dsp_lock, flags);
+	OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
+	OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16));
+	spin_unlock_irqrestore(&dsp_lock, flags);
+
+	/* Transfer the memory block */
+	while (uCount-- != 0) {
+		spin_lock_irqsave(&dsp_lock, flags);
+		val = InWordDsp(DSP_ReadAndClear);
+		spin_unlock_irqrestore(&dsp_lock, flags);
+		if (put_user(val, pusBuffer++))
+			return -EFAULT;
+
+		PRINTK_3(TRACE_3780I,
+			"3780I::dsp3780I_ReadAndCleanDStore uCount %x val %x\n",
+			uCount, val);
+
+		PaceMsaAccess(usDspBaseIO);
+	}
+
+
+	PRINTK_1(TRACE_3780I,
+		"3780I::dsp3780I_ReadAndClearDStore exit bRC=true\n");
+
+	return 0;
+}
+
+
+int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+                         unsigned uCount, unsigned long ulDSPAddr)
+{
+	unsigned long flags;
+	unsigned short __user *pusBuffer = pvBuffer;
+
+
+	PRINTK_5(TRACE_3780I,
+		"3780i::dsp3780D_WriteDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
+		usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
+
+
+	/* Set the initial MSA address. No adjustments need to be made to data store addresses */
+	spin_lock_irqsave(&dsp_lock, flags);
+	OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
+	OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16));
+	spin_unlock_irqrestore(&dsp_lock, flags);
+
+	/* Transfer the memory block */
+	while (uCount-- != 0) {
+		unsigned short val;
+		if (get_user(val, pusBuffer++))
+			return -EFAULT;
+		spin_lock_irqsave(&dsp_lock, flags);
+		OutWordDsp(DSP_MsaDataDSISHigh, val);
+		spin_unlock_irqrestore(&dsp_lock, flags);
+
+		PRINTK_3(TRACE_3780I,
+			"3780I::dsp3780I_WriteDStore uCount %x val %x\n",
+			uCount, val);
+
+		PaceMsaAccess(usDspBaseIO);
+	}
+
+
+	PRINTK_1(TRACE_3780I,
+		"3780I::dsp3780D_WriteDStore exit bRC=true\n");
+
+	return 0;
+}
+
+
+int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+                        unsigned uCount, unsigned long ulDSPAddr)
+{
+	unsigned long flags;
+	unsigned short __user *pusBuffer = pvBuffer;
+
+	PRINTK_5(TRACE_3780I,
+		"3780i::dsp3780I_ReadIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
+		usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
+
+	/*
+	* Set the initial MSA address. To convert from an instruction store
+	* address to an MSA address, shift the address two bits to the left
+	* and set bit 22.
+	*/
+	ulDSPAddr = (ulDSPAddr << 2) | (1 << 22);
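+	/* e.g. (illustrative): i-store address 0x0100 becomes MSA 0x00400400 */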
+	spin_lock_irqsave(&dsp_lock, flags);
+	OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
+	OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16));
+	spin_unlock_irqrestore(&dsp_lock, flags);
+
+	/* Transfer the memory block */
+	while (uCount-- != 0) {
+		unsigned short val_lo, val_hi;
+		spin_lock_irqsave(&dsp_lock, flags);
+		val_lo = InWordDsp(DSP_MsaDataISLow);
+		val_hi = InWordDsp(DSP_MsaDataDSISHigh);
+		spin_unlock_irqrestore(&dsp_lock, flags);
+		if (put_user(val_lo, pusBuffer++))
+			return -EFAULT;
+		if (put_user(val_hi, pusBuffer++))
+			return -EFAULT;
+
+		PRINTK_4(TRACE_3780I,
+			"3780I::dsp3780I_ReadIStore uCount %x val_lo %x val_hi %x\n",
+			uCount, val_lo, val_hi);
+
+		PaceMsaAccess(usDspBaseIO);
+
+	}
+
+	PRINTK_1(TRACE_3780I,
+		"3780I::dsp3780I_ReadIStore exit bRC=true\n");
+
+	return 0;
+}
+
+
+int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+                         unsigned uCount, unsigned long ulDSPAddr)
+{
+	unsigned long flags;
+	unsigned short __user *pusBuffer = pvBuffer;
+
+	PRINTK_5(TRACE_3780I,
+		"3780i::dsp3780I_WriteIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
+		usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
+
+
+	/*
+	* Set the initial MSA address. To convert from an instruction store
+	* address to an MSA address, shift the address two bits to the left
+	* and set bit 22.
+	*/
+	ulDSPAddr = (ulDSPAddr << 2) | (1 << 22);
+	spin_lock_irqsave(&dsp_lock, flags);
+	OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
+	OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16));
+	spin_unlock_irqrestore(&dsp_lock, flags);
+
+	/* Transfer the memory block */
+	while (uCount-- != 0) {
+		unsigned short val_lo, val_hi;
+		if (get_user(val_lo, pusBuffer++))
+			return -EFAULT;
+		if (get_user(val_hi, pusBuffer++))
+			return -EFAULT;
+		spin_lock_irqsave(&dsp_lock, flags);
+		OutWordDsp(DSP_MsaDataISLow, val_lo);
+		OutWordDsp(DSP_MsaDataDSISHigh, val_hi);
+		spin_unlock_irqrestore(&dsp_lock, flags);
+
+		PRINTK_4(TRACE_3780I,
+			"3780I::dsp3780I_WriteIStore uCount %x val_lo %x val_hi %x\n",
+			uCount, val_lo, val_hi);
+
+		PaceMsaAccess(usDspBaseIO);
+
+	}
+
+	PRINTK_1(TRACE_3780I,
+		"3780I::dsp3780I_WriteIStore exit bRC=true\n");
+
+	return 0;
+}
+
+
+int dsp3780I_GetIPCSource(unsigned short usDspBaseIO,
+                          unsigned short *pusIPCSource)
+{
+	unsigned long flags;
+	DSP_HBRIDGE_CONTROL rHBridgeControl;
+	unsigned short temp;
+
+
+	PRINTK_3(TRACE_3780I,
+		"3780i::dsp3780I_GetIPCSource entry usDspBaseIO %x pusIPCSource %p\n",
+		usDspBaseIO, pusIPCSource);
+
+	/*
+	* Disable DSP to PC interrupts, read the interrupt register,
+	* clear the pending IPC bits, and reenable DSP to PC interrupts
+	*/
+	spin_lock_irqsave(&dsp_lock, flags);
+	MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
+	rHBridgeControl.EnableDspInt = false;
+	OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+
+	*pusIPCSource = InWordDsp(DSP_Interrupt);
+	temp = (unsigned short) ~(*pusIPCSource);
+
+	PRINTK_3(TRACE_3780I,
+		"3780i::dsp3780I_GetIPCSource, usIPCSource %x ~ %x\n",
+		*pusIPCSource, temp);
+
+	OutWordDsp(DSP_Interrupt, (unsigned short) ~(*pusIPCSource));
+
+	rHBridgeControl.EnableDspInt = true;
+	OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+	spin_unlock_irqrestore(&dsp_lock, flags);
+
+
+	PRINTK_2(TRACE_3780I,
+		"3780i::dsp3780I_GetIPCSource exit usIPCSource %x\n",
+		*pusIPCSource);
+
+	return 0;
+}
diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h
new file mode 100644
index 0000000..9ccb6b2
--- /dev/null
+++ b/drivers/char/mwave/3780i.h
@@ -0,0 +1,358 @@
+/*
+*
+* 3780i.h -- declarations for 3780i.c
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*
+*
+* 10/23/2000 - Alpha Release
+*	First release to the public
+*/
+
+#ifndef _LINUX_3780I_H
+#define _LINUX_3780I_H
+
+#include <asm/io.h>
+
+/* DSP I/O port offsets and definitions */
+#define DSP_IsaSlaveControl        0x0000	/* ISA slave control register */
+#define DSP_IsaSlaveStatus         0x0001	/* ISA slave status register */
+#define DSP_ConfigAddress          0x0002	/* General config address register */
+#define DSP_ConfigData             0x0003	/* General config data register */
+#define DSP_HBridgeControl         0x0002	/* HBridge control register */
+#define DSP_MsaAddrLow             0x0004	/* MSP System Address, low word */
+#define DSP_MsaAddrHigh            0x0006	/* MSP System Address, high word */
+#define DSP_MsaDataDSISHigh        0x0008	/* MSA data register: d-store word or high byte of i-store */
+#define DSP_MsaDataISLow           0x000A	/* MSA data register: low word of i-store */
+#define DSP_ReadAndClear           0x000C	/* MSA read and clear data register */
+#define DSP_Interrupt              0x000E	/* Interrupt register (IPC source) */
+
+typedef struct {
+	unsigned char ClockControl:1;	/* RW: Clock control: 0=normal, 1=stop 3780i clocks */
+	unsigned char SoftReset:1;	/* RW: Soft reset 0=normal, 1=soft reset active */
+	unsigned char ConfigMode:1;	/* RW: Configuration mode, 0=normal, 1=config mode */
+	unsigned char Reserved:5;	/* 0: Reserved */
+} DSP_ISA_SLAVE_CONTROL;
+
+
+typedef struct {
+	unsigned short EnableDspInt:1;	/* RW: Enable DSP to X86 ISA interrupt 0=mask it, 1=enable it */
+	unsigned short MemAutoInc:1;	/* RW: Memory address auto increment, 0=disable, 1=enable */
+	unsigned short IoAutoInc:1;	/* RW: I/O address auto increment, 0=disable, 1=enable */
+	unsigned short DiagnosticMode:1;	/* RW: Diagnostic mode: 0=normal, 1=diagnostic mode */
+	unsigned short IsaPacingTimer:12;	/* R: ISA access pacing timer: count of core cycles stolen */
+} DSP_HBRIDGE_CONTROL;
+
+
+/* DSP register indexes used with the configuration register address (index) register */
+#define DSP_UartCfg1Index          0x0003	/* UART config register 1 */
+#define DSP_UartCfg2Index          0x0004	/* UART config register 2 */
+#define DSP_HBridgeCfg1Index       0x0007	/* HBridge config register 1 */
+#define DSP_HBridgeCfg2Index       0x0008	/* HBridge config register 2 */
+#define DSP_BusMasterCfg1Index     0x0009	/* ISA bus master config register 1 */
+#define DSP_BusMasterCfg2Index     0x000A	/* ISA bus master config register 2 */
+#define DSP_IsaProtCfgIndex        0x000F	/* ISA protocol control register */
+#define DSP_PowerMgCfgIndex        0x0010	/* Low power suspend/resume enable */
+#define DSP_HBusTimerCfgIndex      0x0011	/* HBUS timer load value */
+
+typedef struct {
+	unsigned char IrqActiveLow:1;	/* RW: IRQ active high or low: 0=high, 1=low */
+	unsigned char IrqPulse:1;	/* RW: IRQ pulse or level: 0=level, 1=pulse  */
+	unsigned char Irq:3;	/* RW: IRQ selection */
+	unsigned char BaseIO:2;	/* RW: Base I/O selection */
+	unsigned char Reserved:1;	/* 0: Reserved */
+} DSP_UART_CFG_1;
+
+typedef struct {
+	unsigned char Enable:1;	/* RW: Enable I/O and IRQ: 0=false, 1=true */
+	unsigned char Reserved:7;	/* 0: Reserved */
+} DSP_UART_CFG_2;
+
+typedef struct {
+	unsigned char IrqActiveLow:1;	/* RW: IRQ active high=0 or low=1 */
+	unsigned char IrqPulse:1;	/* RW: IRQ pulse=1 or level=0 */
+	unsigned char Irq:3;	/* RW: IRQ selection */
+	unsigned char AccessMode:1;	/* RW: 16-bit register access method 0=byte, 1=word */
+	unsigned char Reserved:2;	/* 0: Reserved */
+} DSP_HBRIDGE_CFG_1;
+
+typedef struct {
+	unsigned char Enable:1;	/* RW: enable I/O and IRQ: 0=false, 1=true */
+	unsigned char Reserved:7;	/* 0: Reserved */
+} DSP_HBRIDGE_CFG_2;
+
+
+typedef struct {
+	unsigned char Dma:3;	/* RW: DMA channel selection */
+	unsigned char NumTransfers:2;	/* RW: Maximum # of transfers once being granted the ISA bus */
+	unsigned char ReRequest:2;	/* RW: Minimum delay between releasing the ISA bus and requesting it again */
+	unsigned char MEMCS16:1;	/* RW: ISA signal MEMCS16: 0=disabled, 1=enabled */
+} DSP_BUSMASTER_CFG_1;
+
+typedef struct {
+	unsigned char IsaMemCmdWidth:2;	/* RW: ISA memory command width */
+	unsigned char Reserved:6;	/* 0: Reserved */
+} DSP_BUSMASTER_CFG_2;
+
+
+typedef struct {
+	unsigned char GateIOCHRDY:1;	/* RW: Enable IOCHRDY gating: 0=false, 1=true */
+	unsigned char Reserved:7;	/* 0: Reserved */
+} DSP_ISA_PROT_CFG;
+
+typedef struct {
+	unsigned char Enable:1;	/* RW: Enable low power suspend/resume 0=false, 1=true */
+	unsigned char Reserved:7;	/* 0: Reserved */
+} DSP_POWER_MGMT_CFG;
+
+typedef struct {
+	unsigned char LoadValue:8;	/* RW: HBUS timer load value */
+} DSP_HBUS_TIMER_CFG;
+
+
+
+/* DSP registers that exist in MSA I/O space */
+#define DSP_ChipID                 0x80000000
+#define DSP_MspBootDomain          0x80000580
+#define DSP_LBusTimeoutDisable     0x80000580
+#define DSP_ClockControl_1         0x8000058A
+#define DSP_ClockControl_2         0x8000058C
+#define DSP_ChipReset              0x80000588
+#define DSP_GpioModeControl_15_8   0x80000082
+#define DSP_GpioDriverEnable_15_8  0x80000076
+#define DSP_GpioOutputData_15_8    0x80000072
+
+typedef struct {
+	unsigned short NMI:1;	/* RW: Non-maskable interrupt */
+	unsigned short Halt:1;	/* RW: Halt MSP clock */
+	unsigned short ResetCore:1;	/* RW: Reset MSP core interface */
+	unsigned short Reserved:13;	/* 0: Reserved */
+} DSP_BOOT_DOMAIN;
+
+typedef struct {
+	unsigned short DisableTimeout:1;	/* RW: Disable LBus timeout */
+	unsigned short Reserved:15;	/* 0: Reserved */
+} DSP_LBUS_TIMEOUT_DISABLE;
+
+typedef struct {
+	unsigned short Memory:1;	/* RW: Reset memory interface */
+	unsigned short SerialPort1:1;	/* RW: Reset serial port 1 interface */
+	unsigned short SerialPort2:1;	/* RW: Reset serial port 2 interface */
+	unsigned short SerialPort3:1;	/* RW: Reset serial port 3 interface */
+	unsigned short Gpio:1;	/* RW: Reset GPIO interface */
+	unsigned short Dma:1;	/* RW: Reset DMA interface */
+	unsigned short SoundBlaster:1;	/* RW: Reset Sound Blaster interface */
+	unsigned short Uart:1;	/* RW: Reset UART interface */
+	unsigned short Midi:1;	/* RW: Reset MIDI interface */
+	unsigned short IsaMaster:1;	/* RW: Reset ISA master interface */
+	unsigned short Reserved:6;	/* 0: Reserved */
+} DSP_CHIP_RESET;
+
+typedef struct {
+	unsigned short N_Divisor:6;	/* RW: (N) PLL output clock divisor */
+	unsigned short Reserved1:2;	/* 0: reserved */
+	unsigned short M_Multiplier:6;	/* RW: (M) PLL feedback clock multiplier */
+	unsigned short Reserved2:2;	/* 0: reserved */
+} DSP_CLOCK_CONTROL_1;
+
+typedef struct {
+	unsigned short PllBypass:1;	/* RW: PLL Bypass */
+	unsigned short Reserved:15;	/* 0: Reserved */
+} DSP_CLOCK_CONTROL_2;
+
+typedef struct {
+	unsigned short Latch8:1;
+	unsigned short Latch9:1;
+	unsigned short Latch10:1;
+	unsigned short Latch11:1;
+	unsigned short Latch12:1;
+	unsigned short Latch13:1;
+	unsigned short Latch14:1;
+	unsigned short Latch15:1;
+	unsigned short Mask8:1;
+	unsigned short Mask9:1;
+	unsigned short Mask10:1;
+	unsigned short Mask11:1;
+	unsigned short Mask12:1;
+	unsigned short Mask13:1;
+	unsigned short Mask14:1;
+	unsigned short Mask15:1;
+} DSP_GPIO_OUTPUT_DATA_15_8;
+
+typedef struct {
+	unsigned short Enable8:1;
+	unsigned short Enable9:1;
+	unsigned short Enable10:1;
+	unsigned short Enable11:1;
+	unsigned short Enable12:1;
+	unsigned short Enable13:1;
+	unsigned short Enable14:1;
+	unsigned short Enable15:1;
+	unsigned short Mask8:1;
+	unsigned short Mask9:1;
+	unsigned short Mask10:1;
+	unsigned short Mask11:1;
+	unsigned short Mask12:1;
+	unsigned short Mask13:1;
+	unsigned short Mask14:1;
+	unsigned short Mask15:1;
+} DSP_GPIO_DRIVER_ENABLE_15_8;
+
+typedef struct {
+	unsigned short GpioMode8:2;
+	unsigned short GpioMode9:2;
+	unsigned short GpioMode10:2;
+	unsigned short GpioMode11:2;
+	unsigned short GpioMode12:2;
+	unsigned short GpioMode13:2;
+	unsigned short GpioMode14:2;
+	unsigned short GpioMode15:2;
+} DSP_GPIO_MODE_15_8;
+
+/* Component masks that are defined in dspmgr.h */
+#define MW_ADC_MASK    0x0001
+#define MW_AIC2_MASK   0x0006
+#define MW_MIDI_MASK   0x0008
+#define MW_CDDAC_MASK  0x8001
+#define MW_AIC1_MASK   0xE006
+#define MW_UART_MASK   0xE00A
+#define MW_ACI_MASK    0xE00B
+
+/*
+* Definition of 3780i configuration structure.  Unless otherwise stated,
+* these values are provided as input to the 3780i support layer.  At present,
+* the only values maintained by the 3780i support layer are the saved UART
+* registers.
+*/
+typedef struct _DSP_3780I_CONFIG_SETTINGS {
+
+	/* Location of base configuration register */
+	unsigned short usBaseConfigIO;
+
+	/* Enables for various DSP components */
+	int bDSPEnabled;
+	int bModemEnabled;
+	int bInterruptClaimed;
+
+	/* IRQ, DMA, and Base I/O addresses for various DSP components */
+	unsigned short usDspIrq;
+	unsigned short usDspDma;
+	unsigned short usDspBaseIO;
+	unsigned short usUartIrq;
+	unsigned short usUartBaseIO;
+
+	/* IRQ modes for various DSP components */
+	int bDspIrqActiveLow;
+	int bUartIrqActiveLow;
+	int bDspIrqPulse;
+	int bUartIrqPulse;
+
+	/* Card abilities */
+	unsigned uIps;
+	unsigned uDStoreSize;
+	unsigned uIStoreSize;
+	unsigned uDmaBandwidth;
+
+	/* Adapter specific 3780i settings */
+	unsigned short usNumTransfers;
+	unsigned short usReRequest;
+	int bEnableMEMCS16;
+	unsigned short usIsaMemCmdWidth;
+	int bGateIOCHRDY;
+	int bEnablePwrMgmt;
+	unsigned short usHBusTimerLoadValue;
+	int bDisableLBusTimeout;
+	unsigned short usN_Divisor;
+	unsigned short usM_Multiplier;
+	int bPllBypass;
+	unsigned short usChipletEnable;	/* Used with the chip reset register to enable specific chiplets */
+
+	/* Saved UART registers. These are maintained by the 3780i support layer. */
+	int bUartSaved;		/* True after a successful save of the UART registers */
+	unsigned char ucIER;	/* Interrupt enable register */
+	unsigned char ucFCR;	/* FIFO control register */
+	unsigned char ucLCR;	/* Line control register */
+	unsigned char ucMCR;	/* Modem control register */
+	unsigned char ucSCR;	/* Scratch register */
+	unsigned char ucDLL;	/* Divisor latch, low byte */
+	unsigned char ucDLM;	/* Divisor latch, high byte */
+} DSP_3780I_CONFIG_SETTINGS;
+
+
+/* 3780i support functions */
+int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
+                       unsigned short *pIrqMap,
+                       unsigned short *pDmaMap);
+int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings);
+int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings);
+int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings);
+int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+                        unsigned uCount, unsigned long ulDSPAddr);
+int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
+                                void __user *pvBuffer, unsigned uCount,
+                                unsigned long ulDSPAddr);
+int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+                         unsigned uCount, unsigned long ulDSPAddr);
+int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+                        unsigned uCount, unsigned long ulDSPAddr);
+int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+                         unsigned uCount, unsigned long ulDSPAddr);
+unsigned short dsp3780I_ReadMsaCfg(unsigned short usDspBaseIO,
+                                   unsigned long ulMsaAddr);
+void dsp3780I_WriteMsaCfg(unsigned short usDspBaseIO,
+                          unsigned long ulMsaAddr, unsigned short usValue);
+int dsp3780I_GetIPCSource(unsigned short usDspBaseIO,
+                          unsigned short *pusIPCSource);
+
+/* I/O port access macros */
+#define MKWORD(var) (*((unsigned short *)(&var)))
+#define MKBYTE(var) (*((unsigned char *)(&var)))
+
+#define WriteMsaCfg(addr,value) dsp3780I_WriteMsaCfg(usDspBaseIO,addr,value)
+#define ReadMsaCfg(addr) dsp3780I_ReadMsaCfg(usDspBaseIO,addr)
+#define WriteGenCfg(index,value) dsp3780I_WriteGenCfg(usDspBaseIO,index,value)
+#define ReadGenCfg(index) dsp3780I_ReadGenCfg(usDspBaseIO,index)
+
+#define InWordDsp(index)          inw(usDspBaseIO+index)
+#define InByteDsp(index)          inb(usDspBaseIO+index)
+#define OutWordDsp(index,value)   outw(value,usDspBaseIO+index)
+#define OutByteDsp(index,value)   outb(value,usDspBaseIO+index)
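+
+/*
+ * Editorial sketch (not in the original header): the bitfield structs
+ * above are moved to and from the hardware by flattening them with
+ * MKWORD/MKBYTE rather than by per-field I/O.  Note the I/O macros
+ * assume a local usDspBaseIO variable in scope.  Assuming a register
+ * offset such as the hypothetical DSP_HBridgeControl, a caller might
+ * read-modify-write a control register like this:
+ *
+ *	DSP_HBRIDGE_CONTROL rHBridgeControl;
+ *
+ *	MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
+ *	rHBridgeControl.EnableDspInt = true;
+ *	OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+ */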
+
+#endif
diff --git a/drivers/char/mwave/Makefile b/drivers/char/mwave/Makefile
new file mode 100644
index 0000000..efa6a82
--- /dev/null
+++ b/drivers/char/mwave/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for ACP Modem (Mwave).
+#
+# See the README file in this directory for more info. <paulsch@us.ibm.com>
+#
+
+obj-$(CONFIG_MWAVE) += mwave.o
+
+mwave-y := mwavedd.o smapi.o tp3780i.o 3780i.o
+
+# To have the mwave driver disable other uarts if necessary
+# ccflags-y := -DMWAVE_FUTZ_WITH_OTHER_DEVICES
+
+# To compile in lots (~20 KiB) of run-time enablable printk()s for debugging:
+ccflags-y += -DMW_TRACE
diff --git a/drivers/char/mwave/README b/drivers/char/mwave/README
new file mode 100644
index 0000000..c2a58f4
--- /dev/null
+++ b/drivers/char/mwave/README
@@ -0,0 +1,47 @@
+Module options
+--------------
+
+The mwave module takes the following options.  Note that these options
+are not saved by the BIOS and so do not persist after unload and reload.
+
+  mwave_debug=value, where value is a bitwise OR of the trace flags:
+	0x0001 mwavedd api tracing
+	0x0002 smapi api tracing
+	0x0004 3780i tracing
+	0x0008 tp3780i tracing
+
+        Tracing only occurs if the driver has been compiled with the
+        MW_TRACE macro #defined (i.e. with ccflags-y += -DMW_TRACE in
+        the Makefile).
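+
+        For example, to load the driver with all four trace classes
+        enabled (assuming an MW_TRACE build):
+
+          insmod mwave mwave_debug=0x000f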
+
+  mwave_3780i_irq=5/7/10/11/15
+	If the DSP IRQ has not been set up and stored in the BIOS by the
+	ThinkPad configuration utility, this parameter allows the IRQ
+	used by the DSP to be configured.
+
+  mwave_3780i_io=0x130/0x350/0x0070/0xDB0
+	If the DSP I/O range has not been set up and stored in the BIOS
+	by the ThinkPad configuration utility, this parameter allows the
+	I/O range used by the DSP to be configured.
+
+  mwave_uart_irq=3/4
+	If the mwave UART IRQ has not been set up and stored in the BIOS
+	by the ThinkPad configuration utility, this parameter allows the
+	IRQ used by the mwave UART to be configured.
+
+  mwave_uart_io=0x3f8/0x2f8/0x3E8/0x2E8
+	If the UART I/O range has not been set up and stored in the BIOS
+	by the ThinkPad configuration utility, this parameter allows the
+	I/O range used by the mwave UART to be configured.
+
+Example to enable the 3780i DSP using ttyS1 resources:
+	
+  insmod mwave mwave_3780i_irq=10 mwave_3780i_io=0x0130 mwave_uart_irq=3 mwave_uart_io=0x2f8
+
+Accessing the driver
+--------------------
+
+You must also create a node for the driver:
+  mkdir -p /dev/modems
+  mknod --mode=660 /dev/modems/mwave c 10 219
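+
+  (Major 10 is the misc device major; minor 219 is the number the
+  driver registers through MWAVE_MINOR.)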
+
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
new file mode 100644
index 0000000..b5e3103
--- /dev/null
+++ b/drivers/char/mwave/mwavedd.c
@@ -0,0 +1,697 @@
+/*
+*
+* mwavedd.c -- mwave device driver
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*
+*
+* 10/23/2000 - Alpha Release
+*	First release to the public
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/major.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/serial.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/serial_8250.h>
+#include "smapi.h"
+#include "mwavedd.h"
+#include "3780i.h"
+#include "tp3780i.h"
+
+MODULE_DESCRIPTION("3780i Advanced Communications Processor (Mwave) driver");
+MODULE_AUTHOR("Mike Sullivan and Paul Schroeder");
+MODULE_LICENSE("GPL");
+
+/*
+* These parameters support the setting of MWave resources. Note that no
+* checks are made against other devices (i.e. superio) for conflicts.
+* We'll depend on users using the tpctl utility to do that for now.
+*/
+static DEFINE_MUTEX(mwave_mutex);
+int mwave_debug = 0;
+int mwave_3780i_irq = 0;
+int mwave_3780i_io = 0;
+int mwave_uart_irq = 0;
+int mwave_uart_io = 0;
+module_param(mwave_debug, int, 0);
+module_param_hw(mwave_3780i_irq, int, irq, 0);
+module_param_hw(mwave_3780i_io, int, ioport, 0);
+module_param_hw(mwave_uart_irq, int, irq, 0);
+module_param_hw(mwave_uart_io, int, ioport, 0);
+
+static int mwave_open(struct inode *inode, struct file *file);
+static int mwave_close(struct inode *inode, struct file *file);
+static long mwave_ioctl(struct file *filp, unsigned int iocmd,
+							unsigned long ioarg);
+
+MWAVE_DEVICE_DATA mwave_s_mdd;
+
+static int mwave_open(struct inode *inode, struct file *file)
+{
+	unsigned int retval = 0;
+
+	PRINTK_3(TRACE_MWAVE,
+		"mwavedd::mwave_open, entry inode %p file %p\n",
+		 inode, file);
+	PRINTK_2(TRACE_MWAVE,
+		"mwavedd::mwave_open, exit return retval %x\n", retval);
+
+	return retval;
+}
+
+static int mwave_close(struct inode *inode, struct file *file)
+{
+	unsigned int retval = 0;
+
+	PRINTK_3(TRACE_MWAVE,
+		"mwavedd::mwave_close, entry inode %p file %p\n",
+		 inode,  file);
+
+	PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n",
+		retval);
+
+	return retval;
+}
+
+static long mwave_ioctl(struct file *file, unsigned int iocmd,
+							unsigned long ioarg)
+{
+	unsigned int retval = 0;
+	pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+	void __user *arg = (void __user *)ioarg;
+
+	PRINTK_4(TRACE_MWAVE,
+		"mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n",
+		file, iocmd, (int) ioarg);
+
+	switch (iocmd) {
+
+		case IOCTL_MW_RESET:
+			PRINTK_1(TRACE_MWAVE,
+				"mwavedd::mwave_ioctl, IOCTL_MW_RESET"
+				" calling tp3780I_ResetDSP\n");
+			mutex_lock(&mwave_mutex);
+			retval = tp3780I_ResetDSP(&pDrvData->rBDData);
+			mutex_unlock(&mwave_mutex);
+			PRINTK_2(TRACE_MWAVE,
+				"mwavedd::mwave_ioctl, IOCTL_MW_RESET"
+				" retval %x from tp3780I_ResetDSP\n",
+				retval);
+			break;
+	
+		case IOCTL_MW_RUN:
+			PRINTK_1(TRACE_MWAVE,
+				"mwavedd::mwave_ioctl, IOCTL_MW_RUN"
+				" calling tp3780I_StartDSP\n");
+			mutex_lock(&mwave_mutex);
+			retval = tp3780I_StartDSP(&pDrvData->rBDData);
+			mutex_unlock(&mwave_mutex);
+			PRINTK_2(TRACE_MWAVE,
+				"mwavedd::mwave_ioctl, IOCTL_MW_RUN"
+				" retval %x from tp3780I_StartDSP\n",
+				retval);
+			break;
+	
+		case IOCTL_MW_DSP_ABILITIES: {
+			MW_ABILITIES rAbilities;
+	
+			PRINTK_1(TRACE_MWAVE,
+				"mwavedd::mwave_ioctl,"
+				" IOCTL_MW_DSP_ABILITIES calling"
+				" tp3780I_QueryAbilities\n");
+			mutex_lock(&mwave_mutex);
+			retval = tp3780I_QueryAbilities(&pDrvData->rBDData,
+					&rAbilities);
+			mutex_unlock(&mwave_mutex);
+			PRINTK_2(TRACE_MWAVE,
+				"mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
+				" retval %x from tp3780I_QueryAbilities\n",
+				retval);
+			if (retval == 0) {
+				if( copy_to_user(arg, &rAbilities,
+							sizeof(MW_ABILITIES)) )
+					return -EFAULT;
+			}
+			PRINTK_2(TRACE_MWAVE,
+				"mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
+				" exit retval %x\n",
+				retval);
+		}
+			break;
+	
+		case IOCTL_MW_READ_DATA:
+		case IOCTL_MW_READCLEAR_DATA: {
+			MW_READWRITE rReadData;
+			unsigned short __user *pusBuffer = NULL;
+	
+			if( copy_from_user(&rReadData, arg,
+						sizeof(MW_READWRITE)) )
+				return -EFAULT;
+			pusBuffer = (unsigned short __user *) (rReadData.pBuf);
+	
+			PRINTK_4(TRACE_MWAVE,
+				"mwavedd::mwave_ioctl IOCTL_MW_READ_DATA,"
+				" size %lx, ioarg %lx pusBuffer %p\n",
+				rReadData.ulDataLength, ioarg, pusBuffer);
+			mutex_lock(&mwave_mutex);
+			retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
+					iocmd,
+					pusBuffer,
+					rReadData.ulDataLength,
+					rReadData.usDspAddress);
+			mutex_unlock(&mwave_mutex);
+		}
+			break;
+	
+		case IOCTL_MW_READ_INST: {
+			MW_READWRITE rReadData;
+			unsigned short __user *pusBuffer = NULL;
+	
+			if( copy_from_user(&rReadData, arg,
+						sizeof(MW_READWRITE)) )
+				return -EFAULT;
+			pusBuffer = (unsigned short __user *) (rReadData.pBuf);
+	
+			PRINTK_4(TRACE_MWAVE,
+				"mwavedd::mwave_ioctl IOCTL_MW_READ_INST,"
+				" size %lx, ioarg %lx pusBuffer %p\n",
+				rReadData.ulDataLength / 2, ioarg,
+				pusBuffer);
+			mutex_lock(&mwave_mutex);
+			retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
+				iocmd, pusBuffer,
+				rReadData.ulDataLength / 2,
+				rReadData.usDspAddress);
+			mutex_unlock(&mwave_mutex);
+		}
+			break;
+	
+		case IOCTL_MW_WRITE_DATA: {
+			MW_READWRITE rWriteData;
+			unsigned short __user *pusBuffer = NULL;
+	
+			if( copy_from_user(&rWriteData, arg,
+						sizeof(MW_READWRITE)) )
+				return -EFAULT;
+			pusBuffer = (unsigned short __user *) (rWriteData.pBuf);
+	
+			PRINTK_4(TRACE_MWAVE,
+				"mwavedd::mwave_ioctl IOCTL_MW_WRITE_DATA,"
+				" size %lx, ioarg %lx pusBuffer %p\n",
+				rWriteData.ulDataLength, ioarg,
+				pusBuffer);
+			mutex_lock(&mwave_mutex);
+			retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
+					iocmd, pusBuffer,
+					rWriteData.ulDataLength,
+					rWriteData.usDspAddress);
+			mutex_unlock(&mwave_mutex);
+		}
+			break;
+	
+		case IOCTL_MW_WRITE_INST: {
+			MW_READWRITE rWriteData;
+			unsigned short __user *pusBuffer = NULL;
+	
+			if( copy_from_user(&rWriteData, arg,
+						sizeof(MW_READWRITE)) )
+				return -EFAULT;
+			pusBuffer = (unsigned short __user *)(rWriteData.pBuf);
+	
+			PRINTK_4(TRACE_MWAVE,
+				"mwavedd::mwave_ioctl IOCTL_MW_WRITE_INST,"
+				" size %lx, ioarg %lx pusBuffer %p\n",
+				rWriteData.ulDataLength, ioarg,
+				pusBuffer);
+			mutex_lock(&mwave_mutex);
+			retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
+					iocmd, pusBuffer,
+					rWriteData.ulDataLength,
+					rWriteData.usDspAddress);
+			mutex_unlock(&mwave_mutex);
+		}
+			break;
+	
+		case IOCTL_MW_REGISTER_IPC: {
+			unsigned int ipcnum = (unsigned int) ioarg;
+	
+			if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
+				PRINTK_ERROR(KERN_ERR_MWAVE
+						"mwavedd::mwave_ioctl:"
+						" IOCTL_MW_REGISTER_IPC:"
+						" Error: Invalid ipcnum %x\n",
+						ipcnum);
+				return -EINVAL;
+			}
+			PRINTK_3(TRACE_MWAVE,
+				"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
+				" ipcnum %x entry usIntCount %x\n",
+				ipcnum,
+				pDrvData->IPCs[ipcnum].usIntCount);
+
+			mutex_lock(&mwave_mutex);
+			pDrvData->IPCs[ipcnum].bIsHere = false;
+			pDrvData->IPCs[ipcnum].bIsEnabled = true;
+			mutex_unlock(&mwave_mutex);
+	
+			PRINTK_2(TRACE_MWAVE,
+				"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
+				" ipcnum %x exit\n",
+				ipcnum);
+		}
+			break;
+	
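+		/*
+		 * Editorial note: IOCTL_MW_GET_IPC below blocks the caller
+		 * until the DSP interrupt handler (registered elsewhere in
+		 * the driver) wakes ipc_wait_queue.  usIntCount carries the
+		 * handshake: 0 = no interrupt yet, 1 = first interrupt
+		 * pending, 2 = first interrupt already consumed.
+		 */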
+		case IOCTL_MW_GET_IPC: {
+			unsigned int ipcnum = (unsigned int) ioarg;
+	
+			if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
+				PRINTK_ERROR(KERN_ERR_MWAVE
+						"mwavedd::mwave_ioctl:"
+						" IOCTL_MW_GET_IPC: Error:"
+						" Invalid ipcnum %x\n", ipcnum);
+				return -EINVAL;
+			}
+			PRINTK_3(TRACE_MWAVE,
+				"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
+				" ipcnum %x, usIntCount %x\n",
+				ipcnum,
+				pDrvData->IPCs[ipcnum].usIntCount);
+	
+			mutex_lock(&mwave_mutex);
+			if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
+				DECLARE_WAITQUEUE(wait, current);
+
+				PRINTK_2(TRACE_MWAVE,
+					"mwavedd::mwave_ioctl, thread for"
+					" ipc %x going to sleep\n",
+					ipcnum);
+				add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
+				pDrvData->IPCs[ipcnum].bIsHere = true;
+				set_current_state(TASK_INTERRUPTIBLE);
+				/* check whether an event was signalled by */
+				/* the interrupt handler while we were gone */
+				if (pDrvData->IPCs[ipcnum].usIntCount == 1) {	/* first int has occurred (race condition) */
+					pDrvData->IPCs[ipcnum].usIntCount = 2;	/* first int has been handled */
+					PRINTK_2(TRACE_MWAVE,
+						"mwavedd::mwave_ioctl"
+						" IOCTL_MW_GET_IPC ipcnum %x"
+						" handling first int\n",
+						ipcnum);
+				} else {	/* either 1st int has not yet occurred, or we have already handled the first int */
+					schedule();
+					if (pDrvData->IPCs[ipcnum].usIntCount == 1) {
+						pDrvData->IPCs[ipcnum].usIntCount = 2;
+					}
+					PRINTK_2(TRACE_MWAVE,
+						"mwavedd::mwave_ioctl"
+						" IOCTL_MW_GET_IPC ipcnum %x"
+						" woke up and returning to"
+						" application\n",
+						ipcnum);
+				}
+				pDrvData->IPCs[ipcnum].bIsHere = false;
+				remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
+				set_current_state(TASK_RUNNING);
+				PRINTK_2(TRACE_MWAVE,
+					"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC,"
+					" returning thread for ipc %x"
+					" processing\n",
+					ipcnum);
+			}
+			mutex_unlock(&mwave_mutex);
+		}
+			break;
+	
+		case IOCTL_MW_UNREGISTER_IPC: {
+			unsigned int ipcnum = (unsigned int) ioarg;
+	
+			PRINTK_2(TRACE_MWAVE,
+				"mwavedd::mwave_ioctl IOCTL_MW_UNREGISTER_IPC"
+				" ipcnum %x\n",
+				ipcnum);
+			if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
+				PRINTK_ERROR(KERN_ERR_MWAVE
+						"mwavedd::mwave_ioctl:"
+						" IOCTL_MW_UNREGISTER_IPC:"
+						" Error: Invalid ipcnum %x\n",
+						ipcnum);
+				return -EINVAL;
+			}
+			mutex_lock(&mwave_mutex);
+			if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
+				pDrvData->IPCs[ipcnum].bIsEnabled = false;
+				if (pDrvData->IPCs[ipcnum].bIsHere == true) {
+					wake_up_interruptible(&pDrvData->IPCs[ipcnum].ipc_wait_queue);
+				}
+			}
+			mutex_unlock(&mwave_mutex);
+		}
+			break;
+	
+		default:
+			return -ENOTTY;
+	} /* switch */
+
+	PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval);
+
+	return retval;
+}
+
+
+static ssize_t mwave_read(struct file *file, char __user *buf, size_t count,
+                          loff_t * ppos)
+{
+	PRINTK_5(TRACE_MWAVE,
+		"mwavedd::mwave_read entry file %p, buf %p, count %zx ppos %p\n",
+		file, buf, count, ppos);
+
+	return -EINVAL;
+}
+
+
+static ssize_t mwave_write(struct file *file, const char __user *buf,
+                           size_t count, loff_t * ppos)
+{
+	PRINTK_5(TRACE_MWAVE,
+		"mwavedd::mwave_write entry file %p, buf %p,"
+		" count %zx ppos %p\n",
+		file, buf, count, ppos);
+
+	return -EINVAL;
+}
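+
+/*
+ * Editorial note: read() and write() on the device node deliberately
+ * fail with -EINVAL; all data movement goes through the ioctl
+ * interface above (IOCTL_MW_READ_DATA, IOCTL_MW_WRITE_DATA and
+ * friends).
+ */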
+
+
+static int register_serial_portandirq(unsigned int port, int irq)
+{
+	struct uart_8250_port uart;
+	
+	switch ( port ) {
+		case 0x3f8:
+		case 0x2f8:
+		case 0x3e8:
+		case 0x2e8:
+			/* OK */
+			break;
+		default:
+			PRINTK_ERROR(KERN_ERR_MWAVE
+					"mwavedd::register_serial_portandirq:"
+					" Error: Illegal port %x\n", port );
+			return -1;
+	} /* switch */
+	/* port is okay */
+
+	switch ( irq ) {
+		case 3:
+		case 4:
+		case 5:
+		case 7:
+			/* OK */
+			break;
+		default:
+			PRINTK_ERROR(KERN_ERR_MWAVE
+					"mwavedd::register_serial_portandirq:"
+					" Error: Illegal irq %x\n", irq );
+			return -1;
+	} /* switch */
+	/* irq is okay */
+
+	memset(&uart, 0, sizeof(uart));
+	
+	uart.port.uartclk =  1843200;
+	uart.port.iobase = port;
+	uart.port.irq = irq;
+	uart.port.iotype = UPIO_PORT;
+	uart.port.flags =  UPF_SHARE_IRQ;
+	return serial8250_register_8250_port(&uart);
+}
+
+
+static const struct file_operations mwave_fops = {
+	.owner		= THIS_MODULE,
+	.read		= mwave_read,
+	.write		= mwave_write,
+	.unlocked_ioctl	= mwave_ioctl,
+	.open		= mwave_open,
+	.release	= mwave_close,
+	.llseek		= default_llseek,
+};
+
+
+static struct miscdevice mwave_misc_dev = { MWAVE_MINOR, "mwave", &mwave_fops };
+
+#if 0 /* totally b0rked */
+/*
+ * sysfs support <paulsch@us.ibm.com>
+ */
+
+struct device mwave_device;
+
+/* Prevent code redundancy, create a macro for mwave_show_* functions. */
+#define mwave_show_function(attr_name, format_string, field)		\
+static ssize_t mwave_show_##attr_name(struct device *dev, struct device_attribute *attr, char *buf)	\
+{									\
+	DSP_3780I_CONFIG_SETTINGS *pSettings =				\
+		&mwave_s_mdd.rBDData.rDspSettings;			\
+        return sprintf(buf, format_string, pSettings->field);		\
+}
+
+/* All of our attributes are read attributes. */
+#define mwave_dev_rd_attr(attr_name, format_string, field)		\
+	mwave_show_function(attr_name, format_string, field)		\
+static DEVICE_ATTR(attr_name, S_IRUGO, mwave_show_##attr_name, NULL)
+
+mwave_dev_rd_attr (3780i_dma, "%i\n", usDspDma);
+mwave_dev_rd_attr (3780i_irq, "%i\n", usDspIrq);
+mwave_dev_rd_attr (3780i_io, "%#.4x\n", usDspBaseIO);
+mwave_dev_rd_attr (uart_irq, "%i\n", usUartIrq);
+mwave_dev_rd_attr (uart_io, "%#.4x\n", usUartBaseIO);
+
+static struct device_attribute * const mwave_dev_attrs[] = {
+	&dev_attr_3780i_dma,
+	&dev_attr_3780i_irq,
+	&dev_attr_3780i_io,
+	&dev_attr_uart_irq,
+	&dev_attr_uart_io,
+};
+#endif
+
+/*
+* mwave_init is called on module load
+*
+* mwave_exit is called on module unload
+* mwave_exit is also used to clean up after an aborted mwave_init
+*/
+static void mwave_exit(void)
+{
+	pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+
+	PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit entry\n");
+
+#if 0
+	for (i = 0; i < pDrvData->nr_registered_attrs; i++)
+		device_remove_file(&mwave_device, mwave_dev_attrs[i]);
+	pDrvData->nr_registered_attrs = 0;
+
+	if (pDrvData->device_registered) {
+		device_unregister(&mwave_device);
+		pDrvData->device_registered = false;
+	}
+#endif
+
+	if ( pDrvData->sLine >= 0 ) {
+		serial8250_unregister_port(pDrvData->sLine);
+	}
+	if (pDrvData->bMwaveDevRegistered) {
+		misc_deregister(&mwave_misc_dev);
+	}
+	if (pDrvData->bDSPEnabled) {
+		tp3780I_DisableDSP(&pDrvData->rBDData);
+	}
+	if (pDrvData->bResourcesClaimed) {
+		tp3780I_ReleaseResources(&pDrvData->rBDData);
+	}
+	if (pDrvData->bBDInitialized) {
+		tp3780I_Cleanup(&pDrvData->rBDData);
+	}
+
+	PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit exit\n");
+}
+
+module_exit(mwave_exit);
+
+static int __init mwave_init(void)
+{
+	int i;
+	int retval = 0;
+	pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+
+	PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_init entry\n");
+
+	memset(&mwave_s_mdd, 0, sizeof(MWAVE_DEVICE_DATA));
+
+	pDrvData->bBDInitialized = false;
+	pDrvData->bResourcesClaimed = false;
+	pDrvData->bDSPEnabled = false;
+	pDrvData->bDSPReset = false;
+	pDrvData->bMwaveDevRegistered = false;
+	pDrvData->sLine = -1;
+
+	for (i = 0; i < ARRAY_SIZE(pDrvData->IPCs); i++) {
+		pDrvData->IPCs[i].bIsEnabled = false;
+		pDrvData->IPCs[i].bIsHere = false;
+		pDrvData->IPCs[i].usIntCount = 0;	/* no ints received yet */
+		init_waitqueue_head(&pDrvData->IPCs[i].ipc_wait_queue);
+	}
+
+	retval = tp3780I_InitializeBoardData(&pDrvData->rBDData);
+	PRINTK_2(TRACE_MWAVE,
+		"mwavedd::mwave_init, return from tp3780I_InitializeBoardData"
+		" retval %x\n",
+		retval);
+	if (retval) {
+		PRINTK_ERROR(KERN_ERR_MWAVE
+				"mwavedd::mwave_init: Error:"
+				" Failed to initialize board data\n");
+		goto cleanup_error;
+	}
+	pDrvData->bBDInitialized = true;
+
+	retval = tp3780I_CalcResources(&pDrvData->rBDData);
+	PRINTK_2(TRACE_MWAVE,
+		"mwavedd::mwave_init, return from tp3780I_CalcResources"
+		" retval %x\n",
+		retval);
+	if (retval) {
+		PRINTK_ERROR(KERN_ERR_MWAVE
+				"mwavedd:mwave_init: Error:"
+				" Failed to calculate resources\n");
+		goto cleanup_error;
+	}
+
+	retval = tp3780I_ClaimResources(&pDrvData->rBDData);
+	PRINTK_2(TRACE_MWAVE,
+		"mwavedd::mwave_init, return from tp3780I_ClaimResources"
+		" retval %x\n",
+		retval);
+	if (retval) {
+		PRINTK_ERROR(KERN_ERR_MWAVE
+				"mwavedd:mwave_init: Error:"
+				" Failed to claim resources\n");
+		goto cleanup_error;
+	}
+	pDrvData->bResourcesClaimed = true;
+
+	retval = tp3780I_EnableDSP(&pDrvData->rBDData);
+	PRINTK_2(TRACE_MWAVE,
+		"mwavedd::mwave_init, return from tp3780I_EnableDSP"
+		" retval %x\n",
+		retval);
+	if (retval) {
+		PRINTK_ERROR(KERN_ERR_MWAVE
+				"mwavedd:mwave_init: Error:"
+				" Failed to enable DSP\n");
+		goto cleanup_error;
+	}
+	pDrvData->bDSPEnabled = true;
+
+	if (misc_register(&mwave_misc_dev) < 0) {
+		PRINTK_ERROR(KERN_ERR_MWAVE
+				"mwavedd:mwave_init: Error:"
+				" Failed to register misc device\n");
+		goto cleanup_error;
+	}
+	pDrvData->bMwaveDevRegistered = true;
+
+	pDrvData->sLine = register_serial_portandirq(
+		pDrvData->rBDData.rDspSettings.usUartBaseIO,
+		pDrvData->rBDData.rDspSettings.usUartIrq
+	);
+	if (pDrvData->sLine < 0) {
+		PRINTK_ERROR(KERN_ERR_MWAVE
+				"mwavedd:mwave_init: Error:"
+				" Failed to register serial driver\n");
+		goto cleanup_error;
+	}
+	/* uart is registered */
+
+#if 0
+	/* sysfs */
+	memset(&mwave_device, 0, sizeof (struct device));
+	dev_set_name(&mwave_device, "mwave");
+
+	if (device_register(&mwave_device))
+		goto cleanup_error;
+	pDrvData->device_registered = true;
+	for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) {
+		if(device_create_file(&mwave_device, mwave_dev_attrs[i])) {
+			PRINTK_ERROR(KERN_ERR_MWAVE
+					"mwavedd:mwave_init: Error:"
+					" Failed to create sysfs file %s\n",
+					mwave_dev_attrs[i]->attr.name);
+			goto cleanup_error;
+		}
+		pDrvData->nr_registered_attrs++;
+	}
+#endif
+
+	/* SUCCESS! */
+	return 0;
+
+cleanup_error:
+	PRINTK_ERROR(KERN_ERR_MWAVE
+			"mwavedd::mwave_init: Error:"
+			" Failed to initialize\n");
+	mwave_exit(); /* clean up */
+
+	return -EIO;
+}
+
+module_init(mwave_init);
+
diff --git a/drivers/char/mwave/mwavedd.h b/drivers/char/mwave/mwavedd.h
new file mode 100644
index 0000000..21cb09c
--- /dev/null
+++ b/drivers/char/mwave/mwavedd.h
@@ -0,0 +1,152 @@
+/*
+*
+* mwavedd.h -- declarations for mwave device driver
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*
+*
+* 10/23/2000 - Alpha Release
+*	First release to the public
+*/
+
+#ifndef _LINUX_MWAVEDD_H
+#define _LINUX_MWAVEDD_H
+#include "3780i.h"
+#include "tp3780i.h"
+#include "smapi.h"
+#include "mwavepub.h"
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+extern int mwave_debug;
+extern int mwave_3780i_irq;
+extern int mwave_3780i_io;
+extern int mwave_uart_irq;
+extern int mwave_uart_io;
+
+#define PRINTK_ERROR printk
+#define KERN_ERR_MWAVE KERN_ERR "mwave: "
+
+#define TRACE_MWAVE     0x0001
+#define TRACE_SMAPI     0x0002
+#define TRACE_3780I     0x0004
+#define TRACE_TP3780I   0x0008
+
+#ifdef MW_TRACE
+#define PRINTK_1(f,s)                       \
+  if (f & (mwave_debug)) {                  \
+    printk(s);                              \
+  }
+
+#define PRINTK_2(f,s,v1)                    \
+  if (f & (mwave_debug)) {                  \
+    printk(s,v1);                           \
+  }
+
+#define PRINTK_3(f,s,v1,v2)                 \
+  if (f & (mwave_debug)) {                  \
+    printk(s,v1,v2);                        \
+  }
+
+#define PRINTK_4(f,s,v1,v2,v3)              \
+  if (f & (mwave_debug)) {                  \
+    printk(s,v1,v2,v3);                     \
+  }
+
+#define PRINTK_5(f,s,v1,v2,v3,v4)           \
+  if (f & (mwave_debug)) {                  \
+    printk(s,v1,v2,v3,v4);                  \
+  }
+
+#define PRINTK_6(f,s,v1,v2,v3,v4,v5)        \
+  if (f & (mwave_debug)) {                  \
+    printk(s,v1,v2,v3,v4,v5);               \
+  }
+
+#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6)     \
+  if (f & (mwave_debug)) {                  \
+    printk(s,v1,v2,v3,v4,v5,v6);            \
+  }
+
+#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7)  \
+  if (f & (mwave_debug)) {                  \
+    printk(s,v1,v2,v3,v4,v5,v6,v7);         \
+  }
+
+#else
+#define PRINTK_1(f,s)
+#define PRINTK_2(f,s,v1)
+#define PRINTK_3(f,s,v1,v2)
+#define PRINTK_4(f,s,v1,v2,v3)
+#define PRINTK_5(f,s,v1,v2,v3,v4)
+#define PRINTK_6(f,s,v1,v2,v3,v4,v5)
+#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6)
+#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7)
+#endif
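+
+/*
+ * Editorial note: the numeric suffix of PRINTK_<n> is the total number
+ * of arguments handed to printk (the format string plus n-1 values).
+ * All of these compile to nothing unless the driver is built with
+ * -DMW_TRACE; even then, a message prints only when its trace class
+ * flag is set in the mwave_debug module parameter.
+ */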
+
+
+typedef struct _MWAVE_IPC {
+	unsigned short usIntCount;	/* 0=none, 1=first, 2=greater than 1st */
+	bool bIsEnabled;
+	bool bIsHere;
+	/* entry spin lock */
+	wait_queue_head_t ipc_wait_queue;
+} MWAVE_IPC;
+
+typedef struct _MWAVE_DEVICE_DATA {
+	THINKPAD_BD_DATA rBDData;	/* board driver's data area */
+	unsigned long ulIPCSource_ISR;	/* IPC source bits for recently processed intr, set during ISR processing */
+	unsigned long ulIPCSource_DPC;	/* IPC source bits for recently processed intr, set during DPC processing */
+	bool bBDInitialized;
+	bool bResourcesClaimed;
+	bool bDSPEnabled;
+	bool bDSPReset;
+	MWAVE_IPC IPCs[16];
+	bool bMwaveDevRegistered;
+	short sLine;
+	int nr_registered_attrs;
+	int device_registered;
+
+} MWAVE_DEVICE_DATA, *pMWAVE_DEVICE_DATA;
+
+extern MWAVE_DEVICE_DATA mwave_s_mdd;
+
+#endif
diff --git a/drivers/char/mwave/mwavepub.h b/drivers/char/mwave/mwavepub.h
new file mode 100644
index 0000000..60c961a
--- /dev/null
+++ b/drivers/char/mwave/mwavepub.h
@@ -0,0 +1,89 @@
+/*
+*
+* mwavepub.h -- PUBLIC declarations for the mwave driver
+*               and applications using it
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*
+*
+* 10/23/2000 - Alpha Release
+*	First release to the public
+*/
+
+#ifndef _LINUX_MWAVEPUB_H
+#define _LINUX_MWAVEPUB_H
+
+#include <linux/miscdevice.h>
+
+
+typedef struct _MW_ABILITIES {
+	unsigned long instr_per_sec;
+	unsigned long data_size;
+	unsigned long inst_size;
+	unsigned long bus_dma_bw;
+	unsigned short uart_enable;
+	short component_count;
+	unsigned long component_list[7];
+	char mwave_os_name[16];
+	char bios_task_name[16];
+} MW_ABILITIES, *pMW_ABILITIES;
+
+
+typedef struct _MW_READWRITE {
+	unsigned short usDspAddress;	/* The dsp address */
+	unsigned long ulDataLength;	/* The size in bytes of the data or user buffer */
+	void __user *pBuf;		/* Input:variable sized buffer */
+} MW_READWRITE, *pMW_READWRITE;
+
+#define IOCTL_MW_RESET           _IO(MWAVE_MINOR,1)
+#define IOCTL_MW_RUN             _IO(MWAVE_MINOR,2)
+#define IOCTL_MW_DSP_ABILITIES   _IOR(MWAVE_MINOR,3,MW_ABILITIES)
+#define IOCTL_MW_READ_DATA       _IOR(MWAVE_MINOR,4,MW_READWRITE)
+#define IOCTL_MW_READCLEAR_DATA  _IOR(MWAVE_MINOR,5,MW_READWRITE)
+#define IOCTL_MW_READ_INST       _IOR(MWAVE_MINOR,6,MW_READWRITE)
+#define IOCTL_MW_WRITE_DATA      _IOW(MWAVE_MINOR,7,MW_READWRITE)
+#define IOCTL_MW_WRITE_INST      _IOW(MWAVE_MINOR,8,MW_READWRITE)
+#define IOCTL_MW_REGISTER_IPC    _IOW(MWAVE_MINOR,9,int)
+#define IOCTL_MW_UNREGISTER_IPC  _IOW(MWAVE_MINOR,10,int)
+#define IOCTL_MW_GET_IPC         _IOW(MWAVE_MINOR,11,int)
+#define IOCTL_MW_TRACE           _IOR(MWAVE_MINOR,12,MW_READWRITE)
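+
+/*
+ * Illustrative sketch (not part of the original header): a user-space
+ * client opens the device node described in the driver README and
+ * issues these ioctls directly, e.g. to query the DSP abilities:
+ *
+ *	int fd = open("/dev/modems/mwave", O_RDWR);
+ *	MW_ABILITIES rAbilities;
+ *
+ *	if (fd >= 0 && ioctl(fd, IOCTL_MW_DSP_ABILITIES, &rAbilities) == 0)
+ *		printf("DSP: %lu instructions/sec\n",
+ *		       rAbilities.instr_per_sec);
+ */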
+
+
+#endif
diff --git a/drivers/char/mwave/smapi.c b/drivers/char/mwave/smapi.c
new file mode 100644
index 0000000..691f589
--- /dev/null
+++ b/drivers/char/mwave/smapi.c
@@ -0,0 +1,572 @@
+/*
+*
+* smapi.c -- SMAPI interface routines
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*
+*
+* 10/23/2000 - Alpha Release
+*	First release to the public
+*/
+
+#include <linux/kernel.h>
+#include <linux/mc146818rtc.h>	/* CMOS defines */
+#include "smapi.h"
+#include "mwavedd.h"
+
+static unsigned short g_usSmapiPort = 0;
+
+
+static int smapi_request(unsigned short inBX, unsigned short inCX,
+			 unsigned short inDI, unsigned short inSI,
+			 unsigned short *outAX, unsigned short *outBX,
+			 unsigned short *outCX, unsigned short *outDX,
+			 unsigned short *outDI, unsigned short *outSI)
+{
+	unsigned short myoutAX = 2, *pmyoutAX = &myoutAX;
+	unsigned short myoutBX = 3, *pmyoutBX = &myoutBX;
+	unsigned short myoutCX = 4, *pmyoutCX = &myoutCX;
+	unsigned short myoutDX = 5, *pmyoutDX = &myoutDX;
+	unsigned short myoutDI = 6, *pmyoutDI = &myoutDI;
+	unsigned short myoutSI = 7, *pmyoutSI = &myoutSI;
+	unsigned short usSmapiOK = -EIO, *pusSmapiOK = &usSmapiOK;
+	unsigned int inBXCX = (inBX << 16) | inCX;
+	unsigned int inDISI = (inDI << 16) | inSI;
+	int retval = 0;
+
+	PRINTK_5(TRACE_SMAPI, "inBX %x inCX %x inDI %x inSI %x\n",
+		inBX, inCX, inDI, inSI);
+
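+	/*
+	 * Editorial note on the inline assembly below: AX is loaded with
+	 * the SMAPI function selector 0x5380, the caller's BX/CX/DI/SI
+	 * values go into the matching registers, and the BIOS handler is
+	 * triggered by writing AL to the SMAPI port (and to port 0x4F).
+	 * If AH comes back zero, the returned registers are captured and
+	 * usSmapiOK is set to 1; any nonzero AH (including an unserviced
+	 * 0x53) leaves usSmapiOK at its -EIO initializer.
+	 */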
+	__asm__ __volatile__("movw  $0x5380,%%ax\n\t"
+			    "movl  %7,%%ebx\n\t"
+			    "shrl  $16, %%ebx\n\t"
+			    "movw  %7,%%cx\n\t"
+			    "movl  %8,%%edi\n\t"
+			    "shrl  $16,%%edi\n\t"
+			    "movw  %8,%%si\n\t"
+			    "movw  %9,%%dx\n\t"
+			    "out   %%al,%%dx\n\t"
+			    "out   %%al,$0x4F\n\t"
+			    "cmpb  $0x53,%%ah\n\t"
+			    "je    2f\n\t"
+			    "1:\n\t"
+			    "orb   %%ah,%%ah\n\t"
+			    "jnz   2f\n\t"
+			    "movw  %%ax,%0\n\t"
+			    "movw  %%bx,%1\n\t"
+			    "movw  %%cx,%2\n\t"
+			    "movw  %%dx,%3\n\t"
+			    "movw  %%di,%4\n\t"
+			    "movw  %%si,%5\n\t"
+			    "movw  $1,%6\n\t"
+			    "2:\n\t":"=m"(*(unsigned short *) pmyoutAX),
+			    "=m"(*(unsigned short *) pmyoutBX),
+			    "=m"(*(unsigned short *) pmyoutCX),
+			    "=m"(*(unsigned short *) pmyoutDX),
+			    "=m"(*(unsigned short *) pmyoutDI),
+			    "=m"(*(unsigned short *) pmyoutSI),
+			    "=m"(*(unsigned short *) pusSmapiOK)
+			    :"m"(inBXCX), "m"(inDISI), "m"(g_usSmapiPort)
+			    :"%eax", "%ebx", "%ecx", "%edx", "%edi",
+			    "%esi");
+
+	PRINTK_8(TRACE_SMAPI,
+		"myoutAX %x myoutBX %x myoutCX %x myoutDX %x myoutDI %x myoutSI %x usSmapiOK %x\n",
+		myoutAX, myoutBX, myoutCX, myoutDX, myoutDI, myoutSI,
+		usSmapiOK);
+	*outAX = myoutAX;
+	*outBX = myoutBX;
+	*outCX = myoutCX;
+	*outDX = myoutDX;
+	*outDI = myoutDI;
+	*outSI = myoutSI;
+
+	retval = (usSmapiOK == 1) ? 0 : -EIO;
+	PRINTK_2(TRACE_SMAPI, "smapi::smapi_request exit retval %x\n", retval);
+	return retval;
+}
+
+
+int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
+{
+	int bRC = -EIO;
+	unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
+	static const unsigned short ausDspBases[] = {
+		0x0030, 0x4E30, 0x8E30, 0xCE30,
+		0x0130, 0x0350, 0x0070, 0x0DB0 };
+	static const unsigned short ausUartBases[] = {
+		0x03F8, 0x02F8, 0x03E8, 0x02E8 };
+
+	PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n");
+
+	bRC = smapi_request(0x1802, 0x0000, 0, 0,
+		&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+	if (bRC) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Error: Could not get DSP Settings. Aborting.\n");
+		return bRC;
+	}
+
+	PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n");
+
+	pSettings->bDSPPresent = ((usBX & 0x0100) != 0);
+	pSettings->bDSPEnabled = ((usCX & 0x0001) != 0);
+	pSettings->usDspIRQ = usSI & 0x00FF;
+	pSettings->usDspDMA = (usSI & 0xFF00) >> 8;
+	if ((usDI & 0x00FF) < ARRAY_SIZE(ausDspBases)) {
+		pSettings->usDspBaseIO = ausDspBases[usDI & 0x00FF];
+	} else {
+		pSettings->usDspBaseIO = 0;
+	}
+	PRINTK_6(TRACE_SMAPI,
+		"smapi::smapi_query_DSP_cfg get DSP Settings bDSPPresent %x bDSPEnabled %x usDspIRQ %x usDspDMA %x usDspBaseIO %x\n",
+		pSettings->bDSPPresent, pSettings->bDSPEnabled,
+		pSettings->usDspIRQ, pSettings->usDspDMA,
+		pSettings->usDspBaseIO);
+
+	/* check for illegal values */
+	if ( pSettings->usDspBaseIO == 0 ) 
+		PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP base I/O address is 0\n");
+	if ( pSettings->usDspIRQ == 0 )
+		PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP IRQ line is 0\n");
+
+	bRC = smapi_request(0x1804, 0x0000, 0, 0,
+	   	&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+	if (bRC) {
+		PRINTK_ERROR("smapi::smapi_query_DSP_cfg: Error: Could not get DSP modem settings. Aborting.\n");
+		return bRC;
+	} 
+
+	PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n");
+
+	pSettings->bModemEnabled = ((usCX & 0x0001) != 0);
+	pSettings->usUartIRQ = usSI & 0x000F;
+	if (((usSI & 0xFF00) >> 8) < ARRAY_SIZE(ausUartBases)) {
+		pSettings->usUartBaseIO = ausUartBases[(usSI & 0xFF00) >> 8];
+	} else {
+		pSettings->usUartBaseIO = 0;
+	}
+
+	PRINTK_4(TRACE_SMAPI,
+		"smapi::smapi_query_DSP_cfg get DSP modem settings bModemEnabled %x usUartIRQ %x usUartBaseIO %x\n",
+		pSettings->bModemEnabled,
+		pSettings->usUartIRQ,
+		pSettings->usUartBaseIO);
+
+	/* check for illegal values */
+	if ( pSettings->usUartBaseIO == 0 ) 
+		PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART base I/O address is 0\n");
+	if ( pSettings->usUartIRQ == 0 )
+		PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART IRQ line is 0\n");
+
+	PRINTK_2(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg exit bRC %x\n", bRC);
+
+	return bRC;
+}
+
+
+int smapi_set_DSP_cfg(void)
+{
+	int bRC = -EIO;
+	int i;
+	unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
+	static const unsigned short ausDspBases[] = {
+		0x0030, 0x4E30, 0x8E30, 0xCE30,
+		0x0130, 0x0350, 0x0070, 0x0DB0 };
+	static const unsigned short ausUartBases[] = {
+		0x03F8, 0x02F8, 0x03E8, 0x02E8 };
+	static const unsigned short ausDspIrqs[] = {
+		5, 7, 10, 11, 15 };
+	static const unsigned short ausUartIrqs[] = {
+		3, 4 };
+
+	unsigned short dspio_index = 0, uartio_index = 0;
+
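+	/*
+	 * Editorial note on the flow below: the module parameters are
+	 * validated against the legal base/IRQ tables above, serial
+	 * ports A and B and the IR port are checked for resource
+	 * conflicts (and optionally disabled when built with
+	 * MWAVE_FUTZ_WITH_OTHER_DEVICES), and finally the DSP and UART
+	 * settings are programmed through SMAPI functions 0x1803 and
+	 * 0x1805.
+	 */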
+	PRINTK_5(TRACE_SMAPI,
+		"smapi::smapi_set_DSP_cfg entry mwave_3780i_irq %x mwave_3780i_io %x mwave_uart_irq %x mwave_uart_io %x\n",
+		mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io);
+
+	if (mwave_3780i_io) {
+		for (i = 0; i < ARRAY_SIZE(ausDspBases); i++) {
+			if (mwave_3780i_io == ausDspBases[i])
+				break;
+		}
+		if (i == ARRAY_SIZE(ausDspBases)) {
+			PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. Aborting.\n", mwave_3780i_io);
+			return bRC;
+		}
+		dspio_index = i;
+	}
+
+	if (mwave_3780i_irq) {
+		for (i = 0; i < ARRAY_SIZE(ausDspIrqs); i++) {
+			if (mwave_3780i_irq == ausDspIrqs[i])
+				break;
+		}
+		if (i == ARRAY_SIZE(ausDspIrqs)) {
+			PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq);
+			return bRC;
+		}
+	}
+
+	if (mwave_uart_io) {
+		for (i = 0; i < ARRAY_SIZE(ausUartBases); i++) {
+			if (mwave_uart_io == ausUartBases[i])
+				break;
+		}
+		if (i == ARRAY_SIZE(ausUartBases)) {
+			PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io);
+			return bRC;
+		}
+		uartio_index = i;
+	}
+
+
+	if (mwave_uart_irq) {
+		for (i = 0; i < ARRAY_SIZE(ausUartIrqs); i++) {
+			if (mwave_uart_irq == ausUartIrqs[i])
+				break;
+		}
+		if (i == ARRAY_SIZE(ausUartIrqs)) {
+			PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. Aborting.\n", mwave_uart_irq);
+			return bRC;
+		}
+	}
+
+	if (mwave_uart_irq || mwave_uart_io) {
+
+		/* Check serial port A */
+		bRC = smapi_request(0x1402, 0x0000, 0, 0,
+			&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+		if (bRC) goto exit_smapi_request_error;
+		/* bRC == 0 */
+		if (usBX & 0x0100) {	/* serial port A is present */
+			if (usCX & 1) {	/* serial port is enabled */
+				if ((usSI & 0xFF) == mwave_uart_irq) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+					PRINTK_ERROR(KERN_ERR_MWAVE
+						"smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
+#else
+					PRINTK_3(TRACE_SMAPI,
+						"smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+					PRINTK_1(TRACE_SMAPI,
+						"smapi::smapi_set_DSP_cfg Disabling conflicting serial port\n");
+					bRC = smapi_request(0x1403, 0x0100, 0, usSI,
+						&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+					if (bRC) goto exit_smapi_request_error;
+					bRC = smapi_request(0x1402, 0x0000, 0, 0,
+						&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+					if (bRC) goto exit_smapi_request_error;
+#else
+					goto exit_conflict;
+#endif
+				} else {
+					if ((usSI >> 8) == uartio_index) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+						PRINTK_ERROR(KERN_ERR_MWAVE
+							"smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
+#else
+						PRINTK_3(TRACE_SMAPI,
+							"smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+						PRINTK_1(TRACE_SMAPI,
+							"smapi::smapi_set_DSP_cfg Disabling conflicting serial port A\n");
+						bRC = smapi_request (0x1403, 0x0100, 0, usSI,
+							&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+						if (bRC) goto exit_smapi_request_error;
+						bRC = smapi_request (0x1402, 0x0000, 0, 0,
+							&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+						if (bRC) goto exit_smapi_request_error;
+#else
+						goto exit_conflict;
+#endif
+					}
+				}
+			}
+		}
+
+		/* Check serial port B */
+		bRC = smapi_request(0x1404, 0x0000, 0, 0,
+			&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+		if (bRC) goto exit_smapi_request_error;
+		/* bRC == 0 */
+		if (usBX & 0x0100) {	/* serial port B is present */
+			if (usCX & 1) {	/* serial port is enabled */
+				if ((usSI & 0xFF) == mwave_uart_irq) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+					PRINTK_ERROR(KERN_ERR_MWAVE
+						"smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
+#else
+					PRINTK_3(TRACE_SMAPI,
+						"smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+					PRINTK_1(TRACE_SMAPI,
+						"smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n");
+					bRC = smapi_request(0x1405, 0x0100, 0, usSI,
+						&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+					if (bRC) goto exit_smapi_request_error;
+					bRC = smapi_request(0x1404, 0x0000, 0, 0,
+						&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+					if (bRC) goto exit_smapi_request_error;
+#else
+					goto exit_conflict;
+#endif
+				} else {
+					if ((usSI >> 8) == uartio_index) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+						PRINTK_ERROR(KERN_ERR_MWAVE
+							"smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
+#else
+						PRINTK_3(TRACE_SMAPI,
+							"smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+						PRINTK_1 (TRACE_SMAPI,
+						    "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n");
+						bRC = smapi_request (0x1405, 0x0100, 0, usSI,
+							&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+						if (bRC) goto exit_smapi_request_error;
+						bRC = smapi_request (0x1404, 0x0000, 0, 0,
+							&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+						if (bRC) goto exit_smapi_request_error;
+#else
+						goto exit_conflict;
+#endif
+					}
+				}
+			}
+		}
+
+		/* Check IR port */
+		bRC = smapi_request(0x1700, 0x0000, 0, 0,
+			&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+		if (bRC) goto exit_smapi_request_error;
+		bRC = smapi_request(0x1704, 0x0000, 0, 0,
+			&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+		if (bRC) goto exit_smapi_request_error;
+		/* bRC == 0 */
+		if ((usCX & 0xff) != 0xff) { /* IR port not disabled */
+			if ((usCX & 0xff) == mwave_uart_irq) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+				PRINTK_ERROR(KERN_ERR_MWAVE
+					"smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq);
+#else
+				PRINTK_3(TRACE_SMAPI,
+					"smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+				PRINTK_1(TRACE_SMAPI,
+					"smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n");
+				bRC = smapi_request(0x1701, 0x0100, 0, 0,
+					&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+				if (bRC) goto exit_smapi_request_error;
+				bRC = smapi_request(0x1700, 0, 0, 0,
+					&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+				if (bRC) goto exit_smapi_request_error;
+				bRC = smapi_request(0x1705, 0x01ff, 0, usSI,
+					&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+				if (bRC) goto exit_smapi_request_error;
+				bRC = smapi_request(0x1704, 0x0000, 0, 0,
+					&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+				if (bRC) goto exit_smapi_request_error;
+#else
+				goto exit_conflict;
+#endif
+			} else {
+				if ((usSI & 0xff) == uartio_index) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+					PRINTK_ERROR(KERN_ERR_MWAVE
+						"smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]);
+#else
+					PRINTK_3(TRACE_SMAPI,
+						"smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+					PRINTK_1(TRACE_SMAPI,
+						"smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n");
+					bRC = smapi_request(0x1701, 0x0100, 0, 0,
+						&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+					if (bRC) goto exit_smapi_request_error;
+					bRC = smapi_request(0x1700, 0, 0, 0,
+						&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+					if (bRC) goto exit_smapi_request_error;
+					bRC = smapi_request(0x1705, 0x01ff, 0, usSI,
+						&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+					if (bRC) goto exit_smapi_request_error;
+					bRC = smapi_request(0x1704, 0x0000, 0, 0,
+						&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+					if (bRC) goto exit_smapi_request_error;
+#else
+					goto exit_conflict;
+#endif
+				}
+			}
+		}
+	}
+
+	bRC = smapi_request(0x1802, 0x0000, 0, 0,
+		&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+	if (bRC) goto exit_smapi_request_error;
+
+	if (mwave_3780i_io) {
+		usDI = dspio_index;
+	}
+	if (mwave_3780i_irq) {
+		usSI = (usSI & 0xff00) | mwave_3780i_irq;
+	}
+
+	bRC = smapi_request(0x1803, 0x0101, usDI, usSI,
+		&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+	if (bRC) goto exit_smapi_request_error;
+
+	bRC = smapi_request(0x1804, 0x0000, 0, 0,
+		&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+	if (bRC) goto exit_smapi_request_error;
+
+	if (mwave_uart_io) {
+		usSI = (usSI & 0x00ff) | (uartio_index << 8);
+	}
+	if (mwave_uart_irq) {
+		usSI = (usSI & 0xff00) | mwave_uart_irq;
+	}
+	bRC = smapi_request(0x1805, 0x0101, 0, usSI,
+		&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+	if (bRC) goto exit_smapi_request_error;
+
+	bRC = smapi_request(0x1802, 0x0000, 0, 0,
+		&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+	if (bRC) goto exit_smapi_request_error;
+
+	bRC = smapi_request(0x1804, 0x0000, 0, 0,
+		&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+	if (bRC) goto exit_smapi_request_error;
+
+/* normal exit: */
+	PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg exit\n");
+	return 0;
+
+exit_conflict:
+	/* Message has already been printed */
+	return -EIO;
+
+exit_smapi_request_error:
+	PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg exit on smapi_request error bRC %x\n", bRC);
+	return bRC;
+}
+
+
+int smapi_set_DSP_power_state(bool bOn)
+{
+	int bRC = -EIO;
+	unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
+	unsigned short usPowerFunction;
+
+	PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state entry bOn %x\n", bOn);
+
+	usPowerFunction = (bOn) ? 1 : 0;
+
+	bRC = smapi_request(0x4901, 0x0000, 0, usPowerFunction,
+		&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+
+	PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state exit bRC %x\n", bRC);
+
+	return bRC;
+}
+
+#if 0
+static int SmapiQuerySystemID(void)
+{
+	int bRC = -EIO;
+	unsigned short usAX = 0xffff, usBX = 0xffff, usCX = 0xffff,
+		usDX = 0xffff, usDI = 0xffff, usSI = 0xffff;
+
+	printk("smapi::SmapiQUerySystemID entry\n");
+	bRC = smapi_request(0x0000, 0, 0, 0,
+		&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+
+	if (bRC == 0) {
+		printk("AX=%x, BX=%x, CX=%x, DX=%x, DI=%x, SI=%x\n",
+			usAX, usBX, usCX, usDX, usDI, usSI);
+	} else {
+		printk("smapi::SmapiQuerySystemID smapi_request error\n");
+	}
+
+	return bRC;
+}
+#endif  /*  0  */
+
+int smapi_init(void)
+{
+	int retval = -EIO;
+	unsigned short usSmapiID = 0;
+	unsigned long flags;
+
+	PRINTK_1(TRACE_SMAPI, "smapi::smapi_init entry\n");
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	usSmapiID = CMOS_READ(0x7C);
+	usSmapiID |= (CMOS_READ(0x7D) << 8);
+	spin_unlock_irqrestore(&rtc_lock, flags);
+	PRINTK_2(TRACE_SMAPI, "smapi::smapi_init usSmapiID %x\n", usSmapiID);
+
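+	/* 0x5349 is the ASCII signature "SI" ('I' at CMOS 0x7C, 'S' at 0x7D) */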
+	if (usSmapiID == 0x5349) {
+		spin_lock_irqsave(&rtc_lock, flags);
+		g_usSmapiPort = CMOS_READ(0x7E);
+		g_usSmapiPort |= (CMOS_READ(0x7F) << 8);
+		spin_unlock_irqrestore(&rtc_lock, flags);
+		if (g_usSmapiPort == 0) {
+			PRINTK_ERROR("smapi::smapi_init, ERROR unable to read from SMAPI port\n");
+		} else {
+			PRINTK_2(TRACE_SMAPI,
+				"smapi::smapi_init, exit true g_usSmapiPort %x\n",
+				g_usSmapiPort);
+			retval = 0;
+			//SmapiQuerySystemID();
+		}
+	} else {
+		PRINTK_ERROR("smapi::smapi_init, ERROR invalid usSmapiID\n");
+		retval = -ENXIO;
+	}
+
+	return retval;
+}
diff --git a/drivers/char/mwave/smapi.h b/drivers/char/mwave/smapi.h
new file mode 100644
index 0000000..ebc206b
--- /dev/null
+++ b/drivers/char/mwave/smapi.h
@@ -0,0 +1,76 @@
+/*
+*
+* smapi.h -- declarations for SMAPI interface routines
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*
+*
+* 10/23/2000 - Alpha Release
+*	First release to the public
+*/
+
+#ifndef _LINUX_SMAPI_H
+#define _LINUX_SMAPI_H
+
+typedef struct {
+	int bDSPPresent;
+	int bDSPEnabled;
+	int bModemEnabled;
+	int bMIDIEnabled;
+	int bSblstEnabled;
+	unsigned short usDspIRQ;
+	unsigned short usDspDMA;
+	unsigned short usDspBaseIO;
+	unsigned short usUartIRQ;
+	unsigned short usUartBaseIO;
+	unsigned short usMidiIRQ;
+	unsigned short usMidiBaseIO;
+	unsigned short usSndblstIRQ;
+	unsigned short usSndblstDMA;
+	unsigned short usSndblstBaseIO;
+} SMAPI_DSP_SETTINGS;
+
+int smapi_init(void);
+int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings);
+int smapi_set_DSP_cfg(void);
+int smapi_set_DSP_power_state(bool bOn);
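+
+/*
+ * Typical call sequence for a board driver, as an illustrative sketch
+ * only (it roughly mirrors how tp3780i.c uses this interface):
+ *
+ *	SMAPI_DSP_SETTINGS rSettings;
+ *
+ *	if (smapi_init() == 0 &&
+ *	    smapi_query_DSP_cfg(&rSettings) == 0 &&
+ *	    rSettings.bDSPPresent)
+ *		smapi_set_DSP_power_state(true);
+ */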
+
+
+#endif
diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
new file mode 100644
index 0000000..5e1618a
--- /dev/null
+++ b/drivers/char/mwave/tp3780i.c
@@ -0,0 +1,580 @@
+/*
+*
+* tp3780i.c -- board driver for 3780i on ThinkPads
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*
+*
+* 10/23/2000 - Alpha Release
+*	First release to the public
+*/
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include "smapi.h"
+#include "mwavedd.h"
+#include "tp3780i.h"
+#include "3780i.h"
+#include "mwavepub.h"
+
+static unsigned short s_ausThinkpadIrqToField[16] =
+	{ 0xFFFF, 0xFFFF, 0xFFFF, 0x0001, 0x0002, 0x0003, 0xFFFF, 0x0004,
+	0xFFFF, 0xFFFF, 0x0005, 0x0006, 0xFFFF, 0xFFFF, 0xFFFF, 0x0007 };
+static unsigned short s_ausThinkpadDmaToField[8] =
+	{ 0x0001, 0x0002, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0003, 0x0004 };
+static unsigned short s_numIrqs = 16, s_numDmas = 8;
+
+
+static void EnableSRAM(THINKPAD_BD_DATA * pBDData)
+{
+	DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+	unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+	DSP_GPIO_OUTPUT_DATA_15_8 rGpioOutputData;
+	DSP_GPIO_DRIVER_ENABLE_15_8 rGpioDriverEnable;
+	DSP_GPIO_MODE_15_8 rGpioMode;
+
+	PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM, entry\n");
+
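+	/*
+	 * The SRAM is enabled by driving GPIO 10 low: select the pin's
+	 * mode, enable its output driver, then latch the output low.
+	 */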
+	MKWORD(rGpioMode) = ReadMsaCfg(DSP_GpioModeControl_15_8);
+	rGpioMode.GpioMode10 = 0;
+	WriteMsaCfg(DSP_GpioModeControl_15_8, MKWORD(rGpioMode));
+
+	MKWORD(rGpioDriverEnable) = 0;
+	rGpioDriverEnable.Enable10 = true;
+	rGpioDriverEnable.Mask10 = true;
+	WriteMsaCfg(DSP_GpioDriverEnable_15_8, MKWORD(rGpioDriverEnable));
+
+	MKWORD(rGpioOutputData) = 0;
+	rGpioOutputData.Latch10 = 0;
+	rGpioOutputData.Mask10 = true;
+	WriteMsaCfg(DSP_GpioOutputData_15_8, MKWORD(rGpioOutputData));
+
+	PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM exit\n");
+}
+
+
+static irqreturn_t UartInterrupt(int irq, void *dev_id)
+{
+	PRINTK_3(TRACE_TP3780I,
+		"tp3780i::UartInterrupt entry irq %x dev_id %p\n", irq, dev_id);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t DspInterrupt(int irq, void *dev_id)
+{
+	pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+	DSP_3780I_CONFIG_SETTINGS *pSettings = &pDrvData->rBDData.rDspSettings;
+	unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+	unsigned short usIPCSource = 0, usIsolationMask, usPCNum;
+
+	PRINTK_3(TRACE_TP3780I,
+		"tp3780i::DspInterrupt entry irq %x dev_id %p\n", irq, dev_id);
+
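+	/*
+	 * Each set bit in usIPCSource names a pending IPC channel; scan
+	 * the word bit by bit, mark the interrupt as seen and wake any
+	 * process sleeping on that channel's wait queue.
+	 */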
+	if (dsp3780I_GetIPCSource(usDspBaseIO, &usIPCSource) == 0) {
+		PRINTK_2(TRACE_TP3780I,
+			"tp3780i::DspInterrupt, return from dsp3780i_GetIPCSource, usIPCSource %x\n",
+			usIPCSource);
+		usIsolationMask = 1;
+		for (usPCNum = 1; usPCNum <= 16; usPCNum++) {
+			if (usIPCSource & usIsolationMask) {
+				usIPCSource &= ~usIsolationMask;
+				PRINTK_3(TRACE_TP3780I,
+					"tp3780i::DspInterrupt usPCNum %x usIPCSource %x\n",
+					usPCNum, usIPCSource);
+				if (pDrvData->IPCs[usPCNum - 1].usIntCount == 0) {
+					pDrvData->IPCs[usPCNum - 1].usIntCount = 1;
+				}
+				PRINTK_2(TRACE_TP3780I,
+					"tp3780i::DspInterrupt usIntCount %x\n",
+					pDrvData->IPCs[usPCNum - 1].usIntCount);
+				if (pDrvData->IPCs[usPCNum - 1].bIsEnabled == true) {
+					PRINTK_2(TRACE_TP3780I,
+						"tp3780i::DspInterrupt, waking up usPCNum %x\n",
+						usPCNum - 1);
+					wake_up_interruptible(&pDrvData->IPCs[usPCNum - 1].ipc_wait_queue);
+				} else {
+					PRINTK_2(TRACE_TP3780I,
+						"tp3780i::DspInterrupt, no one waiting for IPC %x\n",
+						usPCNum - 1);
+				}
+			}
+			if (usIPCSource == 0)
+				break;
+			/* try next IPC */
+			usIsolationMask = usIsolationMask << 1;
+		}
+	} else {
+		PRINTK_1(TRACE_TP3780I,
+			"tp3780i::DspInterrupt, return false from dsp3780i_GetIPCSource\n");
+	}
+	PRINTK_1(TRACE_TP3780I, "tp3780i::DspInterrupt exit\n");
+	return IRQ_HANDLED;
+}
+
+
+int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData)
+{
+	int retval = 0;
+	DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+
+	PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData entry pBDData %p\n", pBDData);
+
+	pBDData->bDSPEnabled = false;
+	pSettings->bInterruptClaimed = false;
+
+	retval = smapi_init();
+	if (retval) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_InitializeBoardData: Error: SMAPI is not available on this machine\n");
+	} else {
+		if (mwave_3780i_irq || mwave_3780i_io || mwave_uart_irq || mwave_uart_io) {
+			retval = smapi_set_DSP_cfg();
+		}
+	}
+
+	PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData exit retval %x\n", retval);
+
+	return retval;
+}
+
+int tp3780I_Cleanup(THINKPAD_BD_DATA * pBDData)
+{
+	int retval = 0;
+
+	PRINTK_2(TRACE_TP3780I,
+		"tp3780i::tp3780I_Cleanup entry and exit pBDData %p\n", pBDData);
+
+	return retval;
+}
+
+int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData)
+{
+	SMAPI_DSP_SETTINGS rSmapiInfo;
+	DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+	PRINTK_2(TRACE_TP3780I,
+		"tp3780i::tp3780I_CalcResources entry pBDData %p\n", pBDData);
+
+	if (smapi_query_DSP_cfg(&rSmapiInfo)) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Could not query DSP config. Aborting.\n");
+		return -EIO;
+	}
+
+	/* Sanity check */
+	if (
+		( rSmapiInfo.usDspIRQ == 0 )
+		|| ( rSmapiInfo.usDspBaseIO ==  0 )
+		|| ( rSmapiInfo.usUartIRQ ==  0 )
+		|| ( rSmapiInfo.usUartBaseIO ==  0 )
+	) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Illegal resource setting. Aborting.\n");
+		return -EIO;
+	}
+
+	pSettings->bDSPEnabled = (rSmapiInfo.bDSPEnabled && rSmapiInfo.bDSPPresent);
+	pSettings->bModemEnabled = rSmapiInfo.bModemEnabled;
+	pSettings->usDspIrq = rSmapiInfo.usDspIRQ;
+	pSettings->usDspDma = rSmapiInfo.usDspDMA;
+	pSettings->usDspBaseIO = rSmapiInfo.usDspBaseIO;
+	pSettings->usUartIrq = rSmapiInfo.usUartIRQ;
+	pSettings->usUartBaseIO = rSmapiInfo.usUartBaseIO;
+
+	pSettings->uDStoreSize = TP_ABILITIES_DATA_SIZE;
+	pSettings->uIStoreSize = TP_ABILITIES_INST_SIZE;
+	pSettings->uIps = TP_ABILITIES_INTS_PER_SEC;
+
+	if (pSettings->bDSPEnabled && pSettings->bModemEnabled && pSettings->usDspIrq == pSettings->usUartIrq) {
+		pBDData->bShareDspIrq = pBDData->bShareUartIrq = 1;
+	} else {
+		pBDData->bShareDspIrq = pBDData->bShareUartIrq = 0;
+	}
+
+	PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_CalcResources exit\n");
+
+	return 0;
+}
+
+
+int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData)
+{
+	int retval = 0;
+	DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+	struct resource *pres;
+
+	PRINTK_2(TRACE_TP3780I,
+		"tp3780i::tp3780I_ClaimResources entry pBDData %p\n", pBDData);
+
+	pres = request_region(pSettings->usDspBaseIO, 16, "mwave_3780i");
+	if (pres == NULL)
+		retval = -EIO;
+
+	if (retval) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_ClaimResources: Error: Could not claim I/O region starting at %x\n", pSettings->usDspBaseIO);
+		retval = -EIO;
+	}
+
+	PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ClaimResources exit retval %x\n", retval);
+
+	return retval;
+}
+
+int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData)
+{
+	int retval = 0;
+	DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+	PRINTK_2(TRACE_TP3780I,
+		"tp3780i::tp3780I_ReleaseResources entry pBDData %p\n", pBDData);
+
+	release_region(pSettings->usDspBaseIO & (~3), 16);
+
+	if (pSettings->bInterruptClaimed) {
+		free_irq(pSettings->usDspIrq, NULL);
+		pSettings->bInterruptClaimed = false;
+	}
+
+	PRINTK_2(TRACE_TP3780I,
+		"tp3780i::tp3780I_ReleaseResources exit retval %x\n", retval);
+
+	return retval;
+}
+
+
+
+int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
+{
+	DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+	bool bDSPPoweredUp = false, bInterruptAllocated = false;
+
+	PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP entry pBDData %p\n", pBDData);
+
+	if (pBDData->bDSPEnabled) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: DSP already enabled!\n");
+		goto exit_cleanup;
+	}
+
+	if (!pSettings->bDSPEnabled) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "tp3780::tp3780I_EnableDSP: Error: pSettings->bDSPEnabled not set\n");
+		goto exit_cleanup;
+	}
+
+	if (
+		(pSettings->usDspIrq >= s_numIrqs)
+		|| (pSettings->usDspDma >= s_numDmas)
+		|| (s_ausThinkpadIrqToField[pSettings->usDspIrq] == 0xFFFF)
+		|| (s_ausThinkpadDmaToField[pSettings->usDspDma] == 0xFFFF)
+	) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: invalid irq %x\n", pSettings->usDspIrq);
+		goto exit_cleanup;
+	}
+
+	if (
+		((pSettings->usDspBaseIO & 0xF00F) != 0)
+		|| (pSettings->usDspBaseIO & 0x0FF0) == 0
+	) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid DSP base I/O address %x\n", pSettings->usDspBaseIO);
+		goto exit_cleanup;
+	}
+
+	if (pSettings->bModemEnabled) {
+		if (
+			pSettings->usUartIrq >= s_numIrqs
+			|| s_ausThinkpadIrqToField[pSettings->usUartIrq] == 0xFFFF
+		) {
+			PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid UART IRQ %x\n", pSettings->usUartIrq);
+			goto exit_cleanup;
+		}
+		switch (pSettings->usUartBaseIO) {
+			case 0x03F8:
+			case 0x02F8:
+			case 0x03E8:
+			case 0x02E8:
+				break;
+
+			default:
+				PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Invalid UART base I/O address %x\n", pSettings->usUartBaseIO);
+				goto exit_cleanup;
+		}
+	}
+
+	pSettings->bDspIrqActiveLow = pSettings->bDspIrqPulse = true;
+	pSettings->bUartIrqActiveLow = pSettings->bUartIrqPulse = true;
+
+	if (pBDData->bShareDspIrq) {
+		pSettings->bDspIrqActiveLow = false;
+	}
+	if (pBDData->bShareUartIrq) {
+		pSettings->bUartIrqActiveLow = false;
+	}
+
+	pSettings->usNumTransfers = TP_CFG_NumTransfers;
+	pSettings->usReRequest = TP_CFG_RerequestTimer;
+	pSettings->bEnableMEMCS16 = TP_CFG_MEMCS16;
+	pSettings->usIsaMemCmdWidth = TP_CFG_IsaMemCmdWidth;
+	pSettings->bGateIOCHRDY = TP_CFG_GateIOCHRDY;
+	pSettings->bEnablePwrMgmt = TP_CFG_EnablePwrMgmt;
+	pSettings->usHBusTimerLoadValue = TP_CFG_HBusTimerValue;
+	pSettings->bDisableLBusTimeout = TP_CFG_DisableLBusTimeout;
+	pSettings->usN_Divisor = TP_CFG_N_Divisor;
+	pSettings->usM_Multiplier = TP_CFG_M_Multiplier;
+	pSettings->bPllBypass = TP_CFG_PllBypass;
+	pSettings->usChipletEnable = TP_CFG_ChipletEnable;
+
+	if (request_irq(pSettings->usUartIrq, &UartInterrupt, 0, "mwave_uart", NULL)) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Could not get UART IRQ %x\n", pSettings->usUartIrq);
+		goto exit_cleanup;
+	} else {		/* no conflict just release */
+		free_irq(pSettings->usUartIrq, NULL);
+	}
+
+	if (request_irq(pSettings->usDspIrq, &DspInterrupt, 0, "mwave_3780i", NULL)) {
+		PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Could not get 3780i IRQ %x\n", pSettings->usDspIrq);
+		goto exit_cleanup;
+	} else {
+		PRINTK_3(TRACE_TP3780I,
+			"tp3780i::tp3780I_EnableDSP, got interrupt %x bShareDspIrq %x\n",
+			pSettings->usDspIrq, pBDData->bShareDspIrq);
+		bInterruptAllocated = true;
+		pSettings->bInterruptClaimed = true;
+	}
+
+	smapi_set_DSP_power_state(false);
+	if (smapi_set_DSP_power_state(true)) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: smapi_set_DSP_power_state(true) failed\n");
+		goto exit_cleanup;
+	} else {
+		bDSPPoweredUp = true;
+	}
+
+	if (dsp3780I_EnableDSP(pSettings, s_ausThinkpadIrqToField, s_ausThinkpadDmaToField)) {
+		PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: dsp7880I_EnableDSP() failed\n");
+		goto exit_cleanup;
+	}
+
+	EnableSRAM(pBDData);
+
+	pBDData->bDSPEnabled = true;
+
+	PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP exit\n");
+
+	return 0;
+
+exit_cleanup:
+	PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Cleaning up\n");
+	if (bDSPPoweredUp)
+		smapi_set_DSP_power_state(false);
+	if (bInterruptAllocated) {
+		free_irq(pSettings->usDspIrq, NULL);
+		pSettings->bInterruptClaimed = false;
+	}
+	return -EIO;
+}
+
+
+int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData)
+{
+	int retval = 0;
+	DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+	PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP entry pBDData %p\n", pBDData);
+
+	if (pBDData->bDSPEnabled) {
+		dsp3780I_DisableDSP(&pBDData->rDspSettings);
+		if (pSettings->bInterruptClaimed) {
+			free_irq(pSettings->usDspIrq, NULL);
+			pSettings->bInterruptClaimed = false;
+		}
+		smapi_set_DSP_power_state(false);
+		pBDData->bDSPEnabled = false;
+	}
+
+	PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP exit retval %x\n", retval);
+
+	return retval;
+}
+
+
+int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData)
+{
+	int retval = 0;
+	DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+	PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP entry pBDData %p\n",
+		pBDData);
+
+	if (dsp3780I_Reset(pSettings) == 0) {
+		EnableSRAM(pBDData);
+	} else {
+		retval = -EIO;
+	}
+
+	PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP exit retval %x\n", retval);
+
+	return retval;
+}
+
+
+int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData)
+{
+	int retval = 0;
+	DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+	PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP entry pBDData %p\n", pBDData);
+
+	if (dsp3780I_Run(pSettings) == 0) {
+		// @BUG @TBD EnableSRAM(pBDData);
+	} else {
+		retval = -EIO;
+	}
+
+	PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP exit retval %x\n", retval);
+
+	return retval;
+}
+
+
+int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities)
+{
+	int retval = 0;
+
+	PRINTK_2(TRACE_TP3780I,
+		"tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
+
+	memset(pAbilities, 0, sizeof(*pAbilities));
+	/* fill out standard constant fields */
+	pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
+	pAbilities->data_size = pBDData->rDspSettings.uDStoreSize;
+	pAbilities->inst_size = pBDData->rDspSettings.uIStoreSize;
+	pAbilities->bus_dma_bw = pBDData->rDspSettings.uDmaBandwidth;
+
+	/* fill out dynamically determined fields */
+	pAbilities->component_list[0] = 0x00010000 | MW_ADC_MASK;
+	pAbilities->component_list[1] = 0x00010000 | MW_ACI_MASK;
+	pAbilities->component_list[2] = 0x00010000 | MW_AIC1_MASK;
+	pAbilities->component_list[3] = 0x00010000 | MW_AIC2_MASK;
+	pAbilities->component_list[4] = 0x00010000 | MW_CDDAC_MASK;
+	pAbilities->component_list[5] = 0x00010000 | MW_MIDI_MASK;
+	pAbilities->component_list[6] = 0x00010000 | MW_UART_MASK;
+	pAbilities->component_count = 7;
+
+	/* Fill out Mwave OS and BIOS task names */
+
+	memcpy(pAbilities->mwave_os_name, TP_ABILITIES_MWAVEOS_NAME,
+		sizeof(TP_ABILITIES_MWAVEOS_NAME));
+	memcpy(pAbilities->bios_task_name, TP_ABILITIES_BIOSTASK_NAME,
+		sizeof(TP_ABILITIES_BIOSTASK_NAME));
+
+	PRINTK_1(TRACE_TP3780I,
+		"tp3780i::tp3780I_QueryAbilities exit retval=SUCCESSFUL\n");
+
+	return retval;
+}
+
+int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+                               void __user *pvBuffer, unsigned int uCount,
+                               unsigned long ulDSPAddr)
+{
+	int retval = 0;
+	DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+	unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+	bool bRC = false;
+
+	PRINTK_6(TRACE_TP3780I,
+		"tp3780i::tp3780I_ReadWriteDspDStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n",
+		pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr);
+
+	if (pBDData->bDSPEnabled) {
+		switch (uOpcode) {
+		case IOCTL_MW_READ_DATA:
+			bRC = dsp3780I_ReadDStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr);
+			break;
+
+		case IOCTL_MW_READCLEAR_DATA:
+			bRC = dsp3780I_ReadAndClearDStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr);
+			break;
+
+		case IOCTL_MW_WRITE_DATA:
+			bRC = dsp3780I_WriteDStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr);
+			break;
+		}
+	}
+
+	retval = (bRC) ? -EIO : 0;
+	PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ReadWriteDspDStore exit retval %x\n", retval);
+
+	return retval;
+}
+
+
+int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+                               void __user *pvBuffer, unsigned int uCount,
+                               unsigned long ulDSPAddr)
+{
+	int retval = 0;
+	DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+	unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+	bool bRC = false;
+
+	PRINTK_6(TRACE_TP3780I,
+		"tp3780i::tp3780I_ReadWriteDspIStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n",
+		pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr);
+
+	if (pBDData->bDSPEnabled) {
+		switch (uOpcode) {
+		case IOCTL_MW_READ_INST:
+			bRC = dsp3780I_ReadIStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr);
+			break;
+
+		case IOCTL_MW_WRITE_INST:
+			bRC = dsp3780I_WriteIStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr);
+			break;
+		}
+	}
+
+	retval = (bRC) ? -EIO : 0;
+
+	PRINTK_2(TRACE_TP3780I,
+		"tp3780i::tp3780I_ReadWriteDspIStore exit retval %x\n", retval);
+
+	return retval;
+}
+
diff --git a/drivers/char/mwave/tp3780i.h b/drivers/char/mwave/tp3780i.h
new file mode 100644
index 0000000..07685b6
--- /dev/null
+++ b/drivers/char/mwave/tp3780i.h
@@ -0,0 +1,103 @@
+/*
+*
+* tp3780i.h -- declarations for tp3780i.c
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*
+*
+* 10/23/2000 - Alpha Release
+*	First release to the public
+*/
+
+#ifndef _LINUX_TP3780I_H
+#define _LINUX_TP3780I_H
+
+#include <asm/io.h>
+#include "mwavepub.h"
+
+
+/* DSP abilities constants for 3780i based Thinkpads */
+#define TP_ABILITIES_INTS_PER_SEC       39160800
+#define TP_ABILITIES_DATA_SIZE          32768
+#define TP_ABILITIES_INST_SIZE          32768
+#define TP_ABILITIES_MWAVEOS_NAME       "mwaveos0700.dsp"
+#define TP_ABILITIES_BIOSTASK_NAME      "mwbio701.dsp"
+
+
+/* DSP configuration values for 3780i based Thinkpads */
+#define TP_CFG_NumTransfers     3	/* 16 transfers */
+#define TP_CFG_RerequestTimer   1	/* 2 usec */
+#define TP_CFG_MEMCS16          0	/* Disabled, 16-bit memory assumed */
+#define TP_CFG_IsaMemCmdWidth   3	/* 295 nsec (16-bit) */
+#define TP_CFG_GateIOCHRDY      0	/* No IOCHRDY gating */
+#define TP_CFG_EnablePwrMgmt    1	/* Enable low power suspend/resume */
+#define TP_CFG_HBusTimerValue 255	/* HBus timer load value */
+#define TP_CFG_DisableLBusTimeout 0	/* Enable LBus timeout */
+#define TP_CFG_N_Divisor       32	/* Clock = 39.1608 MHz */
+#define TP_CFG_M_Multiplier    37	/* " */
+#define TP_CFG_PllBypass        0	/* don't bypass */
+#define TP_CFG_ChipletEnable 0xFFFF	/* Enable all chiplets */
+
+typedef struct {
+	int bDSPEnabled;
+	int bShareDspIrq;
+	int bShareUartIrq;
+	DSP_3780I_CONFIG_SETTINGS rDspSettings;
+} THINKPAD_BD_DATA;
+
+int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData);
+int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData);
+int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData);
+int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData);
+int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData);
+int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData);
+int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData);
+int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData);
+int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities);
+int tp3780I_Cleanup(THINKPAD_BD_DATA * pBDData);
+int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+                               void __user *pvBuffer, unsigned int uCount,
+                               unsigned long ulDSPAddr);
+int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+                               void __user *pvBuffer, unsigned int uCount,
+                               unsigned long ulDSPAddr);
+
+
+#endif
diff --git a/drivers/char/nsc_gpio.c b/drivers/char/nsc_gpio.c
new file mode 100644
index 0000000..2a91bf0
--- /dev/null
+++ b/drivers/char/nsc_gpio.c
@@ -0,0 +1,139 @@
+/* linux/drivers/char/nsc_gpio.c
+
+   National Semiconductor common GPIO device-file/VFS methods.
+   Allows a user space process to control the GPIO pins.
+
+   Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
+   Copyright (c) 2005      Jim Cromie <jim.cromie@gmail.com>
+*/
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/nsc_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+
+#define NAME "nsc_gpio"
+
+void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index)
+{
+	/* retrieve current config w/o changing it */
+	u32 config = amp->gpio_config(index, ~0, 0);
+
+	/* user requested via 'v' command, so it's INFO */
+	dev_info(amp->dev, "io%02u: 0x%04x %s %s %s %s %s %s %s\tio:%d/%d\n",
+		 index, config,
+		 (config & 1) ? "OE" : "TS",      /* output-enabled/tristate */
+		 (config & 2) ? "PP" : "OD",      /* push pull / open drain */
+		 (config & 4) ? "PUE" : "PUD",    /* pull up enabled/disabled */
+		 (config & 8) ? "LOCKED" : "",    /* locked / unlocked */
+		 (config & 16) ? "LEVEL" : "EDGE",/* level/edge input */
+		 (config & 32) ? "HI" : "LO",     /* trigger on rise/fall edge */
+		 (config & 64) ? "DEBOUNCE" : "", /* debounce */
+
+		 amp->gpio_get(index), amp->gpio_current(index));
+}
+
+ssize_t nsc_gpio_write(struct file *file, const char __user *data,
+		       size_t len, loff_t *ppos)
+{
+	unsigned m = iminor(file_inode(file));
+	struct nsc_gpio_ops *amp = file->private_data;
+	struct device *dev = amp->dev;
+	size_t i;
+	int err = 0;
+
+	for (i = 0; i < len; ++i) {
+		char c;
+		if (get_user(c, data + i))
+			return -EFAULT;
+		switch (c) {
+		case '0':
+			amp->gpio_set(m, 0);
+			break;
+		case '1':
+			amp->gpio_set(m, 1);
+			break;
+		case 'O':
+			dev_dbg(dev, "GPIO%d output enabled\n", m);
+			amp->gpio_config(m, ~1, 1);
+			break;
+		case 'o':
+			dev_dbg(dev, "GPIO%d output disabled\n", m);
+			amp->gpio_config(m, ~1, 0);
+			break;
+		case 'T':
+			dev_dbg(dev, "GPIO%d output is push pull\n", m);
+			amp->gpio_config(m, ~2, 2);
+			break;
+		case 't':
+			dev_dbg(dev, "GPIO%d output is open drain\n", m);
+			amp->gpio_config(m, ~2, 0);
+			break;
+		case 'P':
+			dev_dbg(dev, "GPIO%d pull up enabled\n", m);
+			amp->gpio_config(m, ~4, 4);
+			break;
+		case 'p':
+			dev_dbg(dev, "GPIO%d pull up disabled\n", m);
+			amp->gpio_config(m, ~4, 0);
+			break;
+		case 'v':
+			/* View Current pin settings */
+			amp->gpio_dump(amp, m);
+			break;
+		case '\n':
+			/* end of settings string, do nothing */
+			break;
+		default:
+			dev_err(dev, "io%2d bad setting: chr<0x%2x>\n",
+				m, (int)c);
+			err++;
+		}
+	}
+	if (err)
+		return -EINVAL;	/* full string handled, report error */
+
+	return len;
+}
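+
+/*
+ * Illustrative sketch only (not driver code): a user-space process can
+ * drive a pin through this write() command language.  The device path
+ * /dev/gpio12 below is a hypothetical node for pin 12:
+ *
+ *	int fd = open("/dev/gpio12", O_RDWR);
+ *	write(fd, "OP1", 3);	(output on, pull-up on, drive high)
+ */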
+
+ssize_t nsc_gpio_read(struct file *file, char __user * buf,
+		      size_t len, loff_t * ppos)
+{
+	unsigned m = iminor(file_inode(file));
+	int value;
+	struct nsc_gpio_ops *amp = file->private_data;
+
+	value = amp->gpio_get(m);
+	if (put_user(value ? '1' : '0', buf))
+		return -EFAULT;
+
+	return 1;
+}
+
+/* common file-ops routines for both scx200_gpio and pc87360_gpio */
+EXPORT_SYMBOL(nsc_gpio_write);
+EXPORT_SYMBOL(nsc_gpio_read);
+EXPORT_SYMBOL(nsc_gpio_dump);
+
+static int __init nsc_gpio_init(void)
+{
+	printk(KERN_DEBUG NAME " initializing\n");
+	return 0;
+}
+
+static void __exit nsc_gpio_cleanup(void)
+{
+	printk(KERN_DEBUG NAME " cleanup\n");
+}
+
+module_init(nsc_gpio_init);
+module_exit(nsc_gpio_cleanup);
+
+MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_DESCRIPTION("NatSemi GPIO Common Methods");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
new file mode 100644
index 0000000..25264d6
--- /dev/null
+++ b/drivers/char/nvram.c
@@ -0,0 +1,699 @@
+/*
+ * CMOS/NV-RAM driver for Linux
+ *
+ * Copyright (C) 1997 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
+ * idea by and with help from Richard Jelinek <rj@suse.de>
+ * Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
+ *
+ * This driver allows you to access the contents of the non-volatile memory
+ * in the mc146818 real-time clock (see mc146818rtc.h). This chip is built
+ * into all PCs and into many Atari machines. In the former it's called
+ * "CMOS-RAM", in the latter "NVRAM" (NV stands for non-volatile).
+ *
+ * The data are supplied as a (seekable) character device, /dev/nvram. The
+ * size of this file is dependent on the controller.  The usual size is 114,
+ * the number of freely available bytes in the memory (i.e., not used by the
+ * RTC itself).
+ *
+ * Checksums over the NVRAM contents are managed by this driver. In case of a
+ * bad checksum, reads and writes return -EIO. The checksum can be initialized
+ * to a sane state either by ioctl(NVRAM_INIT) (clear whole NVRAM) or
+ * ioctl(NVRAM_SETCKS) (doesn't change contents, just makes checksum valid
+ * again; use with care!)
+ *
+ * This file also provides some functions for other parts of the kernel that
+ * want to access the NVRAM: nvram_{read,write,check_checksum,set_checksum}.
+ * Obviously this can be used only if this driver is always configured into
+ * the kernel and is not a module. Since the functions are used by some Atari
+ * drivers, this is the case on the Atari.
+ *
+ *
+ * 	1.1	Cesar Barros: SMP locking fixes
+ * 		added changelog
+ * 	1.2	Erik Gilling: Cobalt Networks support
+ * 		Tim Hockin: general cleanup, Cobalt support
+ * 	1.3	Wim Van Sebroeck: convert PRINT_PROC to seq_file
+ */
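+
+/*
+ * Illustrative sketch only (not driver code): reading the first
+ * general-purpose byte from user space.  A bad checksum makes the
+ * read fail with -EIO, as described above:
+ *
+ *	int fd = open("/dev/nvram", O_RDONLY);
+ *	unsigned char b;
+ *	if (fd >= 0 && read(fd, &b, 1) == 1)
+ *		printf("byte 0 = 0x%02x\n", b);
+ */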
+
+#define NVRAM_VERSION	"1.3"
+
+#include <linux/module.h>
+#include <linux/nvram.h>
+
+#define PC		1
+#define ATARI		2
+
+/* select machine configuration */
+#if defined(CONFIG_ATARI)
+#  define MACH ATARI
+#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__)  /* and ?? */
+#  define MACH PC
+#else
+#  error Cannot build nvram driver for this machine configuration.
+#endif
+
+#if MACH == PC
+
+/* RTC in a PC */
+#define CHECK_DRIVER_INIT()	1
+
+/* On PCs, the checksum is built only over bytes 2..31 */
+#define PC_CKS_RANGE_START	2
+#define PC_CKS_RANGE_END	31
+#define PC_CKS_LOC		32
+#define NVRAM_BYTES		(128-NVRAM_FIRST_BYTE)
+
+#define mach_check_checksum	pc_check_checksum
+#define mach_set_checksum	pc_set_checksum
+#define mach_proc_infos		pc_proc_infos
+
+#endif
+
+#if MACH == ATARI
+
+/* Special parameters for RTC in Atari machines */
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#define RTC_PORT(x)		(TT_RTC_BAS + 2*(x))
+#define CHECK_DRIVER_INIT()	(MACH_IS_ATARI && ATARIHW_PRESENT(TT_CLK))
+
+#define NVRAM_BYTES		50
+
+/* On Ataris, the checksum is over all bytes except the checksum bytes
+ * themselves; these are at the very end */
+#define ATARI_CKS_RANGE_START	0
+#define ATARI_CKS_RANGE_END	47
+#define ATARI_CKS_LOC		48
+
+#define mach_check_checksum	atari_check_checksum
+#define mach_set_checksum	atari_set_checksum
+#define mach_proc_infos		atari_proc_infos
+
+#endif
+
+/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
+ * rtc_lock held. Due to the index-port/data-port design of the RTC, we
+ * don't want two different things trying to get to it at once. (e.g. the
+ * periodic 11 min sync from kernel/time/ntp.c vs. this driver.)
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/mc146818rtc.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+
+
+static DEFINE_MUTEX(nvram_mutex);
+static DEFINE_SPINLOCK(nvram_state_lock);
+static int nvram_open_cnt;	/* #times opened */
+static int nvram_open_mode;	/* special open modes */
+#define NVRAM_WRITE		1 /* opened for writing (exclusive) */
+#define NVRAM_EXCL		2 /* opened with O_EXCL */
+
+static int mach_check_checksum(void);
+static void mach_set_checksum(void);
+
+#ifdef CONFIG_PROC_FS
+static void mach_proc_infos(unsigned char *contents, struct seq_file *seq,
+								void *offset);
+#endif
+
+/*
+ * These functions are provided to be called internally or by other parts of
+ * the kernel. It's up to the caller to ensure correct checksum before reading
+ * or after writing (needs to be done only once).
+ *
+ * It is worth noting that these functions all access bytes of general
+ * purpose memory in the NVRAM - that is to say, they all add the
+ * NVRAM_FIRST_BYTE offset.  Pass them offsets into NVRAM as if you did not
+ * know about the RTC cruft.
+ */
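+
+/*
+ * For example, nvram_read_byte(0) returns the first general-purpose
+ * byte (physical CMOS offset NVRAM_FIRST_BYTE), not the RTC seconds
+ * register that lives at physical offset 0.
+ */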
+
+unsigned char __nvram_read_byte(int i)
+{
+	return CMOS_READ(NVRAM_FIRST_BYTE + i);
+}
+EXPORT_SYMBOL(__nvram_read_byte);
+
+unsigned char nvram_read_byte(int i)
+{
+	unsigned long flags;
+	unsigned char c;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	c = __nvram_read_byte(i);
+	spin_unlock_irqrestore(&rtc_lock, flags);
+	return c;
+}
+EXPORT_SYMBOL(nvram_read_byte);
+
+/* This races nicely with trying to read with checksum checking (nvram_read) */
+void __nvram_write_byte(unsigned char c, int i)
+{
+	CMOS_WRITE(c, NVRAM_FIRST_BYTE + i);
+}
+EXPORT_SYMBOL(__nvram_write_byte);
+
+void nvram_write_byte(unsigned char c, int i)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	__nvram_write_byte(c, i);
+	spin_unlock_irqrestore(&rtc_lock, flags);
+}
+EXPORT_SYMBOL(nvram_write_byte);
+
+int __nvram_check_checksum(void)
+{
+	return mach_check_checksum();
+}
+EXPORT_SYMBOL(__nvram_check_checksum);
+
+int nvram_check_checksum(void)
+{
+	unsigned long flags;
+	int rv;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	rv = __nvram_check_checksum();
+	spin_unlock_irqrestore(&rtc_lock, flags);
+	return rv;
+}
+EXPORT_SYMBOL(nvram_check_checksum);
+
+static void __nvram_set_checksum(void)
+{
+	mach_set_checksum();
+}
+
+#if 0
+void nvram_set_checksum(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	__nvram_set_checksum();
+	spin_unlock_irqrestore(&rtc_lock, flags);
+}
+#endif  /*  0  */
+
+/*
+ * These are the file operation functions for user access to /dev/nvram
+ */
+
+static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
+{
+	return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
+					NVRAM_BYTES);
+}
+
+static ssize_t nvram_read(struct file *file, char __user *buf,
+						size_t count, loff_t *ppos)
+{
+	unsigned char contents[NVRAM_BYTES];
+	unsigned i = *ppos;
+	unsigned char *tmp;
+
+	spin_lock_irq(&rtc_lock);
+
+	if (!__nvram_check_checksum())
+		goto checksum_err;
+
+	for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp)
+		*tmp = __nvram_read_byte(i);
+
+	spin_unlock_irq(&rtc_lock);
+
+	if (copy_to_user(buf, contents, tmp - contents))
+		return -EFAULT;
+
+	*ppos = i;
+
+	return tmp - contents;
+
+checksum_err:
+	spin_unlock_irq(&rtc_lock);
+	return -EIO;
+}
+
+static ssize_t nvram_write(struct file *file, const char __user *buf,
+						size_t count, loff_t *ppos)
+{
+	unsigned char contents[NVRAM_BYTES];
+	unsigned i = *ppos;
+	unsigned char *tmp;
+
+	if (i >= NVRAM_BYTES)
+		return 0;	/* Past EOF */
+
+	if (count > NVRAM_BYTES - i)
+		count = NVRAM_BYTES - i;
+	if (count > NVRAM_BYTES)
+		return -EFAULT;	/* Can't happen, but prove it to gcc */
+
+	if (copy_from_user(contents, buf, count))
+		return -EFAULT;
+
+	spin_lock_irq(&rtc_lock);
+
+	if (!__nvram_check_checksum())
+		goto checksum_err;
+
+	for (tmp = contents; count--; ++i, ++tmp)
+		__nvram_write_byte(*tmp, i);
+
+	__nvram_set_checksum();
+
+	spin_unlock_irq(&rtc_lock);
+
+	*ppos = i;
+
+	return tmp - contents;
+
+checksum_err:
+	spin_unlock_irq(&rtc_lock);
+	return -EIO;
+}
+
+static long nvram_ioctl(struct file *file, unsigned int cmd,
+			unsigned long arg)
+{
+	int i;
+
+	switch (cmd) {
+
+	case NVRAM_INIT:
+		/* initialize NVRAM contents and checksum */
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
+
+		mutex_lock(&nvram_mutex);
+		spin_lock_irq(&rtc_lock);
+
+		for (i = 0; i < NVRAM_BYTES; ++i)
+			__nvram_write_byte(0, i);
+		__nvram_set_checksum();
+
+		spin_unlock_irq(&rtc_lock);
+		mutex_unlock(&nvram_mutex);
+		return 0;
+
+	case NVRAM_SETCKS:
+		/* just set checksum, contents unchanged (maybe useful after
+		 * checksum garbaged somehow...) */
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
+
+		mutex_lock(&nvram_mutex);
+		spin_lock_irq(&rtc_lock);
+		__nvram_set_checksum();
+		spin_unlock_irq(&rtc_lock);
+		mutex_unlock(&nvram_mutex);
+		return 0;
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+static int nvram_open(struct inode *inode, struct file *file)
+{
+	spin_lock(&nvram_state_lock);
+
+	if ((nvram_open_cnt && (file->f_flags & O_EXCL)) ||
+	    (nvram_open_mode & NVRAM_EXCL) ||
+	    ((file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE))) {
+		spin_unlock(&nvram_state_lock);
+		return -EBUSY;
+	}
+
+	if (file->f_flags & O_EXCL)
+		nvram_open_mode |= NVRAM_EXCL;
+	if (file->f_mode & FMODE_WRITE)
+		nvram_open_mode |= NVRAM_WRITE;
+	nvram_open_cnt++;
+
+	spin_unlock(&nvram_state_lock);
+
+	return 0;
+}
+
+static int nvram_release(struct inode *inode, struct file *file)
+{
+	spin_lock(&nvram_state_lock);
+
+	nvram_open_cnt--;
+
+	/* if only one instance is open, clear the EXCL bit */
+	if (nvram_open_mode & NVRAM_EXCL)
+		nvram_open_mode &= ~NVRAM_EXCL;
+	if (file->f_mode & FMODE_WRITE)
+		nvram_open_mode &= ~NVRAM_WRITE;
+
+	spin_unlock(&nvram_state_lock);
+
+	return 0;
+}
+
+#ifndef CONFIG_PROC_FS
+static int nvram_add_proc_fs(void)
+{
+	return 0;
+}
+
+#else
+
+static int nvram_proc_read(struct seq_file *seq, void *offset)
+{
+	unsigned char contents[NVRAM_BYTES];
+	int i = 0;
+
+	spin_lock_irq(&rtc_lock);
+	for (i = 0; i < NVRAM_BYTES; ++i)
+		contents[i] = __nvram_read_byte(i);
+	spin_unlock_irq(&rtc_lock);
+
+	mach_proc_infos(contents, seq, offset);
+
+	return 0;
+}
+
+static int nvram_add_proc_fs(void)
+{
+	if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read))
+		return -ENOMEM;
+	return 0;
+}
+
+#endif /* CONFIG_PROC_FS */
+
+static const struct file_operations nvram_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= nvram_llseek,
+	.read		= nvram_read,
+	.write		= nvram_write,
+	.unlocked_ioctl	= nvram_ioctl,
+	.open		= nvram_open,
+	.release	= nvram_release,
+};
+
+static struct miscdevice nvram_dev = {
+	NVRAM_MINOR,
+	"nvram",
+	&nvram_fops
+};
+
+static int __init nvram_init(void)
+{
+	int ret;
+
+	/* First test whether the driver should init at all */
+	if (!CHECK_DRIVER_INIT())
+		return -ENODEV;
+
+	ret = misc_register(&nvram_dev);
+	if (ret) {
+		printk(KERN_ERR "nvram: can't misc_register on minor=%d\n",
+		    NVRAM_MINOR);
+		goto out;
+	}
+	ret = nvram_add_proc_fs();
+	if (ret) {
+		printk(KERN_ERR "nvram: can't create /proc/driver/nvram\n");
+		goto outmisc;
+	}
+	ret = 0;
+	printk(KERN_INFO "Non-volatile memory driver v" NVRAM_VERSION "\n");
+out:
+	return ret;
+outmisc:
+	misc_deregister(&nvram_dev);
+	goto out;
+}
+
+static void __exit nvram_cleanup_module(void)
+{
+	remove_proc_entry("driver/nvram", NULL);
+	misc_deregister(&nvram_dev);
+}
+
+module_init(nvram_init);
+module_exit(nvram_cleanup_module);
+
+/*
+ * Machine specific functions
+ */
+
+#if MACH == PC
+
+static int pc_check_checksum(void)
+{
+	int i;
+	unsigned short sum = 0;
+	unsigned short expect;
+
+	for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
+		sum += __nvram_read_byte(i);
+	expect = __nvram_read_byte(PC_CKS_LOC)<<8 |
+	    __nvram_read_byte(PC_CKS_LOC+1);
+	return (sum & 0xffff) == expect;
+}
+
+static void pc_set_checksum(void)
+{
+	int i;
+	unsigned short sum = 0;
+
+	for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
+		sum += __nvram_read_byte(i);
+	__nvram_write_byte(sum >> 8, PC_CKS_LOC);
+	__nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1);
+}
+
+#ifdef CONFIG_PROC_FS
+
+static const char * const floppy_types[] = {
+	"none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M",
+	"3.5'' 2.88M", "3.5'' 2.88M"
+};
+
+static const char * const gfx_types[] = {
+	"EGA, VGA, ... (with BIOS)",
+	"CGA (40 cols)",
+	"CGA (80 cols)",
+	"monochrome",
+};
+
+static void pc_proc_infos(unsigned char *nvram, struct seq_file *seq,
+								void *offset)
+{
+	int checksum;
+	int type;
+
+	spin_lock_irq(&rtc_lock);
+	checksum = __nvram_check_checksum();
+	spin_unlock_irq(&rtc_lock);
+
+	seq_printf(seq, "Checksum status: %svalid\n", checksum ? "" : "not ");
+
+	seq_printf(seq, "# floppies     : %d\n",
+	    (nvram[6] & 1) ? (nvram[6] >> 6) + 1 : 0);
+	seq_printf(seq, "Floppy 0 type  : ");
+	type = nvram[2] >> 4;
+	if (type < ARRAY_SIZE(floppy_types))
+		seq_printf(seq, "%s\n", floppy_types[type]);
+	else
+		seq_printf(seq, "%d (unknown)\n", type);
+	seq_printf(seq, "Floppy 1 type  : ");
+	type = nvram[2] & 0x0f;
+	if (type < ARRAY_SIZE(floppy_types))
+		seq_printf(seq, "%s\n", floppy_types[type]);
+	else
+		seq_printf(seq, "%d (unknown)\n", type);
+
+	seq_printf(seq, "HD 0 type      : ");
+	type = nvram[4] >> 4;
+	if (type)
+		seq_printf(seq, "%02x\n", type == 0x0f ? nvram[11] : type);
+	else
+		seq_printf(seq, "none\n");
+
+	seq_printf(seq, "HD 1 type      : ");
+	type = nvram[4] & 0x0f;
+	if (type)
+		seq_printf(seq, "%02x\n", type == 0x0f ? nvram[12] : type);
+	else
+		seq_printf(seq, "none\n");
+
+	seq_printf(seq, "HD type 48 data: %d/%d/%d C/H/S, precomp %d, lz %d\n",
+	    nvram[18] | (nvram[19] << 8),
+	    nvram[20], nvram[25],
+	    nvram[21] | (nvram[22] << 8), nvram[23] | (nvram[24] << 8));
+	seq_printf(seq, "HD type 49 data: %d/%d/%d C/H/S, precomp %d, lz %d\n",
+	    nvram[39] | (nvram[40] << 8),
+	    nvram[41], nvram[46],
+	    nvram[42] | (nvram[43] << 8), nvram[44] | (nvram[45] << 8));
+
+	seq_printf(seq, "DOS base memory: %d kB\n", nvram[7] | (nvram[8] << 8));
+	seq_printf(seq, "Extended memory: %d kB (configured), %d kB (tested)\n",
+	    nvram[9] | (nvram[10] << 8), nvram[34] | (nvram[35] << 8));
+
+	seq_printf(seq, "Gfx adapter    : %s\n",
+	    gfx_types[(nvram[6] >> 4) & 3]);
+
+	seq_printf(seq, "FPU            : %sinstalled\n",
+	    (nvram[6] & 2) ? "" : "not ");
+
+	return;
+}
+#endif
+
+#endif /* MACH == PC */
+
+#if MACH == ATARI
+
+static int atari_check_checksum(void)
+{
+	int i;
+	unsigned char sum = 0;
+
+	for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
+		sum += __nvram_read_byte(i);
+	return (__nvram_read_byte(ATARI_CKS_LOC) == (~sum & 0xff)) &&
+	    (__nvram_read_byte(ATARI_CKS_LOC + 1) == (sum & 0xff));
+}
+
+static void atari_set_checksum(void)
+{
+	int i;
+	unsigned char sum = 0;
+
+	for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
+		sum += __nvram_read_byte(i);
+	__nvram_write_byte(~sum, ATARI_CKS_LOC);
+	__nvram_write_byte(sum, ATARI_CKS_LOC + 1);
+}
+
+#ifdef CONFIG_PROC_FS
+
+static struct {
+	unsigned char val;
+	const char *name;
+} boot_prefs[] = {
+	{ 0x80, "TOS" },
+	{ 0x40, "ASV" },
+	{ 0x20, "NetBSD (?)" },
+	{ 0x10, "Linux" },
+	{ 0x00, "unspecified" }
+};
+
+static const char * const languages[] = {
+	"English (US)",
+	"German",
+	"French",
+	"English (UK)",
+	"Spanish",
+	"Italian",
+	"6 (undefined)",
+	"Swiss (French)",
+	"Swiss (German)"
+};
+
+static const char * const dateformat[] = {
+	"MM%cDD%cYY",
+	"DD%cMM%cYY",
+	"YY%cMM%cDD",
+	"YY%cDD%cMM",
+	"4 (undefined)",
+	"5 (undefined)",
+	"6 (undefined)",
+	"7 (undefined)"
+};
+
+static const char * const colors[] = {
+	"2", "4", "16", "256", "65536", "??", "??", "??"
+};
+
+static void atari_proc_infos(unsigned char *nvram, struct seq_file *seq,
+								void *offset)
+{
+	int checksum = nvram_check_checksum();
+	int i;
+	unsigned vmode;
+
+	seq_printf(seq, "Checksum status  : %svalid\n", checksum ? "" : "not ");
+
+	seq_printf(seq, "Boot preference  : ");
+	for (i = ARRAY_SIZE(boot_prefs) - 1; i >= 0; --i) {
+		if (nvram[1] == boot_prefs[i].val) {
+			seq_printf(seq, "%s\n", boot_prefs[i].name);
+			break;
+		}
+	}
+	if (i < 0)
+		seq_printf(seq, "0x%02x (undefined)\n", nvram[1]);
+
+	seq_printf(seq, "SCSI arbitration : %s\n",
+	    (nvram[16] & 0x80) ? "on" : "off");
+	seq_printf(seq, "SCSI host ID     : ");
+	if (nvram[16] & 0x80)
+		seq_printf(seq, "%d\n", nvram[16] & 7);
+	else
+		seq_printf(seq, "n/a\n");
+
+	/* the following entries are defined only for the Falcon */
+	if ((atari_mch_cookie >> 16) != ATARI_MCH_FALCON)
+		return;
+
+	seq_printf(seq, "OS language      : ");
+	if (nvram[6] < ARRAY_SIZE(languages))
+		seq_printf(seq, "%s\n", languages[nvram[6]]);
+	else
+		seq_printf(seq, "%u (undefined)\n", nvram[6]);
+	seq_printf(seq, "Keyboard language: ");
+	if (nvram[7] < ARRAY_SIZE(languages))
+		seq_printf(seq, "%s\n", languages[nvram[7]]);
+	else
+		seq_printf(seq, "%u (undefined)\n", nvram[7]);
+	seq_printf(seq, "Date format      : ");
+	seq_printf(seq, dateformat[nvram[8] & 7],
+	    nvram[9] ? nvram[9] : '/', nvram[9] ? nvram[9] : '/');
+	seq_printf(seq, ", %dh clock\n", nvram[8] & 16 ? 24 : 12);
+	seq_printf(seq, "Boot delay       : ");
+	if (nvram[10] == 0)
+		seq_printf(seq, "default");
+	else
+		seq_printf(seq, "%ds%s\n", nvram[10],
+		    nvram[10] < 8 ? ", no memory test" : "");
+
+	vmode = (nvram[14] << 8) | nvram[15];
+	seq_printf(seq,
+	    "Video mode       : %s colors, %d columns, %s %s monitor\n",
+	    colors[vmode & 7],
+	    vmode & 8 ? 80 : 40,
+	    vmode & 16 ? "VGA" : "TV", vmode & 32 ? "PAL" : "NTSC");
+	seq_printf(seq, "                   %soverscan, compat. mode %s%s\n",
+	    vmode & 64 ? "" : "no ",
+	    vmode & 128 ? "on" : "off",
+	    vmode & 256 ?
+	    (vmode & 16 ? ", line doubling" : ", half screen") : "");
+
+	return;
+}
+#endif
+
+#endif /* MACH == ATARI */
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(NVRAM_MINOR);
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
new file mode 100644
index 0000000..a7113b7
--- /dev/null
+++ b/drivers/char/nwbutton.c
@@ -0,0 +1,246 @@
+/*
+ * 	NetWinder Button Driver-
+ *	Copyright (C) Alex Holden <alex@linuxhacker.org> 1998, 1999.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+#include <linux/uaccess.h>
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+
+#define __NWBUTTON_C		/* Tell the header file who we are */
+#include "nwbutton.h"
+
+static void button_sequence_finished(struct timer_list *unused);
+
+static int button_press_count;		/* The count of button presses */
+/* Times for the end of a sequence */
+static DEFINE_TIMER(button_timer, button_sequence_finished);
+static DECLARE_WAIT_QUEUE_HEAD(button_wait_queue); /* Used for blocking read */
+static char button_output_buffer[32];	/* Stores data to write out of device */
+static int bcount;			/* The number of bytes in the buffer */
+static int bdelay = BUTTON_DELAY;	/* The delay, in jiffies */
+static struct button_callback button_callback_list[32]; /* The callback list */
+static int callback_count;		/* The number of callbacks registered */
+static int reboot_count = NUM_PRESSES_REBOOT; /* Number of presses to reboot */
+
+/*
+ * This function is called by other drivers to register a callback function
+ * to be called when a particular number of button presses occurs.
+ * The callback list is a static array of 32 entries (I somehow doubt many
+ * people are ever going to want to register more than 32 different actions
+ * to be performed by the kernel on different numbers of button presses ;).
+ * However, if an attempt is made to register a 33rd entry (perhaps a stuck
+ * loop somewhere registering the same entry over and over?), it will fail to
+ * do so and return -ENOMEM. If an attempt is made to register a null pointer,
+ * it will fail to do so and return -EINVAL.
+ * Because callbacks can be unregistered at random the list can become
+ * fragmented, so we need to search through the list until we find the first
+ * free entry.
+ *
+ * FIXME: Has anyone spotted any locking functions in this code recently?
+ */
+
+int button_add_callback (void (*callback) (void), int count)
+{
+	int lp = 0;
+	if (callback_count == 32) {
+		return -ENOMEM;
+	}
+	if (!callback) {
+		return -EINVAL;
+	}
+	callback_count++;
+	for (; (button_callback_list [lp].callback); lp++);
+	button_callback_list [lp].callback = callback;
+	button_callback_list [lp].count = count;
+	return 0;
+}
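+
+/*
+ * Illustrative sketch only: a driver that wants to act on a run of
+ * three presses could register a hook (my_three_press_hook is a
+ * hypothetical function):
+ *
+ *	if (button_add_callback(my_three_press_hook, 3) < 0)
+ *		printk(KERN_WARNING "no free callback slots\n");
+ */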
+
+/*
+ * This function is called by other drivers to deregister a callback function.
+ * If you attempt to unregister a callback which does not exist, it will fail
+ * with -EINVAL. If there is more than one entry with the same address,
+ * because it searches the list from end to beginning, it will unregister the
+ * last one to be registered first (FILO: first in, last out).
+ * Note that this is not necessarily true if the entries are not submitted
+ * at the same time, because another driver could have unregistered a callback
+ * between the submissions creating a gap earlier in the list, which would
+ * be filled first at submission time.
+ */
+
+int button_del_callback (void (*callback) (void))
+{
+	int lp = 31;
+	if (!callback) {
+		return -EINVAL;
+	}
+	while (lp >= 0) {
+		if ((button_callback_list [lp].callback) == callback) {
+			button_callback_list [lp].callback = NULL;
+			button_callback_list [lp].count = 0;
+			callback_count--;
+			return 0;
+		}
+		lp--;
+	}
+	return -EINVAL;
+}
+
+/*
+ * This function is called by button_sequence_finished to search through the
+ * list of callback functions, and call any of them whose count argument
+ * matches the current count of button presses. It starts at the beginning
+ * of the list and works up to the end. It will refuse to follow a null
+ * pointer (which should never happen anyway).
+ */
+
+static void button_consume_callbacks (int bpcount)
+{
+	int lp = 0;
+	for (; lp <= 31; lp++) {
+		if ((button_callback_list [lp].count) == bpcount) {
+			if (button_callback_list [lp].callback) {
+				button_callback_list[lp].callback();
+			}
+		}
+	}
+}
+
+/* 
+ * This function is called when the button_timer times out.
+ * ie. When you don't press the button for bdelay jiffies, this is taken to
+ * mean you have ended the sequence of key presses, and this function is
+ * called to wind things up (write the press_count out to /dev/button, call
+ * any matching registered function callbacks, initiate reboot, etc.).
+ */
+
+static void button_sequence_finished(struct timer_list *unused)
+{
+	if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) &&
+	    button_press_count == reboot_count)
+		kill_cad_pid(SIGINT, 1);	/* Ask init to reboot us */
+	button_consume_callbacks (button_press_count);
+	bcount = sprintf (button_output_buffer, "%d\n", button_press_count);
+	button_press_count = 0;		/* Reset the button press counter */
+	wake_up_interruptible (&button_wait_queue);
+}
+
+/* 
+ *  This handler is called when the orange button is pressed (GPIO 10 of the
+ *  SuperIO chip, which maps to logical IRQ 26). If the press_count is 0,
+ *  this is the first press, so it starts a timer and increments the counter.
+ *  If it is higher than 0, it deletes the old timer, starts a new one, and
+ *  increments the counter.
+ */ 
+
+static irqreturn_t button_handler (int irq, void *dev_id)
+{
+	button_press_count++;
+	mod_timer(&button_timer, jiffies + bdelay);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * This function is called when a user space program attempts to read
+ * /dev/nwbutton. It puts the device to sleep on the wait queue until
+ * button_sequence_finished writes some data to the buffer and flushes
+ * the queue, at which point it writes the data out to the device and
+ * returns the number of characters it has written. This function is
+ * reentrant, so that many processes can be attempting to read from the
+ * device at any one time.
+ */
+
+static ssize_t button_read (struct file *filp, char __user *buffer,
+			size_t count, loff_t *ppos)
+{
+	DEFINE_WAIT(wait);
+	prepare_to_wait(&button_wait_queue, &wait, TASK_INTERRUPTIBLE);
+	schedule();
+	finish_wait(&button_wait_queue, &wait);
+	return (copy_to_user (buffer, &button_output_buffer, bcount))
+		 ? -EFAULT : bcount;
+}
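+
+/*
+ * Editor's example (not part of the original driver): reading the press
+ * count from user space.  /dev/nwbutton is a misc device (char major 10,
+ * minor 158); each read blocks until a sequence of presses ends, then
+ * returns the count as an ASCII line.
+ */
+#if 0	/* illustrative user-space code */
+#include <stdio.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+int main(void)
+{
+	char buf[16];
+	ssize_t n;
+	int fd = open("/dev/nwbutton", O_RDONLY);
+
+	if (fd < 0)
+		return 1;
+	n = read(fd, buf, sizeof(buf) - 1);	/* blocks here */
+	if (n > 0) {
+		buf[n] = '\0';
+		printf("presses: %s", buf);	/* buf ends in '\n' */
+	}
+	close(fd);
+	return 0;
+}
+#endif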
+
+/* 
+ * This structure is the file operations structure, which specifies which
+ * callback functions the kernel should call when a user mode process
+ * attempts to perform these operations on the device.
+ */
+
+static const struct file_operations button_fops = {
+	.owner		= THIS_MODULE,
+	.read		= button_read,
+	.llseek		= noop_llseek,
+};
+
+/* 
+ * This structure is the misc device structure, which specifies the minor
+ * device number (158 in this case), the name of the device (for /proc/misc),
+ * and the address of the above file operations structure.
+ */
+
+static struct miscdevice button_misc_device = {
+	.minor	= BUTTON_MINOR,
+	.name	= "nwbutton",
+	.fops	= &button_fops,
+};
+
+/*
+ * This function is called to initialise the driver, either from misc.c at
+ * bootup if the driver is compiled into the kernel, or from init_module
+ * below at module insert time. It attempts to register the device node
+ * and the IRQ, and bails out with a warning message if either registration
+ * fails, though neither ever should because the device number and IRQ are
+ * unique to this driver.
+ */
+
+static int __init nwbutton_init(void)
+{
+	if (!machine_is_netwinder())
+		return -ENODEV;
+
+	printk (KERN_INFO "NetWinder Button Driver Version %s (C) Alex Holden "
+			"<alex@linuxhacker.org> 1998.\n", VERSION);
+
+	if (misc_register (&button_misc_device)) {
+		printk (KERN_WARNING "nwbutton: Couldn't register device 10, "
+				"%d.\n", BUTTON_MINOR);
+		return -EBUSY;
+	}
+
+	if (request_irq (IRQ_NETWINDER_BUTTON, button_handler, 0,
+			"nwbutton", NULL)) {
+		printk (KERN_WARNING "nwbutton: IRQ %d is not free.\n",
+				IRQ_NETWINDER_BUTTON);
+		misc_deregister (&button_misc_device);
+		return -EIO;
+	}
+	return 0;
+}
+
+static void __exit nwbutton_exit (void) 
+{
+	free_irq (IRQ_NETWINDER_BUTTON, NULL);
+	misc_deregister (&button_misc_device);
+}
+
+
+MODULE_AUTHOR("Alex Holden");
+MODULE_LICENSE("GPL");
+
+module_init(nwbutton_init);
+module_exit(nwbutton_exit);
diff --git a/drivers/char/nwbutton.h b/drivers/char/nwbutton.h
new file mode 100644
index 0000000..9dedfd7
--- /dev/null
+++ b/drivers/char/nwbutton.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NWBUTTON_H
+#define __NWBUTTON_H
+
+/*
+ * 	NetWinder Button Driver-
+ *	Copyright (C) Alex Holden <alex@linuxhacker.org> 1998, 1999.
+ */
+
+#ifdef __NWBUTTON_C	/* Actually compiling the driver itself */
+
+/* Various defines: */
+
+#define NUM_PRESSES_REBOOT 2	/* How many presses to activate shutdown */
+#define BUTTON_DELAY 30 	/* How many jiffies for sequence to end */
+#define VERSION "0.3"		/* Driver version number */
+#define BUTTON_MINOR 158	/* Major 10, Minor 158, /dev/nwbutton */
+
+/* Structure definitions: */
+
+struct button_callback {
+	void (*callback) (void);
+	int count;
+};
+
+/* Function prototypes: */
+
+static void button_sequence_finished(struct timer_list *unused);
+static irqreturn_t button_handler (int irq, void *dev_id);
+int button_init (void);
+int button_add_callback (void (*callback) (void), int count);
+int button_del_callback (void (*callback) (void));
+static void button_consume_callbacks (int bpcount);
+
+#else /* Not compiling the driver itself */
+
+extern int button_add_callback (void (*callback) (void), int count);
+extern int button_del_callback (void (*callback) (void));
+
+#endif /* __NWBUTTON_C */
+#endif /* __NWBUTTON_H */
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
new file mode 100644
index 0000000..a284ae2
--- /dev/null
+++ b/drivers/char/nwflash.c
@@ -0,0 +1,625 @@
+/*
+ * Flash memory interface rev.5 driver for the Intel
+ * Flash chips used on the NetWinder.
+ *
+ * 20/08/2000	RMK	use __ioremap to map flash into virtual memory
+ *			make a few more places use "volatile"
+ * 22/05/2001	RMK	- Lock read against write
+ *			- merge printk level changes (with mods) from Alan Cox.
+ *			- use *ppos as the file position, not file->f_pos.
+ *			- fix check for out of range pos and r/w size
+ *
+ * Please note that we are tampering with the only flash chip in the
+ * machine, which contains the bootup code.  We therefore have the
+ * power to convert these machines into doorstops...
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/jiffies.h>
+
+#include <asm/hardware/dec21285.h>
+#include <asm/io.h>
+#include <asm/mach-types.h>
+#include <linux/uaccess.h>
+
+/*****************************************************************************/
+#include <asm/nwflash.h>
+
+#define	NWFLASH_VERSION "6.4"
+
+static DEFINE_MUTEX(flash_mutex);
+static void kick_open(void);
+static int get_flash_id(void);
+static int erase_block(int nBlock);
+static int write_block(unsigned long p, const char __user *buf, int count);
+
+#define KFLASH_SIZE	1024*1024	//1 Meg
+#define KFLASH_SIZE4	4*1024*1024	//4 Meg
+#define KFLASH_ID	0x89A6		//Intel flash
+#define KFLASH_ID4	0xB0D4		//Intel flash 4Meg
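+
+/*
+ * Editor's note: the magic bytes poked into the chip below are the
+ * standard Intel bulk-flash commands: 0x90 read identifier, 0xFF read
+ * array, 0x40 program, 0x20/0xD0 erase/confirm, 0x70 read status,
+ * 0x50 clear status.
+ */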
+
+static bool flashdebug;		//if set - we will display progress msgs
+
+static int gbWriteEnable;
+static int gbWriteBase64Enable;
+static volatile unsigned char *FLASH_BASE;
+static int gbFlashSize = KFLASH_SIZE;
+static DEFINE_MUTEX(nwflash_mutex);
+
+static int get_flash_id(void)
+{
+	volatile unsigned int c1, c2;
+
+	/*
+	 * try to get flash chip ID
+	 */
+	kick_open();
+	c2 = inb(0x80);
+	*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x90;
+	udelay(15);
+	c1 = *(volatile unsigned char *) FLASH_BASE;
+	c2 = inb(0x80);
+
+	/*
+	 * on 4 Meg flash the second byte is actually at offset 2...
+	 */
+	if (c1 == 0xB0)
+		c2 = *(volatile unsigned char *) (FLASH_BASE + 2);
+	else
+		c2 = *(volatile unsigned char *) (FLASH_BASE + 1);
+
+	c2 += (c1 << 8);
+
+	/*
+	 * set it back to read mode
+	 */
+	*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF;
+
+	if (c2 == KFLASH_ID4)
+		gbFlashSize = KFLASH_SIZE4;
+
+	return c2;
+}
+
+static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+	mutex_lock(&flash_mutex);
+	switch (cmd) {
+	case CMD_WRITE_DISABLE:
+		gbWriteBase64Enable = 0;
+		gbWriteEnable = 0;
+		break;
+
+	case CMD_WRITE_ENABLE:
+		gbWriteEnable = 1;
+		break;
+
+	case CMD_WRITE_BASE64K_ENABLE:
+		gbWriteBase64Enable = 1;
+		break;
+
+	default:
+		gbWriteBase64Enable = 0;
+		gbWriteEnable = 0;
+		mutex_unlock(&flash_mutex);
+		return -EINVAL;
+	}
+	mutex_unlock(&flash_mutex);
+	return 0;
+}
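+
+/*
+ * Editor's example (not part of the original driver): user space must
+ * opt in via ioctl() before /dev/nwflash accepts writes; the CMD_*
+ * numbers come from the kernel's <asm/nwflash.h>.  Writes into the
+ * first 64K additionally need CMD_WRITE_BASE64K_ENABLE.
+ */
+#if 0	/* illustrative user-space code */
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <asm/nwflash.h>
+
+static int open_flash_writable(void)
+{
+	int fd = open("/dev/nwflash", O_RDWR);
+
+	if (fd >= 0 && ioctl(fd, CMD_WRITE_ENABLE, 0) < 0) {
+		close(fd);
+		return -1;
+	}
+	return fd;
+}
+#endif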
+
+static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
+			  loff_t *ppos)
+{
+	ssize_t ret;
+
+	if (flashdebug)
+		printk(KERN_DEBUG "flash_read: offset=0x%llx, "
+		       "buffer=%p, count=0x%zx.\n", *ppos, buf, size);
+	/*
+	 * We now lock against reads and writes. --rmk
+	 */
+	if (mutex_lock_interruptible(&nwflash_mutex))
+		return -ERESTARTSYS;
+
+	ret = simple_read_from_buffer(buf, size, ppos, (void *)FLASH_BASE, gbFlashSize);
+	mutex_unlock(&nwflash_mutex);
+
+	return ret;
+}
+
+static ssize_t flash_write(struct file *file, const char __user *buf,
+			   size_t size, loff_t * ppos)
+{
+	unsigned long p = *ppos;
+	unsigned int count = size;
+	int written;
+	int nBlock, temp, rc;
+	int i, j;
+
+	if (flashdebug)
+		printk(KERN_DEBUG "flash_write: offset=0x%lX, buffer=0x%p, count=0x%X.\n",
+		       p, buf, count);
+
+	if (!gbWriteEnable)
+		return -EINVAL;
+
+	if (p < 64 * 1024 && (!gbWriteBase64Enable))
+		return -EINVAL;
+
+	/*
+	 * check for out of range pos or count
+	 */
+	if (p >= gbFlashSize)
+		return count ? -ENXIO : 0;
+
+	if (count > gbFlashSize - p)
+		count = gbFlashSize - p;
+			
+	if (!access_ok(VERIFY_READ, buf, count))
+		return -EFAULT;
+
+	/*
+	 * We now lock against reads and writes. --rmk
+	 */
+	if (mutex_lock_interruptible(&nwflash_mutex))
+		return -ERESTARTSYS;
+
+	written = 0;
+
+	nBlock = (int) p >> 16;	//block # of 64K bytes
+
+	/*
+	 * # of 64K blocks to erase and write
+	 */
+	temp = ((int) (p + count) >> 16) - nBlock + 1;
+
+	/*
+	 * write ends at exactly 64k boundary?
+	 */
+	if (((int) (p + count) & 0xFFFF) == 0)
+		temp -= 1;
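+	/*
+	 * Editor's worked example: p = 0xFF00, count = 0x200 touches
+	 * blocks 0 and 1: temp = (0x10100 >> 16) - 0 + 1 = 2.  With
+	 * count = 0x100 the write ends exactly on the 64K boundary, so
+	 * the correction above drops temp back to 1.
+	 */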
+
+	if (flashdebug)
+		printk(KERN_DEBUG "flash_write: writing %d block(s) "
+			"starting at %d.\n", temp, nBlock);
+
+	for (; temp; temp--, nBlock++) {
+		if (flashdebug)
+			printk(KERN_DEBUG "flash_write: erasing block %d.\n", nBlock);
+
+		/*
+		 * first we have to erase the block(s), where we will write...
+		 */
+		i = 0;
+		j = 0;
+	  RetryBlock:
+		do {
+			rc = erase_block(nBlock);
+			i++;
+		} while (rc && i < 10);
+
+		if (rc) {
+			printk(KERN_ERR "flash_write: erase error %x\n", rc);
+			break;
+		}
+		if (flashdebug)
+			printk(KERN_DEBUG "flash_write: writing offset %lX, "
+			       "from buf %p, bytes left %X.\n", p, buf,
+			       count - written);
+
+		/*
+		 * write_block will limit write to space left in this block
+		 */
+		rc = write_block(p, buf, count - written);
+		j++;
+
+		/*
+		 * what if the write verify somehow failed? Can't happen??
+		 */
+		if (!rc) {
+			/*
+			 * retry up to 10 times
+			 */
+			if (j < 10)
+				goto RetryBlock;
+			else
+				/*
+				 * else quit with error...
+				 */
+				rc = -1;
+
+		}
+		if (rc < 0) {
+			printk(KERN_ERR "flash_write: write error %X\n", rc);
+			break;
+		}
+		p += rc;
+		buf += rc;
+		written += rc;
+		*ppos += rc;
+
+		if (flashdebug)
+			printk(KERN_DEBUG "flash_write: written 0x%X bytes OK.\n", written);
+	}
+
+	mutex_unlock(&nwflash_mutex);
+
+	return written;
+}
+
+
+/*
+ * The memory devices use the full 32/64 bits of the offset, and so we cannot
+ * check against negative addresses: they are ok. The return value is weird,
+ * though, in that case (0).
+ *
+ * also note that seeking relative to the "end of file" isn't supported:
+ * it has no meaning, so it returns -EINVAL.
+ */
+static loff_t flash_llseek(struct file *file, loff_t offset, int orig)
+{
+	loff_t ret;
+
+	mutex_lock(&flash_mutex);
+	if (flashdebug)
+		printk(KERN_DEBUG "flash_llseek: offset=0x%X, orig=0x%X.\n",
+		       (unsigned int) offset, orig);
+
+	ret = no_seek_end_llseek_size(file, offset, orig, gbFlashSize);
+	mutex_unlock(&flash_mutex);
+	return ret;
+}
+
+
+/*
+ * assume that main Write routine did the parameter checking...
+ * so just go ahead and erase what was requested!
+ */
+
+static int erase_block(int nBlock)
+{
+	volatile unsigned int c1;
+	volatile unsigned char *pWritePtr;
+	unsigned long timeout;
+	int temp, temp1;
+
+	/*
+	 * reset footbridge to the correct offset 0 (...0..3)
+	 */
+	*CSR_ROMWRITEREG = 0;
+
+	/*
+	 * dummy ROM read
+	 */
+	c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);
+
+	kick_open();
+	/*
+	 * reset status if old errors
+	 */
+	*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
+
+	/*
+	 * erase a block...
+	 * aim at the middle of a current block...
+	 */
+	pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + 0x8000 + (nBlock << 16)));
+	/*
+	 * dummy read
+	 */
+	c1 = *pWritePtr;
+
+	kick_open();
+	/*
+	 * erase
+	 */
+	*(volatile unsigned char *) pWritePtr = 0x20;
+
+	/*
+	 * confirm
+	 */
+	*(volatile unsigned char *) pWritePtr = 0xD0;
+
+	/*
+	 * wait 10 ms
+	 */
+	msleep(10);
+
+	/*
+	 * wait while erasing in process (up to 10 sec)
+	 */
+	timeout = jiffies + 10 * HZ;
+	c1 = 0;
+	while (!(c1 & 0x80) && time_before(jiffies, timeout)) {
+		msleep(10);
+		/*
+		 * read any address
+		 */
+		c1 = *(volatile unsigned char *) (pWritePtr);
+		//              printk("Flash_erase: status=%X.\n",c1);
+	}
+
+	/*
+	 * set flash for normal read access
+	 */
+	kick_open();
+//      *(volatile unsigned char*)(FLASH_BASE+0x8000) = 0xFF;
+	*(volatile unsigned char *) pWritePtr = 0xFF;	//back to normal operation
+
+	/*
+	 * check if erase errors were reported
+	 */
+	if (c1 & 0x20) {
+		printk(KERN_ERR "flash_erase: err at %p\n", pWritePtr);
+
+		/*
+		 * reset error
+		 */
+		*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
+		return -2;
+	}
+
+	/*
+	 * just to make sure - verify if erased OK...
+	 */
+	msleep(10);
+
+	pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + (nBlock << 16)));
+
+	for (temp = 0; temp < 16 * 1024; temp++, pWritePtr += 4) {
+		if ((temp1 = *(volatile unsigned int *) pWritePtr) != 0xFFFFFFFF) {
+			printk(KERN_ERR "flash_erase: verify err at %p = %X\n",
+			       pWritePtr, temp1);
+			return -1;
+		}
+	}
+
+	return 0;
+
+}
+
+/*
+ * write_block will limit number of bytes written to the space in this block
+ */
+static int write_block(unsigned long p, const char __user *buf, int count)
+{
+	volatile unsigned int c1;
+	volatile unsigned int c2;
+	unsigned char *pWritePtr;
+	unsigned int uAddress;
+	unsigned int offset;
+	unsigned long timeout;
+	unsigned long timeout1;
+
+	pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p));
+
+	/*
+	 * check if write will end in this block....
+	 */
+	offset = p & 0xFFFF;
+
+	if (offset + count > 0x10000)
+		count = 0x10000 - offset;
+
+	/*
+	 * wait up to 30 sec for this block
+	 */
+	timeout = jiffies + 30 * HZ;
+
+	for (offset = 0; offset < count; offset++, pWritePtr++) {
+		uAddress = (unsigned int) pWritePtr;
+		uAddress &= 0xFFFFFFFC;
+		if (__get_user(c2, buf + offset))
+			return -EFAULT;
+
+	  WriteRetry:
+	  	/*
+	  	 * dummy read
+	  	 */
+		c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);
+
+		/*
+		 * kick open the write gate
+		 */
+		kick_open();
+
+		/*
+		 * program footbridge to the correct offset...0..3
+		 */
+		*CSR_ROMWRITEREG = (unsigned int) pWritePtr & 3;
+
+		/*
+		 * write cmd
+		 */
+		*(volatile unsigned char *) (uAddress) = 0x40;
+
+		/*
+		 * data to write
+		 */
+		*(volatile unsigned char *) (uAddress) = c2;
+
+		/*
+		 * get status
+		 */
+		*(volatile unsigned char *) (FLASH_BASE + 0x10000) = 0x70;
+
+		c1 = 0;
+
+		/*
+		 * wait up to 1 sec for this byte
+		 */
+		timeout1 = jiffies + 1 * HZ;
+
+		/*
+		 * while not ready...
+		 */
+		while (!(c1 & 0x80) && time_before(jiffies, timeout1))
+			c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);
+
+		/*
+		 * if timeout getting status
+		 */
+		if (time_after_eq(jiffies, timeout1)) {
+			kick_open();
+			/*
+			 * reset err
+			 */
+			*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
+
+			goto WriteRetry;
+		}
+		/*
+		 * switch on read access, as a default flash operation mode
+		 */
+		kick_open();
+		/*
+		 * read access
+		 */
+		*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF;
+
+		/*
+		 * if the hardware reports a write error, and it is not a
+		 * timeout - reset the chip and retry
+		 */
+		if (c1 & 0x10) {
+			kick_open();
+			/*
+			 * reset err
+			 */
+			*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
+
+			/*
+			 * before timeout?
+			 */
+			if (time_before(jiffies, timeout)) {
+				if (flashdebug)
+					printk(KERN_DEBUG "write_block: Retrying write at 0x%X\n",
+					       pWritePtr - FLASH_BASE);
+
+				/*
+				 * wait couple ms
+				 */
+				msleep(10);
+
+				goto WriteRetry;
+			} else {
+				printk(KERN_ERR "write_block: timeout at 0x%X\n",
+				       pWritePtr - FLASH_BASE);
+				/*
+				 * return error -2
+				 */
+				return -2;
+
+			}
+		}
+	}
+
+	msleep(10);
+
+	pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p));
+
+	for (offset = 0; offset < count; offset++) {
+		char c, c1;
+		if (__get_user(c, buf))
+			return -EFAULT;
+		buf++;
+		if ((c1 = *pWritePtr++) != c) {
+			printk(KERN_ERR "write_block: verify error at 0x%X (%02X!=%02X)\n",
+			       pWritePtr - FLASH_BASE, c1, c);
+			return 0;
+		}
+	}
+
+	return count;
+}
+
+
+static void kick_open(void)
+{
+	unsigned long flags;
+
+	/*
+	 * we want to write a bit pattern XXX1 to Xilinx to enable
+	 * the write gate, which will be open for about the next 2ms.
+	 */
+	raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+	nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
+	raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+
+	/*
+	 * let the ISA bus catch on...
+	 */
+	udelay(25);
+}
+
+static const struct file_operations flash_fops =
+{
+	.owner		= THIS_MODULE,
+	.llseek		= flash_llseek,
+	.read		= flash_read,
+	.write		= flash_write,
+	.unlocked_ioctl	= flash_ioctl,
+};
+
+static struct miscdevice flash_miscdev =
+{
+	.minor	= FLASH_MINOR,
+	.name	= "nwflash",
+	.fops	= &flash_fops,
+};
+
+static int __init nwflash_init(void)
+{
+	int ret = -ENODEV;
+
+	if (machine_is_netwinder()) {
+		int id;
+
+		FLASH_BASE = ioremap(DC21285_FLASH, KFLASH_SIZE4);
+		if (!FLASH_BASE)
+			goto out;
+
+		id = get_flash_id();
+		if ((id != KFLASH_ID) && (id != KFLASH_ID4)) {
+			ret = -ENXIO;
+			iounmap((void *)FLASH_BASE);
+			printk("Flash: incorrect ID 0x%04X.\n", id);
+			goto out;
+		}
+
+		printk("Flash ROM driver v.%s, flash device ID 0x%04X, size %d MB.\n",
+		       NWFLASH_VERSION, id, gbFlashSize / (1024 * 1024));
+
+		ret = misc_register(&flash_miscdev);
+		if (ret < 0) {
+			iounmap((void *)FLASH_BASE);
+		}
+	}
+out:
+	return ret;
+}
+
+static void __exit nwflash_exit(void)
+{
+	misc_deregister(&flash_miscdev);
+	iounmap((void *)FLASH_BASE);
+}
+
+MODULE_LICENSE("GPL");
+
+module_param(flashdebug, bool, 0644);
+
+module_init(nwflash_init);
+module_exit(nwflash_exit);
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
new file mode 100644
index 0000000..5f4be88
--- /dev/null
+++ b/drivers/char/pc8736x_gpio.c
@@ -0,0 +1,352 @@
+/* linux/drivers/char/pc8736x_gpio.c
+
+   National Semiconductor PC8736x GPIO driver.  Allows a user space
+   process to play with the GPIO pins.
+
+   Copyright (c) 2005,2006 Jim Cromie <jim.cromie@gmail.com>
+
+   adapted from linux/drivers/char/scx200_gpio.c
+   Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>,
+*/
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/mutex.h>
+#include <linux/nsc_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+
+#define DEVNAME "pc8736x_gpio"
+
+MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_DESCRIPTION("NatSemi/Winbond PC-8736x GPIO Pin Driver");
+MODULE_LICENSE("GPL");
+
+static int major;		/* default to dynamic major */
+module_param(major, int, 0);
+MODULE_PARM_DESC(major, "Major device number");
+
+static DEFINE_MUTEX(pc8736x_gpio_config_lock);
+static unsigned pc8736x_gpio_base;
+static u8 pc8736x_gpio_shadow[4];
+
+#define SIO_BASE1       0x2E	/* 1st command-reg to check */
+#define SIO_BASE2       0x4E	/* alt command-reg to check */
+
+#define SIO_SID		0x20	/* SuperI/O ID Register */
+#define SIO_SID_PC87365	0xe5	/* Expected value in ID Register for PC87365 */
+#define SIO_SID_PC87366	0xe9	/* Expected value in ID Register for PC87366 */
+
+#define SIO_CF1		0x21	/* chip config, bit0 is chip enable */
+
+#define PC8736X_GPIO_RANGE	16 /* ioaddr range */
+#define PC8736X_GPIO_CT		32 /* minors matching 4 8 bit ports */
+
+#define SIO_UNIT_SEL	0x7	/* unit select reg */
+#define SIO_UNIT_ACT	0x30	/* unit enable */
+#define SIO_GPIO_UNIT	0x7	/* unit number of GPIO */
+#define SIO_VLM_UNIT	0x0D
+#define SIO_TMS_UNIT	0x0E
+
+/* config-space addrs to read/write each unit's runtime addr */
+#define SIO_BASE_HADDR		0x60
+#define SIO_BASE_LADDR		0x61
+
+/* GPIO config-space pin-control addresses */
+#define SIO_GPIO_PIN_SELECT	0xF0
+#define SIO_GPIO_PIN_CONFIG     0xF1
+#define SIO_GPIO_PIN_EVENT      0xF2
+
+static unsigned char superio_cmd = 0;
+static unsigned char selected_device = 0xFF;	/* bogus start val */
+
+/* GPIO port runtime access, functionality */
+static int port_offset[] = { 0, 4, 8, 10 };	/* non-uniform offsets ! */
+/* static int event_capable[] = { 1, 1, 0, 0 };   ports 2,3 are hobbled */
+
+#define PORT_OUT	0
+#define PORT_IN		1
+#define PORT_EVT_EN	2
+#define PORT_EVT_STST	3
+
+static struct platform_device *pdev;  /* use in dev_*() */
+
+static inline void superio_outb(int addr, int val)
+{
+	outb_p(addr, superio_cmd);
+	outb_p(val, superio_cmd + 1);
+}
+
+static inline int superio_inb(int addr)
+{
+	outb_p(addr, superio_cmd);
+	return inb_p(superio_cmd + 1);
+}
+
+static int pc8736x_superio_present(void)
+{
+	int id;
+
+	/* try the 2 possible values, read a hardware reg to verify */
+	superio_cmd = SIO_BASE1;
+	id = superio_inb(SIO_SID);
+	if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366)
+		return superio_cmd;
+
+	superio_cmd = SIO_BASE2;
+	id = superio_inb(SIO_SID);
+	if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366)
+		return superio_cmd;
+
+	return 0;
+}
+
+static void device_select(unsigned devldn)
+{
+	superio_outb(SIO_UNIT_SEL, devldn);
+	selected_device = devldn;
+}
+
+static void select_pin(unsigned iminor)
+{
+	/* select GPIO port/pin from device minor number */
+	device_select(SIO_GPIO_UNIT);
+	superio_outb(SIO_GPIO_PIN_SELECT,
+		     ((iminor << 1) & 0xF0) | (iminor & 0x7));
+}
+
+static inline u32 pc8736x_gpio_configure_fn(unsigned index, u32 mask, u32 bits,
+					    u32 func_slct)
+{
+	u32 config, new_config;
+
+	mutex_lock(&pc8736x_gpio_config_lock);
+
+	device_select(SIO_GPIO_UNIT);
+	select_pin(index);
+
+	/* read current config value */
+	config = superio_inb(func_slct);
+
+	/* set new config */
+	new_config = (config & mask) | bits;
+	superio_outb(func_slct, new_config);
+
+	mutex_unlock(&pc8736x_gpio_config_lock);
+
+	return config;
+}
+
+static u32 pc8736x_gpio_configure(unsigned index, u32 mask, u32 bits)
+{
+	return pc8736x_gpio_configure_fn(index, mask, bits,
+					 SIO_GPIO_PIN_CONFIG);
+}
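+
+/*
+ * Editor's note: the helper returns the *previous* config byte and
+ * writes back (old & mask) | bits, so for instance
+ * pc8736x_gpio_configure(pin, 0xff, 0) rewrites the current value
+ * unchanged and acts as a plain config read.
+ */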
+
+static int pc8736x_gpio_get(unsigned minor)
+{
+	int port, bit, val;
+
+	port = minor >> 3;
+	bit = minor & 7;
+	val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
+	val >>= bit;
+	val &= 1;
+
+	dev_dbg(&pdev->dev, "_gpio_get(%d from %x bit %d) == val %d\n",
+		minor, pc8736x_gpio_base + port_offset[port] + PORT_IN, bit,
+		val);
+
+	return val;
+}
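+
+/*
+ * Editor's note: minors map linearly onto the four 8-bit ports, e.g.
+ * minor 21 -> port 2 (21 >> 3), bit 5 (21 & 7), sampled at
+ * pc8736x_gpio_base + port_offset[2] (= base + 8) + PORT_IN.
+ */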
+
+static void pc8736x_gpio_set(unsigned minor, int val)
+{
+	int port, bit, curval;
+
+	minor &= 0x1f;
+	port = minor >> 3;
+	bit = minor & 7;
+	curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
+
+	dev_dbg(&pdev->dev, "addr:%x cur:%x bit-pos:%d cur-bit:%x + new:%d -> bit-new:%d\n",
+		pc8736x_gpio_base + port_offset[port] + PORT_OUT,
+		curval, bit, (curval & ~(1 << bit)), val, (val << bit));
+
+	val = (curval & ~(1 << bit)) | (val << bit);
+
+	dev_dbg(&pdev->dev, "gpio_set(minor:%d port:%d bit:%d)"
+		" %2x -> %2x\n", minor, port, bit, curval, val);
+
+	outb_p(val, pc8736x_gpio_base + port_offset[port] + PORT_OUT);
+
+	curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
+	val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
+
+	dev_dbg(&pdev->dev, "wrote %x, read: %x\n", curval, val);
+	pc8736x_gpio_shadow[port] = val;
+}
+
+static int pc8736x_gpio_current(unsigned minor)
+{
+	int port, bit;
+	minor &= 0x1f;
+	port = minor >> 3;
+	bit = minor & 7;
+	return ((pc8736x_gpio_shadow[port] >> bit) & 0x01);
+}
+
+static void pc8736x_gpio_change(unsigned index)
+{
+	pc8736x_gpio_set(index, !pc8736x_gpio_current(index));
+}
+
+static struct nsc_gpio_ops pc8736x_gpio_ops = {
+	.owner		= THIS_MODULE,
+	.gpio_config	= pc8736x_gpio_configure,
+	.gpio_dump	= nsc_gpio_dump,
+	.gpio_get	= pc8736x_gpio_get,
+	.gpio_set	= pc8736x_gpio_set,
+	.gpio_change	= pc8736x_gpio_change,
+	.gpio_current	= pc8736x_gpio_current
+};
+
+static int pc8736x_gpio_open(struct inode *inode, struct file *file)
+{
+	unsigned m = iminor(inode);
+	file->private_data = &pc8736x_gpio_ops;
+
+	dev_dbg(&pdev->dev, "open %d\n", m);
+
+	if (m >= PC8736X_GPIO_CT)
+		return -EINVAL;
+	return nonseekable_open(inode, file);
+}
+
+static const struct file_operations pc8736x_gpio_fileops = {
+	.owner	= THIS_MODULE,
+	.open	= pc8736x_gpio_open,
+	.write	= nsc_gpio_write,
+	.read	= nsc_gpio_read,
+	.llseek = no_llseek,
+};
+
+static void __init pc8736x_init_shadow(void)
+{
+	int port;
+
+	/* read the current values driven on the GPIO signals */
+	for (port = 0; port < 4; ++port)
+		pc8736x_gpio_shadow[port]
+		    = inb_p(pc8736x_gpio_base + port_offset[port]
+			    + PORT_OUT);
+
+}
+
+static struct cdev pc8736x_gpio_cdev;
+
+static int __init pc8736x_gpio_init(void)
+{
+	int rc;
+	dev_t devid;
+
+	pdev = platform_device_alloc(DEVNAME, 0);
+	if (!pdev)
+		return -ENOMEM;
+
+	rc = platform_device_add(pdev);
+	if (rc) {
+		rc = -ENODEV;
+		goto undo_platform_dev_alloc;
+	}
+	dev_info(&pdev->dev, "NatSemi pc8736x GPIO Driver Initializing\n");
+
+	if (!pc8736x_superio_present()) {
+		rc = -ENODEV;
+		dev_err(&pdev->dev, "no device found\n");
+		goto undo_platform_dev_add;
+	}
+	pc8736x_gpio_ops.dev = &pdev->dev;
+
+	/* Verify that the chip and its GPIO unit are both enabled.
+	   My BIOS does this, so I take minimal action here.
+	 */
+	rc = superio_inb(SIO_CF1);
+	if (!(rc & 0x01)) {
+		rc = -ENODEV;
+		dev_err(&pdev->dev, "device not enabled\n");
+		goto undo_platform_dev_add;
+	}
+	device_select(SIO_GPIO_UNIT);
+	if (!superio_inb(SIO_UNIT_ACT)) {
+		rc = -ENODEV;
+		dev_err(&pdev->dev, "GPIO unit not enabled\n");
+		goto undo_platform_dev_add;
+	}
+
+	/* read the GPIO unit base addr that chip responds to */
+	pc8736x_gpio_base = (superio_inb(SIO_BASE_HADDR) << 8
+			     | superio_inb(SIO_BASE_LADDR));
+
+	if (!request_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE, DEVNAME)) {
+		rc = -ENODEV;
+		dev_err(&pdev->dev, "GPIO ioport %x busy\n",
+			pc8736x_gpio_base);
+		goto undo_platform_dev_add;
+	}
+	dev_info(&pdev->dev, "GPIO ioport %x reserved\n", pc8736x_gpio_base);
+
+	if (major) {
+		devid = MKDEV(major, 0);
+		rc = register_chrdev_region(devid, PC8736X_GPIO_CT, DEVNAME);
+	} else {
+		rc = alloc_chrdev_region(&devid, 0, PC8736X_GPIO_CT, DEVNAME);
+		major = MAJOR(devid);
+	}
+
+	if (rc < 0) {
+		dev_err(&pdev->dev, "register-chrdev failed: %d\n", rc);
+		goto undo_request_region;
+	}
+	if (!major) {
+		major = rc;
+		dev_dbg(&pdev->dev, "got dynamic major %d\n", major);
+	}
+
+	pc8736x_init_shadow();
+
+	/* ignore minor errs, and succeed */
+	cdev_init(&pc8736x_gpio_cdev, &pc8736x_gpio_fileops);
+	cdev_add(&pc8736x_gpio_cdev, devid, PC8736X_GPIO_CT);
+
+	return 0;
+
+undo_request_region:
+	release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE);
+undo_platform_dev_add:
+	platform_device_del(pdev);
+undo_platform_dev_alloc:
+	platform_device_put(pdev);
+
+	return rc;
+}
+
+static void __exit pc8736x_gpio_cleanup(void)
+{
+	dev_dbg(&pdev->dev, "cleanup\n");
+
+	cdev_del(&pc8736x_gpio_cdev);
+	unregister_chrdev_region(MKDEV(major,0), PC8736X_GPIO_CT);
+	release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE);
+
+	platform_device_unregister(pdev);
+}
+
+module_init(pc8736x_gpio_init);
+module_exit(pc8736x_gpio_cleanup);
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
new file mode 100644
index 0000000..1d1e7da
--- /dev/null
+++ b/drivers/char/pcmcia/Kconfig
@@ -0,0 +1,67 @@
+#
+# PCMCIA character device configuration
+#
+
+menu "PCMCIA character devices"
+	depends on PCMCIA!=n
+
+config SYNCLINK_CS
+	tristate "SyncLink PC Card support"
+	depends on PCMCIA && TTY
+	help
+	  Enable support for the SyncLink PC Card serial adapter, running
+	  asynchronous and HDLC communications up to 512Kbps. The port is
+	  selectable for RS-232, V.35, RS-449, RS-530, and X.21.
+
+	  This driver may be built as a module ( = code which can be
+	  inserted in and removed from the running kernel whenever you want).
+	  The module will be called synclink_cs.  If you want to do that, say M
+	  here.
+
+config CARDMAN_4000
+	tristate "Omnikey Cardman 4000 support"
+	depends on PCMCIA
+	select BITREVERSE
+	help
+	  Enable support for the Omnikey Cardman 4000 PCMCIA Smartcard
+	  reader.
+
+	  This kernel driver requires additional userspace support, either
+	  by the vendor-provided PC/SC ifd_handler (http://www.omnikey.com/),
+	  or via the cm4000 backend of OpenCT (http://www.opensc-project.org/opensc).
+
+config CARDMAN_4040
+	tristate "Omnikey CardMan 4040 support"
+	depends on PCMCIA
+	help
+	  Enable support for the Omnikey CardMan 4040 PCMCIA Smartcard
+	  reader.
+
+	  This card is basically a USB CCID device connected to a FIFO
+	  in I/O space.  To use the kernel driver, you will need either the
+	  PC/SC ifdhandler provided from the Omnikey homepage
+	  (http://www.omnikey.com/), or a current development version of OpenCT
+	  (http://www.opensc-project.org/opensc).
+
+config SCR24X
+	tristate "SCR24x Chip Card Interface support"
+	depends on PCMCIA
+	help
+	  Enable support for the SCR24x PCMCIA Chip Card Interface.
+
+	  To compile this driver as a module, choose M here.
+	  The module will be called scr24x_cs.
+
+	  If unsure, say N.
+
+config IPWIRELESS
+	tristate "IPWireless 3G UMTS PCMCIA card support"
+	depends on PCMCIA && NETDEVICES && TTY
+	select PPP
+	help
+	  This is a driver for the 3G UMTS PCMCIA card from the IPWireless
+	  company. In some countries (for example the Czech Republic, with
+	  T-Mobile as the ISP) this card is shipped for a service called
+	  UMTS 4G.
+
+endmenu
+
diff --git a/drivers/char/pcmcia/Makefile b/drivers/char/pcmcia/Makefile
new file mode 100644
index 0000000..5b836bc
--- /dev/null
+++ b/drivers/char/pcmcia/Makefile
@@ -0,0 +1,10 @@
+#
+# drivers/char/pcmcia/Makefile
+#
+# Makefile for the Linux PCMCIA char device drivers.
+#
+
+obj-$(CONFIG_SYNCLINK_CS) += synclink_cs.o
+obj-$(CONFIG_CARDMAN_4000) += cm4000_cs.o
+obj-$(CONFIG_CARDMAN_4040) += cm4040_cs.o
+obj-$(CONFIG_SCR24X) += scr24x_cs.o
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
new file mode 100644
index 0000000..a219964
--- /dev/null
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -0,0 +1,1920 @@
+ /*
+  * A driver for the PCMCIA Smartcard Reader "Omnikey CardMan Mobile 4000"
+  *
+  * cm4000_cs.c support.linux@omnikey.com
+  *
+  * Tue Oct 23 11:32:43 GMT 2001 herp - cleaned up header files
+  * Sun Jan 20 10:11:15 MET 2002 herp - added modversion header files
+  * Thu Nov 14 16:34:11 GMT 2002 mh   - added PPS functionality
+  * Tue Nov 19 16:36:27 GMT 2002 mh   - added SUSPEND/RESUME functionality
+  * Wed Jul 28 12:55:01 CEST 2004 mh  - kernel 2.6 adjustments
+  *
+  * current version: 2.4.0gm4
+  *
+  * (C) 2000,2001,2002,2003,2004 Omnikey AG
+  *
+  * (C) 2005-2006 Harald Welte <laforge@gnumonks.org>
+  * 	- Adhere to Kernel process/coding-style.rst
+  * 	- Port to 2.6.13 "new" style PCMCIA
+  * 	- Check for copy_{from,to}_user return values
+  * 	- Use nonseekable_open()
+  * 	- add class interface for udev device creation
+  *
+  * All rights reserved. Licensed under dual BSD/GPL license.
+  */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/bitrev.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+#include <linux/cm4000_cs.h>
+
+/* #define ATR_CSUM */
+
+#define reader_to_dev(x)	(&x->p_dev->dev)
+
+/* n (debug level) is ignored */
+/* additional debug output may be enabled by re-compiling with
+ * CM4000_DEBUG set */
+/* #define CM4000_DEBUG */
+#define DEBUGP(n, rdr, x, args...) do { 		\
+		dev_dbg(reader_to_dev(rdr), "%s:" x, 	\
+			   __func__ , ## args);		\
+	} while (0)
+
+static DEFINE_MUTEX(cmm_mutex);
+
+#define	T_1SEC		(HZ)
+#define	T_10MSEC	msecs_to_jiffies(10)
+#define	T_20MSEC	msecs_to_jiffies(20)
+#define	T_40MSEC	msecs_to_jiffies(40)
+#define	T_50MSEC	msecs_to_jiffies(50)
+#define	T_100MSEC	msecs_to_jiffies(100)
+#define	T_500MSEC	msecs_to_jiffies(500)
+
+static void cm4000_release(struct pcmcia_device *link);
+
+static int major;		/* major number we get from the kernel */
+
+/* note: the first state always has to be number 0 */
+
+#define	M_FETCH_ATR	0
+#define	M_TIMEOUT_WAIT	1
+#define	M_READ_ATR_LEN	2
+#define	M_READ_ATR	3
+#define	M_ATR_PRESENT	4
+#define	M_BAD_CARD	5
+#define M_CARDOFF	6
+
+#define	LOCK_IO			0
+#define	LOCK_MONITOR		1
+
+#define IS_AUTOPPS_ACT		 6
+#define	IS_PROCBYTE_PRESENT	 7
+#define	IS_INVREV		 8
+#define IS_ANY_T0		 9
+#define	IS_ANY_T1		10
+#define	IS_ATR_PRESENT		11
+#define	IS_ATR_VALID		12
+#define	IS_CMM_ABSENT		13
+#define	IS_BAD_LENGTH		14
+#define	IS_BAD_CSUM		15
+#define	IS_BAD_CARD		16
+
+#define REG_FLAGS0(x)		(x + 0)
+#define REG_FLAGS1(x)		(x + 1)
+#define REG_NUM_BYTES(x)	(x + 2)
+#define REG_BUF_ADDR(x)		(x + 3)
+#define REG_BUF_DATA(x)		(x + 4)
+#define REG_NUM_SEND(x)		(x + 5)
+#define REG_BAUDRATE(x)		(x + 6)
+#define REG_STOPBITS(x)		(x + 7)
+
+struct cm4000_dev {
+	struct pcmcia_device *p_dev;
+
+	unsigned char atr[MAX_ATR];
+	unsigned char rbuf[512];
+	unsigned char sbuf[512];
+
+	wait_queue_head_t devq;		/* when removing the cardman, this
+					   must not be zeroed! */
+
+	wait_queue_head_t ioq;		/* if IO is locked, wait on this Q */
+	wait_queue_head_t atrq;		/* wait for ATR valid */
+	wait_queue_head_t readq;	/* used by write to wake blk.read */
+
+	/* warning: do not move these fields.
+	 * initialising to zero depends on them - see ZERO_DEV below.  */
+	unsigned char atr_csum;
+	unsigned char atr_len_retry;
+	unsigned short atr_len;
+	unsigned short rlen;	/* bytes avail. after write */
+	unsigned short rpos;	/* latest read pos. write zeroes */
+	unsigned char procbyte;	/* T=0 procedure byte */
+	unsigned char mstate;	/* state of card monitor */
+	unsigned char cwarn;	/* slow down warning */
+	unsigned char flags0;	/* cardman IO-flags 0 */
+	unsigned char flags1;	/* cardman IO-flags 1 */
+	unsigned int mdelay;	/* variable monitor speeds, in jiffies */
+
+	unsigned int baudv;	/* baud value for speed */
+	unsigned char ta1;
+	unsigned char proto;	/* T=0, T=1, ... */
+	unsigned long flags;	/* lock+flags (MONITOR,IO,ATR) * for concurrent
+				   access */
+
+	unsigned char pts[4];
+
+	struct timer_list timer;	/* used to keep monitor running */
+	int monitor_running;
+};
+
+#define	ZERO_DEV(dev)  						\
+	memset(&dev->atr_csum,0,				\
+		sizeof(struct cm4000_dev) - 			\
+		offsetof(struct cm4000_dev, atr_csum))
+
+static struct pcmcia_device *dev_table[CM4000_MAX_DEV];
+static struct class *cmm_class;
+
+/* This table doesn't use spaces after the comma between fields and thus
+ * violates process/coding-style.rst.  However, I don't really think wrapping
+ * it will make it any clearer to read -HW */
+static unsigned char fi_di_table[10][14] = {
+/*FI     00   01   02   03   04   05   06   07   08   09   10   11   12   13 */
+/*DI */
+/* 0 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11},
+/* 1 */ {0x01,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x91,0x11,0x11,0x11,0x11},
+/* 2 */ {0x02,0x12,0x22,0x32,0x11,0x11,0x11,0x11,0x11,0x92,0xA2,0xB2,0x11,0x11},
+/* 3 */ {0x03,0x13,0x23,0x33,0x43,0x53,0x63,0x11,0x11,0x93,0xA3,0xB3,0xC3,0xD3},
+/* 4 */ {0x04,0x14,0x24,0x34,0x44,0x54,0x64,0x11,0x11,0x94,0xA4,0xB4,0xC4,0xD4},
+/* 5 */ {0x00,0x15,0x25,0x35,0x45,0x55,0x65,0x11,0x11,0x95,0xA5,0xB5,0xC5,0xD5},
+/* 6 */ {0x06,0x16,0x26,0x36,0x46,0x56,0x66,0x11,0x11,0x96,0xA6,0xB6,0xC6,0xD6},
+/* 7 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11},
+/* 8 */ {0x08,0x11,0x28,0x38,0x48,0x58,0x68,0x11,0x11,0x98,0xA8,0xB8,0xC8,0xD8},
+/* 9 */ {0x09,0x19,0x29,0x39,0x49,0x59,0x69,0x11,0x11,0x99,0xA9,0xB9,0xC9,0xD9}
+};
+
+#ifndef CM4000_DEBUG
+#define	xoutb	outb
+#define	xinb	inb
+#else
+static inline void xoutb(unsigned char val, unsigned short port)
+{
+	pr_debug("outb(val=%.2x,port=%.4x)\n", val, port);
+	outb(val, port);
+}
+static inline unsigned char xinb(unsigned short port)
+{
+	unsigned char val;
+
+	val = inb(port);
+	pr_debug("%.2x=inb(%.4x)\n", val, port);
+
+	return val;
+}
+#endif
+
+static inline unsigned char invert_revert(unsigned char ch)
+{
+	return bitrev8(~ch);
+}
+
+static void str_invert_revert(unsigned char *b, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		b[i] = invert_revert(b[i]);
+}
+
+#define	ATRLENCK(dev,pos) \
+	if (pos>=dev->atr_len || pos>=MAX_ATR) \
+		goto return_0;
+
+static unsigned int calc_baudv(unsigned char fidi)
+{
+	unsigned int wcrcf, wbrcf, fi_rfu, di_rfu;
+
+	fi_rfu = 372;
+	di_rfu = 1;
+
+	/* FI */
+	switch ((fidi >> 4) & 0x0F) {
+	case 0x00:
+		wcrcf = 372;
+		break;
+	case 0x01:
+		wcrcf = 372;
+		break;
+	case 0x02:
+		wcrcf = 558;
+		break;
+	case 0x03:
+		wcrcf = 744;
+		break;
+	case 0x04:
+		wcrcf = 1116;
+		break;
+	case 0x05:
+		wcrcf = 1488;
+		break;
+	case 0x06:
+		wcrcf = 1860;
+		break;
+	case 0x07:
+		wcrcf = fi_rfu;
+		break;
+	case 0x08:
+		wcrcf = fi_rfu;
+		break;
+	case 0x09:
+		wcrcf = 512;
+		break;
+	case 0x0A:
+		wcrcf = 768;
+		break;
+	case 0x0B:
+		wcrcf = 1024;
+		break;
+	case 0x0C:
+		wcrcf = 1536;
+		break;
+	case 0x0D:
+		wcrcf = 2048;
+		break;
+	default:
+		wcrcf = fi_rfu;
+		break;
+	}
+
+	/* DI */
+	switch (fidi & 0x0F) {
+	case 0x00:
+		wbrcf = di_rfu;
+		break;
+	case 0x01:
+		wbrcf = 1;
+		break;
+	case 0x02:
+		wbrcf = 2;
+		break;
+	case 0x03:
+		wbrcf = 4;
+		break;
+	case 0x04:
+		wbrcf = 8;
+		break;
+	case 0x05:
+		wbrcf = 16;
+		break;
+	case 0x06:
+		wbrcf = 32;
+		break;
+	case 0x07:
+		wbrcf = di_rfu;
+		break;
+	case 0x08:
+		wbrcf = 12;
+		break;
+	case 0x09:
+		wbrcf = 20;
+		break;
+	default:
+		wbrcf = di_rfu;
+		break;
+	}
+
+	return (wcrcf / wbrcf);
+}
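+
+/*
+ * Editor's worked example: the default TA(1) of 0x11 selects FI=1
+ * (372) and DI=1 (1), i.e. 372/1 = 372 clock cycles per ETU (the ISO
+ * 7816 default); 0x95 selects 512/16 = 32, roughly 11.6x faster.
+ */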
+
+static unsigned short io_read_num_rec_bytes(unsigned int iobase,
+					    unsigned short *s)
+{
+	unsigned short tmp;
+
+	tmp = *s = 0;
+	do {
+		*s = tmp;
+		tmp = inb(REG_NUM_BYTES(iobase)) |
+				(inb(REG_FLAGS0(iobase)) & 4 ? 0x100 : 0);
+	} while (tmp != *s);
+
+	return *s;
+}
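+
+/*
+ * Editor's note: the receive count is 9 bits wide and spans two port
+ * reads (REG_NUM_BYTES plus bit 2 of REG_FLAGS0 as bit 8), so the
+ * loop above re-reads until two consecutive samples agree.
+ */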
+
+static int parse_atr(struct cm4000_dev *dev)
+{
+	unsigned char any_t1, any_t0;
+	unsigned char ch, ifno;
+	int ix, done;
+
+	DEBUGP(3, dev, "-> parse_atr: dev->atr_len = %i\n", dev->atr_len);
+
+	if (dev->atr_len < 3) {
+		DEBUGP(5, dev, "parse_atr: atr_len < 3\n");
+		return 0;
+	}
+
+	if (dev->atr[0] == 0x3f)
+		set_bit(IS_INVREV, &dev->flags);
+	else
+		clear_bit(IS_INVREV, &dev->flags);
+	ix = 1;
+	ifno = 1;
+	ch = dev->atr[1];
+	dev->proto = 0;		/* XXX PROTO */
+	any_t1 = any_t0 = done = 0;
+	dev->ta1 = 0x11;	/* defaults to 9600 baud */
+	do {
+		if (ifno == 1 && (ch & 0x10)) {
+			/* read first interface byte and TA1 is present */
+			dev->ta1 = dev->atr[2];
+			DEBUGP(5, dev, "Card says FiDi is 0x%.2x\n", dev->ta1);
+			ifno++;
+		} else if ((ifno == 2) && (ch & 0x10)) { /* TA(2) */
+			dev->ta1 = 0x11;
+			ifno++;
+		}
+
+		DEBUGP(5, dev, "Yi=%.2x\n", ch & 0xf0);
+		ix += ((ch & 0x10) >> 4)	/* no of int.face chars */
+		    +((ch & 0x20) >> 5)
+		    + ((ch & 0x40) >> 6)
+		    + ((ch & 0x80) >> 7);
+		/* ATRLENCK(dev,ix); */
+		if (ch & 0x80) {	/* TDi */
+			ch = dev->atr[ix];
+			if ((ch & 0x0f)) {
+				any_t1 = 1;
+				DEBUGP(5, dev, "card is capable of T=1\n");
+			} else {
+				any_t0 = 1;
+				DEBUGP(5, dev, "card is capable of T=0\n");
+			}
+		} else
+			done = 1;
+	} while (!done);
+
+	DEBUGP(5, dev, "ix=%d noHist=%d any_t1=%d\n",
+	      ix, dev->atr[1] & 15, any_t1);
+	if (ix + 1 + (dev->atr[1] & 0x0f) + any_t1 != dev->atr_len) {
+		DEBUGP(5, dev, "length error\n");
+		return 0;
+	}
+	if (any_t0)
+		set_bit(IS_ANY_T0, &dev->flags);
+
+	if (any_t1) {		/* compute csum */
+		dev->atr_csum = 0;
+#ifdef ATR_CSUM
+		{
+			int i;
+
+			for (i = 1; i < dev->atr_len; i++)
+				dev->atr_csum ^= dev->atr[i];
+		}
+		if (dev->atr_csum) {
+			set_bit(IS_BAD_CSUM, &dev->flags);
+			DEBUGP(5, dev, "bad checksum\n");
+			return 0;
+		}
+#endif
+		if (any_t0 == 0)
+			dev->proto = 1;	/* XXX PROTO */
+		set_bit(IS_ANY_T1, &dev->flags);
+	}
+
+	return 1;
+}
+
+struct card_fixup {
+	char atr[12];
+	u_int8_t atr_len;
+	u_int8_t stopbits;
+};
+
+static struct card_fixup card_fixups[] = {
+	{	/* ACOS */
+		.atr = { 0x3b, 0xb3, 0x11, 0x00, 0x00, 0x41, 0x01 },
+		.atr_len = 7,
+		.stopbits = 0x03,
+	},
+	{	/* Motorola */
+		.atr = {0x3b, 0x76, 0x13, 0x00, 0x00, 0x80, 0x62, 0x07,
+			0x41, 0x81, 0x81 },
+		.atr_len = 11,
+		.stopbits = 0x04,
+	},
+};
+
+static void set_cardparameter(struct cm4000_dev *dev)
+{
+	int i;
+	unsigned int iobase = dev->p_dev->resource[0]->start;
+	u_int8_t stopbits = 0x02; /* ISO default */
+
+	DEBUGP(3, dev, "-> set_cardparameter\n");
+
+	dev->flags1 = dev->flags1 | (((dev->baudv - 1) & 0x0100) >> 8);
+	xoutb(dev->flags1, REG_FLAGS1(iobase));
+	DEBUGP(5, dev, "flags1 = 0x%02x\n", dev->flags1);
+
+	/* set baudrate */
+	xoutb((unsigned char)((dev->baudv - 1) & 0xFF), REG_BAUDRATE(iobase));
+
+	DEBUGP(5, dev, "baudv = %i -> write 0x%02x\n", dev->baudv,
+	      ((dev->baudv - 1) & 0xFF));
+
+	/* set stopbits */
+	for (i = 0; i < ARRAY_SIZE(card_fixups); i++) {
+		if (!memcmp(dev->atr, card_fixups[i].atr,
+			    card_fixups[i].atr_len))
+			stopbits = card_fixups[i].stopbits;
+	}
+	xoutb(stopbits, REG_STOPBITS(iobase));
+
+	DEBUGP(3, dev, "<- set_cardparameter\n");
+}
+
+static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
+{
+
+	unsigned long tmp, i;
+	unsigned short num_bytes_read;
+	unsigned char pts_reply[4];
+	ssize_t rc;
+	unsigned int iobase = dev->p_dev->resource[0]->start;
+
+	rc = 0;
+
+	DEBUGP(3, dev, "-> set_protocol\n");
+	DEBUGP(5, dev, "ptsreq->Protocol = 0x%.8x, ptsreq->Flags=0x%.8x, "
+		 "ptsreq->pts1=0x%.2x, ptsreq->pts2=0x%.2x, "
+		 "ptsreq->pts3=0x%.2x\n", (unsigned int)ptsreq->protocol,
+		 (unsigned int)ptsreq->flags, ptsreq->pts1, ptsreq->pts2,
+		 ptsreq->pts3);
+
+	/* Fill PTS structure */
+	dev->pts[0] = 0xff;
+	dev->pts[1] = 0x00;
+	tmp = ptsreq->protocol;
+	while ((tmp = (tmp >> 1)) > 0)
+		dev->pts[1]++;
+	dev->proto = dev->pts[1];	/* Set new protocol */
+	dev->pts[1] = (0x01 << 4) | (dev->pts[1]);
+
+	/* Correct Fi/Di according to CM4000 Fi/Di table */
+	DEBUGP(5, dev, "Ta(1) from ATR is 0x%.2x\n", dev->ta1);
+	/* set Fi/Di according to ATR TA(1) */
+	dev->pts[2] = fi_di_table[dev->ta1 & 0x0F][(dev->ta1 >> 4) & 0x0F];
+
+	/* Calculate PCK character */
+	dev->pts[3] = dev->pts[0] ^ dev->pts[1] ^ dev->pts[2];
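+	/*
+	 * Editor's worked example: an AUTOPPS request for T=1 with the
+	 * default TA(1) of 0x11 yields pts[] = { 0xFF, 0x11, 0x11, 0xFF }.
+	 */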
+
+	DEBUGP(5, dev, "pts0=%.2x, pts1=%.2x, pts2=%.2x, pts3=%.2x\n",
+	       dev->pts[0], dev->pts[1], dev->pts[2], dev->pts[3]);
+
+	/* check card convention */
+	if (test_bit(IS_INVREV, &dev->flags))
+		str_invert_revert(dev->pts, 4);
+
+	/* reset SM */
+	xoutb(0x80, REG_FLAGS0(iobase));
+
+	/* Enable access to the message buffer */
+	DEBUGP(5, dev, "Enable access to the message buffer\n");
+	dev->flags1 = 0x20	/* T_Active */
+	    | (test_bit(IS_INVREV, &dev->flags) ? 0x02 : 0x00) /* inv parity */
+	    | ((dev->baudv >> 8) & 0x01);	/* MSB-baud */
+	xoutb(dev->flags1, REG_FLAGS1(iobase));
+
+	DEBUGP(5, dev, "Enable message buffer -> flags1 = 0x%.2x\n",
+	       dev->flags1);
+
+	/* write challenge to the buffer */
+	DEBUGP(5, dev, "Write challenge to buffer: ");
+	for (i = 0; i < 4; i++) {
+		xoutb(i, REG_BUF_ADDR(iobase));
+		xoutb(dev->pts[i], REG_BUF_DATA(iobase));	/* buf data */
+#ifdef CM4000_DEBUG
+		pr_debug("0x%.2x ", dev->pts[i]);
+	}
+	pr_debug("\n");
+#else
+	}
+#endif
+
+	/* set number of bytes to write */
+	DEBUGP(5, dev, "Set number of bytes to write\n");
+	xoutb(0x04, REG_NUM_SEND(iobase));
+
+	/* Trigger CARDMAN CONTROLLER */
+	xoutb(0x50, REG_FLAGS0(iobase));
+
+	/* Monitor progress */
+	/* wait for xmit done */
+	DEBUGP(5, dev, "Waiting for NumRecBytes to become valid\n");
+
+	for (i = 0; i < 100; i++) {
+		if (inb(REG_FLAGS0(iobase)) & 0x08) {
+			DEBUGP(5, dev, "NumRecBytes is valid\n");
+			break;
+		}
+		mdelay(10);
+	}
+	if (i == 100) {
+		DEBUGP(5, dev, "Timeout waiting for NumRecBytes to become "
+		       "valid\n");
+		rc = -EIO;
+		goto exit_setprotocol;
+	}
+
+	DEBUGP(5, dev, "Reading NumRecBytes\n");
+	for (i = 0; i < 100; i++) {
+		io_read_num_rec_bytes(iobase, &num_bytes_read);
+		if (num_bytes_read >= 4) {
+			DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read);
+			break;
+		}
+		mdelay(10);
+	}
+
+	/* check whether it is a short PTS reply */
+	if (num_bytes_read == 3)
+		i = 0;
+
+	if (i == 100) {
+		DEBUGP(5, dev, "Timeout reading num_bytes_read\n");
+		rc = -EIO;
+		goto exit_setprotocol;
+	}
+
+	DEBUGP(5, dev, "Reset the CARDMAN CONTROLLER\n");
+	xoutb(0x80, REG_FLAGS0(iobase));
+
+	/* Read PPS reply */
+	DEBUGP(5, dev, "Read PPS reply\n");
+	for (i = 0; i < num_bytes_read; i++) {
+		xoutb(i, REG_BUF_ADDR(iobase));
+		pts_reply[i] = inb(REG_BUF_DATA(iobase));
+	}
+
+#ifdef CM4000_DEBUG
+	DEBUGP(2, dev, "PTSreply: ");
+	for (i = 0; i < num_bytes_read; i++) {
+		pr_debug("0x%.2x ", pts_reply[i]);
+	}
+	pr_debug("\n");
+#endif	/* CM4000_DEBUG */
+
+	DEBUGP(5, dev, "Clear Tactive in Flags1\n");
+	xoutb(0x20, REG_FLAGS1(iobase));
+
+	/* Compare ptsreq and ptsreply */
+	if ((dev->pts[0] == pts_reply[0]) &&
+	    (dev->pts[1] == pts_reply[1]) &&
+	    (dev->pts[2] == pts_reply[2]) && (dev->pts[3] == pts_reply[3])) {
+		/* setcardparameter according to PPS */
+		dev->baudv = calc_baudv(dev->pts[2]);
+		set_cardparameter(dev);
+	} else if ((dev->pts[0] == pts_reply[0]) &&
+		   ((dev->pts[1] & 0xef) == pts_reply[1]) &&
+		   ((pts_reply[0] ^ pts_reply[1]) == pts_reply[2])) {
+		/* short PTS reply, set card parameter to default values */
+		dev->baudv = calc_baudv(0x11);
+		set_cardparameter(dev);
+	} else
+		rc = -EIO;
+
+exit_setprotocol:
+	DEBUGP(3, dev, "<- set_protocol\n");
+	return rc;
+}
+
+static int io_detect_cm4000(unsigned int iobase, struct cm4000_dev *dev)
+{
+
+	/* note: statemachine is assumed to be reset */
+	if (inb(REG_FLAGS0(iobase)) & 8) {
+		clear_bit(IS_ATR_VALID, &dev->flags);
+		set_bit(IS_CMM_ABSENT, &dev->flags);
+		return 0;	/* detect CMM = 1 -> failure */
+	}
+	/* xoutb(0x40, REG_FLAGS1(iobase)); detectCMM */
+	xoutb(dev->flags1 | 0x40, REG_FLAGS1(iobase));
+	if ((inb(REG_FLAGS0(iobase)) & 8) == 0) {
+		clear_bit(IS_ATR_VALID, &dev->flags);
+		set_bit(IS_CMM_ABSENT, &dev->flags);
+		return 0;	/* detect CMM=0 -> failure */
+	}
+	/* clear detectCMM again by restoring original flags1 */
+	xoutb(dev->flags1, REG_FLAGS1(iobase));
+	return 1;
+}
+
+static void terminate_monitor(struct cm4000_dev *dev)
+{
+
+	/* tell the monitor to stop and wait until
+	 * it terminates.
+	 */
+	DEBUGP(3, dev, "-> terminate_monitor\n");
+	wait_event_interruptible(dev->devq,
+				 test_and_set_bit(LOCK_MONITOR,
+						  (void *)&dev->flags));
+
+	/* now, LOCK_MONITOR has been set.
+	 * allow a last cycle in the monitor.
+	 * the monitor will indicate that it has
+	 * finished by clearing this bit.
+	 */
+	DEBUGP(5, dev, "Now allow last cycle of monitor!\n");
+	while (test_bit(LOCK_MONITOR, (void *)&dev->flags))
+		msleep(25);
+
+	DEBUGP(5, dev, "Delete timer\n");
+	del_timer_sync(&dev->timer);
+#ifdef CM4000_DEBUG
+	dev->monitor_running = 0;
+#endif
+
+	DEBUGP(3, dev, "<- terminate_monitor\n");
+}
+
+/*
+ * monitor the card every 50msec. as a side-effect, retrieve the
+ * atr once a card is inserted. another side-effect of retrieving the
+ * atr is that the card will be powered on, so there is no need to
+ * power on the card explicitly from the application: the driver
+ * is already doing that for you.
+ */
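+/*
+ * Editor's note: the monitor's state machine runs
+ *   M_FETCH_ATR -> M_TIMEOUT_WAIT -> M_READ_ATR_LEN -> M_READ_ATR,
+ * ending in M_ATR_PRESENT (valid ATR, effectively idle) or in
+ * M_BAD_CARD -> M_FETCH_ATR (retry); M_CARDOFF feeds back into
+ * M_FETCH_ATR once the card has powered down.
+ */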
+
+static void monitor_card(struct timer_list *t)
+{
+	struct cm4000_dev *dev = from_timer(dev, t, timer);
+	unsigned int iobase = dev->p_dev->resource[0]->start;
+	unsigned short s;
+	struct ptsreq ptsreq;
+	int i, atrc;
+
+	DEBUGP(7, dev, "->  monitor_card\n");
+
+	/* if someone has set the lock for us: we're done! */
+	if (test_and_set_bit(LOCK_MONITOR, &dev->flags)) {
+		DEBUGP(4, dev, "About to stop monitor\n");
+		/* no */
+		dev->rlen =
+		    dev->rpos =
+		    dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0;
+		dev->mstate = M_FETCH_ATR;
+		clear_bit(LOCK_MONITOR, &dev->flags);
+		/* close et al. are sleeping on devq, so wake it */
+		wake_up_interruptible(&dev->devq);
+		DEBUGP(2, dev, "<- monitor_card (we are done now)\n");
+		return;
+	}
+
+	/* try to lock io: if it is already locked, just add another timer */
+	if (test_and_set_bit(LOCK_IO, (void *)&dev->flags)) {
+		DEBUGP(4, dev, "Couldn't get IO lock\n");
+		goto return_with_timer;
+	}
+
+	/* is a card/a reader inserted at all ? */
+	dev->flags0 = xinb(REG_FLAGS0(iobase));
+	DEBUGP(7, dev, "dev->flags0 = 0x%2x\n", dev->flags0);
+	DEBUGP(7, dev, "smartcard present: %s\n",
+	       dev->flags0 & 1 ? "yes" : "no");
+	DEBUGP(7, dev, "cardman present: %s\n",
+	       dev->flags0 == 0xff ? "no" : "yes");
+
+	if ((dev->flags0 & 1) == 0	/* no smartcard inserted */
+	    || dev->flags0 == 0xff) {	/* no cardman inserted */
+		/* no */
+		dev->rlen =
+		    dev->rpos =
+		    dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0;
+		dev->mstate = M_FETCH_ATR;
+
+		dev->flags &= 0x000000ff; /* only keep IO and MONITOR locks */
+
+		if (dev->flags0 == 0xff) {
+			DEBUGP(4, dev, "set IS_CMM_ABSENT bit\n");
+			set_bit(IS_CMM_ABSENT, &dev->flags);
+		} else if (test_bit(IS_CMM_ABSENT, &dev->flags)) {
+			DEBUGP(4, dev, "clear IS_CMM_ABSENT bit "
+			       "(card is removed)\n");
+			clear_bit(IS_CMM_ABSENT, &dev->flags);
+		}
+
+		goto release_io;
+	} else if ((dev->flags0 & 1) && test_bit(IS_CMM_ABSENT, &dev->flags)) {
+		/* cardman and card present but cardman was absent before
+		 * (after suspend with inserted card) */
+		DEBUGP(4, dev, "clear IS_CMM_ABSENT bit (card is inserted)\n");
+		clear_bit(IS_CMM_ABSENT, &dev->flags);
+	}
+
+	if (test_bit(IS_ATR_VALID, &dev->flags) == 1) {
+		DEBUGP(7, dev, "believe ATR is already valid (do nothing)\n");
+		goto release_io;
+	}
+
+	switch (dev->mstate) {
+		unsigned char flags0;
+	case M_CARDOFF:
+		DEBUGP(4, dev, "M_CARDOFF\n");
+		flags0 = inb(REG_FLAGS0(iobase));
+		if (flags0 & 0x02) {
+			/* wait until Flags0 indicate power is off */
+			dev->mdelay = T_10MSEC;
+		} else {
+			/* Flags0 indicate power off and no card inserted now;
+			 * Reset CARDMAN CONTROLLER */
+			xoutb(0x80, REG_FLAGS0(iobase));
+
+			/* prepare for fetching ATR again: after card off ATR
+			 * is read again automatically */
+			dev->rlen =
+			    dev->rpos =
+			    dev->atr_csum =
+			    dev->atr_len_retry = dev->cwarn = 0;
+			dev->mstate = M_FETCH_ATR;
+
+			/* minimal gap between CARDOFF and read ATR is 50msec */
+			dev->mdelay = T_50MSEC;
+		}
+		break;
+	case M_FETCH_ATR:
+		DEBUGP(4, dev, "M_FETCH_ATR\n");
+		xoutb(0x80, REG_FLAGS0(iobase));
+		DEBUGP(4, dev, "Reset BAUDV to 9600\n");
+		dev->baudv = 0x173;	/* 9600 */
+		xoutb(0x02, REG_STOPBITS(iobase));	/* stopbits=2 */
+		xoutb(0x73, REG_BAUDRATE(iobase));	/* baud value */
+		xoutb(0x21, REG_FLAGS1(iobase));	/* T_Active=1, baud
+							   value */
+		/* warm start vs. power on: */
+		xoutb(dev->flags0 & 2 ? 0x46 : 0x44, REG_FLAGS0(iobase));
+		dev->mdelay = T_40MSEC;
+		dev->mstate = M_TIMEOUT_WAIT;
+		break;
+	case M_TIMEOUT_WAIT:
+		DEBUGP(4, dev, "M_TIMEOUT_WAIT\n");
+		/* numRecBytes */
+		io_read_num_rec_bytes(iobase, &dev->atr_len);
+		dev->mdelay = T_10MSEC;
+		dev->mstate = M_READ_ATR_LEN;
+		break;
+	case M_READ_ATR_LEN:
+		DEBUGP(4, dev, "M_READ_ATR_LEN\n");
+		/* infinite loop possible, since there is no timeout */
+
+#define	MAX_ATR_LEN_RETRY	100
+
+		if (dev->atr_len == io_read_num_rec_bytes(iobase, &s)) {
+			if (dev->atr_len_retry++ >= MAX_ATR_LEN_RETRY) { /* + XX msec */
+				dev->mdelay = T_10MSEC;
+				dev->mstate = M_READ_ATR;
+			}
+		} else {
+			dev->atr_len = s;
+			dev->atr_len_retry = 0;	/* set new timeout */
+		}
+
+		DEBUGP(4, dev, "Current ATR_LEN = %i\n", dev->atr_len);
+		break;
+	case M_READ_ATR:
+		DEBUGP(4, dev, "M_READ_ATR\n");
+		xoutb(0x80, REG_FLAGS0(iobase));	/* reset SM */
+		for (i = 0; i < dev->atr_len; i++) {
+			xoutb(i, REG_BUF_ADDR(iobase));
+			dev->atr[i] = inb(REG_BUF_DATA(iobase));
+		}
+		/* Deactivate T_Active flags */
+		DEBUGP(4, dev, "Deactivate T_Active flags\n");
+		dev->flags1 = 0x01;
+		xoutb(dev->flags1, REG_FLAGS1(iobase));
+
+		/* atr is present (which doesn't mean it's valid) */
+		set_bit(IS_ATR_PRESENT, &dev->flags);
+		if (dev->atr[0] == 0x03)
+			str_invert_revert(dev->atr, dev->atr_len);
+		atrc = parse_atr(dev);
+		if (atrc == 0) {	/* atr invalid */
+			dev->mdelay = 0;
+			dev->mstate = M_BAD_CARD;
+		} else {
+			dev->mdelay = T_50MSEC;
+			dev->mstate = M_ATR_PRESENT;
+			set_bit(IS_ATR_VALID, &dev->flags);
+		}
+
+		if (test_bit(IS_ATR_VALID, &dev->flags) == 1) {
+			DEBUGP(4, dev, "monitor_card: ATR valid\n");
+ 			/* if ta1 == 0x11, no PPS necessary (default values) */
+			/* do not do PPS with multi protocol cards */
+			if ((test_bit(IS_AUTOPPS_ACT, &dev->flags) == 0) &&
+			    (dev->ta1 != 0x11) &&
+			    !(test_bit(IS_ANY_T0, &dev->flags) &&
+			    test_bit(IS_ANY_T1, &dev->flags))) {
+				DEBUGP(4, dev, "Perform AUTOPPS\n");
+				set_bit(IS_AUTOPPS_ACT, &dev->flags);
+				ptsreq.protocol = (0x01 << dev->proto);
+				ptsreq.flags = 0x01;
+				ptsreq.pts1 = 0x00;
+				ptsreq.pts2 = 0x00;
+				ptsreq.pts3 = 0x00;
+				if (set_protocol(dev, &ptsreq) == 0) {
+					DEBUGP(4, dev, "AUTOPPS ret SUCC\n");
+					clear_bit(IS_AUTOPPS_ACT, &dev->flags);
+					wake_up_interruptible(&dev->atrq);
+				} else {
+					DEBUGP(4, dev, "AUTOPPS failed: "
+					       "repower using defaults\n");
+					/* prepare for repowering  */
+					clear_bit(IS_ATR_PRESENT, &dev->flags);
+					clear_bit(IS_ATR_VALID, &dev->flags);
+					dev->rlen =
+					    dev->rpos =
+					    dev->atr_csum =
+					    dev->atr_len_retry = dev->cwarn = 0;
+					dev->mstate = M_FETCH_ATR;
+
+					dev->mdelay = T_50MSEC;
+				}
+			} else {
+				/* for cards which use slightly different
+				 * params (extra guard time) */
+				set_cardparameter(dev);
+				if (test_bit(IS_AUTOPPS_ACT, &dev->flags) == 1)
+					DEBUGP(4, dev, "AUTOPPS already active "
+					       "2nd try:use default values\n");
+				if (dev->ta1 == 0x11)
+					DEBUGP(4, dev, "No AUTOPPS necessary "
+					       "TA(1)==0x11\n");
+				if (test_bit(IS_ANY_T0, &dev->flags)
+				    && test_bit(IS_ANY_T1, &dev->flags))
+					DEBUGP(4, dev, "Do NOT perform AUTOPPS "
+					       "with multiprotocol cards\n");
+				clear_bit(IS_AUTOPPS_ACT, &dev->flags);
+				wake_up_interruptible(&dev->atrq);
+			}
+		} else {
+			DEBUGP(4, dev, "ATR invalid\n");
+			wake_up_interruptible(&dev->atrq);
+		}
+		break;
+	case M_BAD_CARD:
+		DEBUGP(4, dev, "M_BAD_CARD\n");
+		/* slow down warning, but prompt immediately after insertion */
+		if (dev->cwarn == 0 || dev->cwarn == 10) {
+			set_bit(IS_BAD_CARD, &dev->flags);
+			dev_warn(&dev->p_dev->dev, MODULE_NAME ": ");
+			if (test_bit(IS_BAD_CSUM, &dev->flags)) {
+				DEBUGP(4, dev, "ATR checksum (0x%.2x, should "
+				       "be zero) failed\n", dev->atr_csum);
+			}
+#ifdef CM4000_DEBUG
+			else if (test_bit(IS_BAD_LENGTH, &dev->flags)) {
+				DEBUGP(4, dev, "ATR length error\n");
+			} else {
+				DEBUGP(4, dev, "card damaged or wrong way "
+					"inserted\n");
+			}
+#endif
+			dev->cwarn = 0;
+			wake_up_interruptible(&dev->atrq);	/* wake open */
+		}
+		dev->cwarn++;
+		dev->mdelay = T_100MSEC;
+		dev->mstate = M_FETCH_ATR;
+		break;
+	default:
+		DEBUGP(7, dev, "Unknown action\n");
+		break;		/* nothing */
+	}
+
+release_io:
+	DEBUGP(7, dev, "release_io\n");
+	clear_bit(LOCK_IO, &dev->flags);
+	wake_up_interruptible(&dev->ioq);	/* whoever needs IO */
+
+return_with_timer:
+	DEBUGP(7, dev, "<- monitor_card (returns with timer)\n");
+	mod_timer(&dev->timer, jiffies + dev->mdelay);
+	clear_bit(LOCK_MONITOR, &dev->flags);
+}
+
+/* Interface to userland (file_operations) */
+
+static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
+			loff_t *ppos)
+{
+	struct cm4000_dev *dev = filp->private_data;
+	unsigned int iobase = dev->p_dev->resource[0]->start;
+	ssize_t rc;
+	int i, j, k;
+
+	DEBUGP(2, dev, "-> cmm_read(%s,%d)\n", current->comm, current->pid);
+
+	if (count == 0)		/* according to manpage */
+		return 0;
+
+	if (!pcmcia_dev_present(dev->p_dev) || /* device removed */
+	    test_bit(IS_CMM_ABSENT, &dev->flags))
+		return -ENODEV;
+
+	if (test_bit(IS_BAD_CSUM, &dev->flags))
+		return -EIO;
+
+	/* also see the note about this in cmm_write */
+	if (wait_event_interruptible
+	    (dev->atrq,
+	     ((filp->f_flags & O_NONBLOCK)
+	      || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) {
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+		return -ERESTARTSYS;
+	}
+
+	if (test_bit(IS_ATR_VALID, &dev->flags) == 0)
+		return -EIO;
+
+	/* this one implements blocking IO */
+	if (wait_event_interruptible
+	    (dev->readq,
+	     ((filp->f_flags & O_NONBLOCK) || (dev->rpos < dev->rlen)))) {
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+		return -ERESTARTSYS;
+	}
+
+	/* lock io */
+	if (wait_event_interruptible
+	    (dev->ioq,
+	     ((filp->f_flags & O_NONBLOCK)
+	      || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) {
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+		return -ERESTARTSYS;
+	}
+
+	rc = 0;
+	dev->flags0 = inb(REG_FLAGS0(iobase));
+	if ((dev->flags0 & 1) == 0	/* no smartcard inserted */
+	    || dev->flags0 == 0xff) {	/* no cardman inserted */
+		clear_bit(IS_ATR_VALID, &dev->flags);
+		if (dev->flags0 & 1) {
+			set_bit(IS_CMM_ABSENT, &dev->flags);
+			rc = -ENODEV;
+		} else {
+			rc = -EIO;
+		}
+		goto release_io;
+	}
+
+	DEBUGP(4, dev, "begin read answer\n");
+	j = min(count, (size_t)(dev->rlen - dev->rpos));
+	k = dev->rpos;
+	if (k + j > 255)
+		j = 256 - k;
+	DEBUGP(4, dev, "read1 j=%d\n", j);
+	for (i = 0; i < j; i++) {
+		xoutb(k++, REG_BUF_ADDR(iobase));
+		dev->rbuf[i] = xinb(REG_BUF_DATA(iobase));
+	}
+	j = min(count, (size_t)(dev->rlen - dev->rpos));
+	if (k + j > 255) {
+		DEBUGP(4, dev, "read2 j=%d\n", j);
+		dev->flags1 |= 0x10;	/* MSB buf addr set */
+		xoutb(dev->flags1, REG_FLAGS1(iobase));
+		for (; i < j; i++) {
+			xoutb(k++, REG_BUF_ADDR(iobase));
+			dev->rbuf[i] = xinb(REG_BUF_DATA(iobase));
+		}
+	}
+
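+	/* T=0 and the caller asked for more than was received: slip the
+	 * stored procedure byte in before the final byte read */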
+	if (dev->proto == 0 && count > dev->rlen - dev->rpos && i) {
+		DEBUGP(4, dev, "T=0 and count > buffer\n");
+		dev->rbuf[i] = dev->rbuf[i - 1];
+		dev->rbuf[i - 1] = dev->procbyte;
+		j++;
+	}
+	count = j;
+
+	dev->rpos = dev->rlen + 1;
+
+	/* Clear T1Active */
+	DEBUGP(4, dev, "Clear T1Active\n");
+	dev->flags1 &= 0xdf;
+	xoutb(dev->flags1, REG_FLAGS1(iobase));
+
+	xoutb(0, REG_FLAGS1(iobase));	/* clear detectCMM */
+	/* last check before exit */
+	if (!io_detect_cm4000(iobase, dev)) {
+		rc = -ENODEV;
+		goto release_io;
+	}
+
+	if (test_bit(IS_INVREV, &dev->flags) && count > 0)
+		str_invert_revert(dev->rbuf, count);
+
+	if (copy_to_user(buf, dev->rbuf, count))
+		rc = -EFAULT;
+
+release_io:
+	clear_bit(LOCK_IO, &dev->flags);
+	wake_up_interruptible(&dev->ioq);
+
+	DEBUGP(2, dev, "<- cmm_read returns: rc = %zi\n",
+	       (rc < 0 ? rc : count));
+	return rc < 0 ? rc : count;
+}
+
+static ssize_t cmm_write(struct file *filp, const char __user *buf,
+			 size_t count, loff_t *ppos)
+{
+	struct cm4000_dev *dev = filp->private_data;
+	unsigned int iobase = dev->p_dev->resource[0]->start;
+	unsigned short s;
+	unsigned char tmp;
+	unsigned char infolen;
+	unsigned char sendT0;
+	unsigned short nsend;
+	unsigned short nr;
+	ssize_t rc;
+	int i;
+
+	DEBUGP(2, dev, "-> cmm_write(%s,%d)\n", current->comm, current->pid);
+
+	if (count == 0)		/* according to manpage */
+		return 0;
+
+	if (dev->proto == 0 && count < 4) {
+		/* T0 must have at least 4 bytes */
+		DEBUGP(4, dev, "T0 short write\n");
+		return -EIO;
+	}
+
+	nr = count & 0x1ff;	/* max bytes to write */
+
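+	/* for T=0, a command longer than the 5 byte header carries data
+	 * for the card, which the controller is told about via the SendT0
+	 * bit (0x08) in FLAGS1 */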
+	sendT0 = dev->proto ? 0 : nr > 5 ? 0x08 : 0;
+
+	if (!pcmcia_dev_present(dev->p_dev) || /* device removed */
+	    test_bit(IS_CMM_ABSENT, &dev->flags))
+		return -ENODEV;
+
+	if (test_bit(IS_BAD_CSUM, &dev->flags)) {
+		DEBUGP(4, dev, "bad csum\n");
+		return -EIO;
+	}
+
+	/*
+	 * wait for atr to become valid.
+	 * note: it is important to lock this code. if we don't, the monitor
+	 * could be run between test_bit and the call to sleep on the
+	 * atr-queue.  if *then* the monitor detects atr valid, it will wake up
+	 * any process on the atr-queue, *but* since we have been interrupted,
+	 * we do not yet sleep on this queue. this would result in a missed
+	 * wake_up and the calling process would sleep forever (until
+	 * interrupted).  also, do *not* restore_flags before sleep_on, because
+	 * this could result in the same situation!
+	 */
+	if (wait_event_interruptible
+	    (dev->atrq,
+	     ((filp->f_flags & O_NONBLOCK)
+	      || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) {
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+		return -ERESTARTSYS;
+	}
+
+	if (test_bit(IS_ATR_VALID, &dev->flags) == 0) {	/* invalid atr */
+		DEBUGP(4, dev, "invalid ATR\n");
+		return -EIO;
+	}
+
+	/* lock io */
+	if (wait_event_interruptible
+	    (dev->ioq,
+	     ((filp->f_flags & O_NONBLOCK)
+	      || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) {
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+		return -ERESTARTSYS;
+	}
+
+	if (copy_from_user(dev->sbuf, buf, ((count > 512) ? 512 : count))) {
+		/* don't return with LOCK_IO still held */
+		rc = -EFAULT;
+		goto release_io;
+	}
+
+	rc = 0;
+	dev->flags0 = inb(REG_FLAGS0(iobase));
+	if ((dev->flags0 & 1) == 0	/* no smartcard inserted */
+	    || dev->flags0 == 0xff) {	/* no cardman inserted */
+		clear_bit(IS_ATR_VALID, &dev->flags);
+		if (dev->flags0 & 1) {
+			set_bit(IS_CMM_ABSENT, &dev->flags);
+			rc = -ENODEV;
+		} else {
+			DEBUGP(4, dev, "IO error\n");
+			rc = -EIO;
+		}
+		goto release_io;
+	}
+
+	xoutb(0x80, REG_FLAGS0(iobase));	/* reset SM  */
+
+	if (!io_detect_cm4000(iobase, dev)) {
+		rc = -ENODEV;
+		goto release_io;
+	}
+
+	/* reflect T=0 send/read mode in flags1 */
+	dev->flags1 |= (sendT0);
+
+	set_cardparameter(dev);
+
+	/* dummy read, reset flag procedure received */
+	tmp = inb(REG_FLAGS1(iobase));
+
+	dev->flags1 = 0x20	/* T_Active */
+	    | (sendT0)
+	    | (test_bit(IS_INVREV, &dev->flags) ? 2 : 0)/* inverse parity  */
+	    | (((dev->baudv - 1) & 0x0100) >> 8);	/* MSB-Baud */
+	DEBUGP(1, dev, "set dev->flags1 = 0x%.2x\n", dev->flags1);
+	xoutb(dev->flags1, REG_FLAGS1(iobase));
+
+	/* xmit data */
+	DEBUGP(4, dev, "Xmit data\n");
+	for (i = 0; i < nr; i++) {
+		if (i >= 256) {
+			dev->flags1 = 0x20	/* T_Active */
+			    | (sendT0)	/* SendT0 */
+				/* inverse parity: */
+			    | (test_bit(IS_INVREV, &dev->flags) ? 2 : 0)
+			    | (((dev->baudv - 1) & 0x0100) >> 8) /* MSB-Baud */
+			    | 0x10;	/* set address high */
+			DEBUGP(4, dev, "dev->flags = 0x%.2x - set address "
+			       "high\n", dev->flags1);
+			xoutb(dev->flags1, REG_FLAGS1(iobase));
+		}
+		if (test_bit(IS_INVREV, &dev->flags)) {
+			DEBUGP(4, dev, "Apply inverse convention for 0x%.2x "
+				"-> 0x%.2x\n", (unsigned char)dev->sbuf[i],
+			      invert_revert(dev->sbuf[i]));
+			xoutb(i, REG_BUF_ADDR(iobase));
+			xoutb(invert_revert(dev->sbuf[i]),
+			      REG_BUF_DATA(iobase));
+		} else {
+			xoutb(i, REG_BUF_ADDR(iobase));
+			xoutb(dev->sbuf[i], REG_BUF_DATA(iobase));
+		}
+	}
+	DEBUGP(4, dev, "Xmit done\n");
+
+	if (dev->proto == 0) {
+		/* T=0 proto: 0 byte reply  */
+		if (nr == 4) {
+			DEBUGP(4, dev, "T=0 assumes 0 byte reply\n");
+			xoutb(i, REG_BUF_ADDR(iobase));
+			if (test_bit(IS_INVREV, &dev->flags))
+				xoutb(0xff, REG_BUF_DATA(iobase));
+			else
+				xoutb(0x00, REG_BUF_DATA(iobase));
+		}
+
+		/* numSendBytes */
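+		/* with SendT0 the raw count is sent as-is; otherwise it is
+		 * the 5 header bytes (incl. the dummy byte added above for
+		 * 4 byte commands) plus the value of the T=0 length byte
+		 * sbuf[4], where 0 encodes 256 */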
+		if (sendT0)
+			nsend = nr;
+		else {
+			if (nr == 4)
+				nsend = 5;
+			else {
+				nsend = 5 + (unsigned char)dev->sbuf[4];
+				if (dev->sbuf[4] == 0)
+					nsend += 0x100;
+			}
+		}
+	} else
+		nsend = nr;
+
+	/* T0: output procedure byte */
+	if (test_bit(IS_INVREV, &dev->flags)) {
+		DEBUGP(4, dev, "T=0 set Procedure byte (inverse-reverse) "
+		       "0x%.2x\n", invert_revert(dev->sbuf[1]));
+		xoutb(invert_revert(dev->sbuf[1]), REG_NUM_BYTES(iobase));
+	} else {
+		DEBUGP(4, dev, "T=0 set Procedure byte 0x%.2x\n", dev->sbuf[1]);
+		xoutb(dev->sbuf[1], REG_NUM_BYTES(iobase));
+	}
+
+	DEBUGP(1, dev, "set NumSendBytes = 0x%.2x\n",
+	       (unsigned char)(nsend & 0xff));
+	xoutb((unsigned char)(nsend & 0xff), REG_NUM_SEND(iobase));
+
+	DEBUGP(1, dev, "Trigger CARDMAN CONTROLLER (0x%.2x)\n",
+	       0x40	/* SM_Active */
+	      | (dev->flags0 & 2 ? 0 : 4)	/* power on if needed */
+	      |(dev->proto ? 0x10 : 0x08)	/* T=1/T=0 */
+	      |(nsend & 0x100) >> 8 /* MSB numSendBytes */ );
+	xoutb(0x40		/* SM_Active */
+	      | (dev->flags0 & 2 ? 0 : 4)	/* power on if needed */
+	      |(dev->proto ? 0x10 : 0x08)	/* T=1/T=0 */
+	      |(nsend & 0x100) >> 8,	/* MSB numSendBytes */
+	      REG_FLAGS0(iobase));
+
+	/* wait for xmit done */
+	if (dev->proto == 1) {
+		DEBUGP(4, dev, "Wait for xmit done\n");
+		for (i = 0; i < 1000; i++) {
+			if (inb(REG_FLAGS0(iobase)) & 0x08)
+				break;
+			msleep_interruptible(10);
+		}
+		if (i == 1000) {
+			DEBUGP(4, dev, "timeout waiting for xmit done\n");
+			rc = -EIO;
+			goto release_io;
+		}
+	}
+
+	/* T=1: wait for infoLen */
+
+	infolen = 0;
+	if (dev->proto) {
+		/* wait until infoLen is valid */
+		for (i = 0; i < 6000; i++) {	/* max waiting time of 1 min */
+			io_read_num_rec_bytes(iobase, &s);
+			if (s >= 3) {
+				infolen = inb(REG_FLAGS1(iobase));
+				DEBUGP(4, dev, "infolen=%d\n", infolen);
+				break;
+			}
+			msleep_interruptible(10);
+		}
+		if (i == 6000) {
+			DEBUGP(4, dev, "timeout waiting for infoLen\n");
+			rc = -EIO;
+			goto release_io;
+		}
+	} else
+		clear_bit(IS_PROCBYTE_PRESENT, &dev->flags);
+
+	/* numRecBytes | bit9 of numRecBytes */
+	io_read_num_rec_bytes(iobase, &dev->rlen);
+	for (i = 0; i < 600; i++) {	/* max waiting time of 2 sec */
+		if (dev->proto) {
+			if (dev->rlen >= infolen + 4)
+				break;
+		}
+		msleep_interruptible(10);
+		/* numRecBytes | bit9 of numRecBytes */
+		io_read_num_rec_bytes(iobase, &s);
+		if (s > dev->rlen) {
+			DEBUGP(1, dev, "NumRecBytes inc (reset timeout)\n");
+			i = 0;	/* reset timeout */
+			dev->rlen = s;
+		}
+		/* T=0: we are done when numRecBytes doesn't
+		 *      increment any more, NoProcedureByte
+		 *      is set and numRecBytes == bytes sent + 6
+		 *      (header bytes + data + 1 for sw2),
+		 *      except when the card replies with an error,
+		 *      in which case no data is sent back.
+		 */
+		else if (dev->proto == 0) {
+			if ((inb(REG_BUF_ADDR(iobase)) & 0x80)) {
+				/* no procedure byte received since last read */
+				DEBUGP(1, dev, "NoProcedure byte set\n");
+				/* i=0; */
+			} else {
+				/* procedure byte received since last read */
+				DEBUGP(1, dev, "NoProcedure byte unset "
+					"(reset timeout)\n");
+				dev->procbyte = inb(REG_FLAGS1(iobase));
+				DEBUGP(1, dev, "Read procedure byte 0x%.2x\n",
+				      dev->procbyte);
+				i = 0;	/* reset timeout */
+			}
+			if (inb(REG_FLAGS0(iobase)) & 0x08) {
+				DEBUGP(1, dev, "T0Done flag (read reply)\n");
+				break;
+			}
+		}
+		if (dev->proto)
+			infolen = inb(REG_FLAGS1(iobase));
+	}
+	if (i == 600) {
+		DEBUGP(1, dev, "timeout waiting for numRecBytes\n");
+		rc = -EIO;
+		goto release_io;
+	} else {
+		if (dev->proto == 0) {
+			DEBUGP(1, dev, "Wait for T0Done bit to be  set\n");
+			for (i = 0; i < 1000; i++) {
+				if (inb(REG_FLAGS0(iobase)) & 0x08)
+					break;
+				msleep_interruptible(10);
+			}
+			if (i == 1000) {
+				DEBUGP(1, dev, "timeout waiting for T0Done\n");
+				rc = -EIO;
+				goto release_io;
+			}
+
+			dev->procbyte = inb(REG_FLAGS1(iobase));
+			DEBUGP(4, dev, "Read procedure byte 0x%.2x\n",
+			      dev->procbyte);
+
+			io_read_num_rec_bytes(iobase, &dev->rlen);
+			DEBUGP(4, dev, "Read NumRecBytes = %i\n", dev->rlen);
+
+		}
+	}
+	/* T=1: read offset=zero, T=0: read offset=after challenge */
+	dev->rpos = dev->proto ? 0 : nr == 4 ? 5 : nr > dev->rlen ? 5 : nr;
+	DEBUGP(4, dev, "dev->rlen = %i,  dev->rpos = %i, nr = %i\n",
+	      dev->rlen, dev->rpos, nr);
+
+release_io:
+	DEBUGP(4, dev, "Reset SM\n");
+	xoutb(0x80, REG_FLAGS0(iobase));	/* reset SM */
+
+	if (rc < 0) {
+		DEBUGP(4, dev, "Write failed but clear T_Active\n");
+		dev->flags1 &= 0xdf;
+		xoutb(dev->flags1, REG_FLAGS1(iobase));
+	}
+
+	clear_bit(LOCK_IO, &dev->flags);
+	wake_up_interruptible(&dev->ioq);
+	wake_up_interruptible(&dev->readq);	/* tell read we have data */
+
+	/* ITSEC E2: clear write buffer */
+	memset((char *)dev->sbuf, 0, 512);
+
+	/* return error or actually written bytes */
+	DEBUGP(2, dev, "<- cmm_write\n");
+	return rc < 0 ? rc : nr;
+}
+
+static void start_monitor(struct cm4000_dev *dev)
+{
+	DEBUGP(3, dev, "-> start_monitor\n");
+	if (!dev->monitor_running) {
+		DEBUGP(5, dev, "create, init and add timer\n");
+		timer_setup(&dev->timer, monitor_card, 0);
+		dev->monitor_running = 1;
+		mod_timer(&dev->timer, jiffies);
+	} else
+		DEBUGP(5, dev, "monitor already running\n");
+	DEBUGP(3, dev, "<- start_monitor\n");
+}
+
+static void stop_monitor(struct cm4000_dev *dev)
+{
+	DEBUGP(3, dev, "-> stop_monitor\n");
+	if (dev->monitor_running) {
+		DEBUGP(5, dev, "stopping monitor\n");
+		terminate_monitor(dev);
+		/* reset monitor SM */
+		clear_bit(IS_ATR_VALID, &dev->flags);
+		clear_bit(IS_ATR_PRESENT, &dev->flags);
+	} else
+		DEBUGP(5, dev, "monitor already stopped\n");
+	DEBUGP(3, dev, "<- stop_monitor\n");
+}
+
+static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct cm4000_dev *dev = filp->private_data;
+	unsigned int iobase = dev->p_dev->resource[0]->start;
+	struct inode *inode = file_inode(filp);
+	struct pcmcia_device *link;
+	int size;
+	int rc;
+	void __user *argp = (void __user *)arg;
+#ifdef CM4000_DEBUG
+	char *ioctl_names[CM_IOC_MAXNR + 1] = {
+		[_IOC_NR(CM_IOCGSTATUS)] = "CM_IOCGSTATUS",
+		[_IOC_NR(CM_IOCGATR)] = "CM_IOCGATR",
+		[_IOC_NR(CM_IOCARDOFF)] = "CM_IOCARDOFF",
+		[_IOC_NR(CM_IOCSPTS)] = "CM_IOCSPTS",
+		[_IOC_NR(CM_IOSDBGLVL)] = "CM4000_DBGLVL",
+	};
+	DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode),
+	       iminor(inode), ioctl_names[_IOC_NR(cmd)]);
+#endif
+
+	mutex_lock(&cmm_mutex);
+	rc = -ENODEV;
+	link = dev_table[iminor(inode)];
+	if (!pcmcia_dev_present(link)) {
+		DEBUGP(4, dev, "DEV_OK false\n");
+		goto out;
+	}
+
+	if (test_bit(IS_CMM_ABSENT, &dev->flags)) {
+		DEBUGP(4, dev, "CMM_ABSENT flag set\n");
+		goto out;
+	}
+	rc = -EINVAL;
+
+	if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) {
+		DEBUGP(4, dev, "ioctype mismatch\n");
+		goto out;
+	}
+	if (_IOC_NR(cmd) > CM_IOC_MAXNR) {
+		DEBUGP(4, dev, "iocnr mismatch\n");
+		goto out;
+	}
+	size = _IOC_SIZE(cmd);
+	rc = -EFAULT;
+	DEBUGP(4, dev, "iocdir=%.4x iocr=%.4x iocw=%.4x iocsize=%d cmd=%.4x\n",
+	      _IOC_DIR(cmd), _IOC_READ, _IOC_WRITE, size, cmd);
+
+	if (_IOC_DIR(cmd) & _IOC_READ) {
+		if (!access_ok(VERIFY_WRITE, argp, size))
+			goto out;
+	}
+	if (_IOC_DIR(cmd) & _IOC_WRITE) {
+		if (!access_ok(VERIFY_READ, argp, size))
+			goto out;
+	}
+	rc = 0;
+
+	switch (cmd) {
+	case CM_IOCGSTATUS:
+		DEBUGP(4, dev, " ... in CM_IOCGSTATUS\n");
+		{
+			int status;
+
+			/* clear other bits, but leave inserted & powered as
+			 * they are */
+			status = dev->flags0 & 3;
+			if (test_bit(IS_ATR_PRESENT, &dev->flags))
+				status |= CM_ATR_PRESENT;
+			if (test_bit(IS_ATR_VALID, &dev->flags))
+				status |= CM_ATR_VALID;
+			if (test_bit(IS_CMM_ABSENT, &dev->flags))
+				status |= CM_NO_READER;
+			if (test_bit(IS_BAD_CARD, &dev->flags))
+				status |= CM_BAD_CARD;
+			if (copy_to_user(argp, &status, sizeof(int)))
+				rc = -EFAULT;
+		}
+		break;
+	case CM_IOCGATR:
+		DEBUGP(4, dev, "... in CM_IOCGATR\n");
+		{
+			struct atreq __user *atreq = argp;
+			int tmp;
+			/* allow nonblocking io and being interrupted */
+			if (wait_event_interruptible
+			    (dev->atrq,
+			     ((filp->f_flags & O_NONBLOCK)
+			      || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags)
+				  != 0)))) {
+				if (filp->f_flags & O_NONBLOCK)
+					rc = -EAGAIN;
+				else
+					rc = -ERESTARTSYS;
+				break;
+			}
+
+			rc = -EFAULT;
+			if (test_bit(IS_ATR_VALID, &dev->flags) == 0) {
+				tmp = -1;
+				if (copy_to_user(&(atreq->atr_len), &tmp,
+						 sizeof(int)))
+					break;
+			} else {
+				if (copy_to_user(atreq->atr, dev->atr,
+						 dev->atr_len))
+					break;
+
+				tmp = dev->atr_len;
+				if (copy_to_user(&(atreq->atr_len), &tmp, sizeof(int)))
+					break;
+			}
+			rc = 0;
+			break;
+		}
+	case CM_IOCARDOFF:
+
+#ifdef CM4000_DEBUG
+		DEBUGP(4, dev, "... in CM_IOCARDOFF\n");
+		if (dev->flags0 & 0x01) {
+			DEBUGP(4, dev, "    Card inserted\n");
+		} else {
+			DEBUGP(2, dev, "    No card inserted\n");
+		}
+		if (dev->flags0 & 0x02) {
+			DEBUGP(4, dev, "    Card powered\n");
+		} else {
+			DEBUGP(2, dev, "    Card not powered\n");
+		}
+#endif
+
+		/* is a card inserted and powered? */
+		if ((dev->flags0 & 0x01) && (dev->flags0 & 0x02)) {
+
+			/* get IO lock */
+			if (wait_event_interruptible
+			    (dev->ioq,
+			     ((filp->f_flags & O_NONBLOCK)
+			      || (test_and_set_bit(LOCK_IO, (void *)&dev->flags)
+				  == 0)))) {
+				if (filp->f_flags & O_NONBLOCK)
+					rc = -EAGAIN;
+				else
+					rc = -ERESTARTSYS;
+				break;
+			}
+			/* Set Flags0 = 0x42 */
+			DEBUGP(4, dev, "Set Flags0=0x42 \n");
+			xoutb(0x42, REG_FLAGS0(iobase));
+			clear_bit(IS_ATR_PRESENT, &dev->flags);
+			clear_bit(IS_ATR_VALID, &dev->flags);
+			dev->mstate = M_CARDOFF;
+			clear_bit(LOCK_IO, &dev->flags);
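+			/* the monitor state machine now powers the card off
+			 * and back on; wait until it presents a valid ATR
+			 * again */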
+			if (wait_event_interruptible
+			    (dev->atrq,
+			     ((filp->f_flags & O_NONBLOCK)
+			      || (test_bit(IS_ATR_VALID, (void *)&dev->flags) !=
+				  0)))) {
+				if (filp->f_flags & O_NONBLOCK)
+					rc = -EAGAIN;
+				else
+					rc = -ERESTARTSYS;
+				break;
+			}
+		}
+		/* release lock */
+		clear_bit(LOCK_IO, &dev->flags);
+		wake_up_interruptible(&dev->ioq);
+
+		rc = 0;
+		break;
+	case CM_IOCSPTS:
+		{
+			struct ptsreq krnptsreq;
+
+			if (copy_from_user(&krnptsreq, argp,
+					   sizeof(struct ptsreq))) {
+				rc = -EFAULT;
+				break;
+			}
+
+			rc = 0;
+			DEBUGP(4, dev, "... in CM_IOCSPTS\n");
+			/* wait for ATR to get valid */
+			if (wait_event_interruptible
+			    (dev->atrq,
+			     ((filp->f_flags & O_NONBLOCK)
+			      || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags)
+				  != 0)))) {
+				if (filp->f_flags & O_NONBLOCK)
+					rc = -EAGAIN;
+				else
+					rc = -ERESTARTSYS;
+				break;
+			}
+			/* get IO lock */
+			if (wait_event_interruptible
+			    (dev->ioq,
+			     ((filp->f_flags & O_NONBLOCK)
+			      || (test_and_set_bit(LOCK_IO, (void *)&dev->flags)
+				  == 0)))) {
+				if (filp->f_flags & O_NONBLOCK)
+					rc = -EAGAIN;
+				else
+					rc = -ERESTARTSYS;
+				break;
+			}
+
+			rc = set_protocol(dev, &krnptsreq);
+			if (rc != 0) {
+				/* auto power_on again */
+				dev->mstate = M_FETCH_ATR;
+				clear_bit(IS_ATR_VALID, &dev->flags);
+			}
+			/* release lock */
+			clear_bit(LOCK_IO, &dev->flags);
+			wake_up_interruptible(&dev->ioq);
+
+		}
+		break;
+#ifdef CM4000_DEBUG
+	case CM_IOSDBGLVL:
+		rc = -ENOTTY;
+		break;
+#endif
+	default:
+		DEBUGP(4, dev, "... in default (unknown IOCTL code)\n");
+		rc = -ENOTTY;
+	}
+out:
+	mutex_unlock(&cmm_mutex);
+	return rc;
+}
+
+static int cmm_open(struct inode *inode, struct file *filp)
+{
+	struct cm4000_dev *dev;
+	struct pcmcia_device *link;
+	int minor = iminor(inode);
+	int ret;
+
+	if (minor >= CM4000_MAX_DEV)
+		return -ENODEV;
+
+	mutex_lock(&cmm_mutex);
+	link = dev_table[minor];
+	if (link == NULL || !pcmcia_dev_present(link)) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (link->open) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	dev = link->priv;
+	filp->private_data = dev;
+
+	DEBUGP(2, dev, "-> cmm_open(device=%d.%d process=%s,%d)\n",
+	      imajor(inode), minor, current->comm, current->pid);
+
+	/* init device variables; they may be "polluted" after close,
+	 * or the device may never have been closed (i.e. open failed)
+	 */
+
+	ZERO_DEV(dev);
+
+	/* opening will always block, since the
+	 * monitor is started by open and we then
+	 * have to wait for the ATR to become
+	 * valid, i.e. block until a card is
+	 * inserted and its ATR has been read
+	 */
+	if (filp->f_flags & O_NONBLOCK) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	dev->mdelay = T_50MSEC;
+
+	/* start monitoring the cardstatus */
+	start_monitor(dev);
+
+	link->open = 1;		/* only one open per device */
+
+	DEBUGP(2, dev, "<- cmm_open\n");
+	ret = nonseekable_open(inode, filp);
+out:
+	mutex_unlock(&cmm_mutex);
+	return ret;
+}
+
+static int cmm_close(struct inode *inode, struct file *filp)
+{
+	struct cm4000_dev *dev;
+	struct pcmcia_device *link;
+	int minor = iminor(inode);
+
+	if (minor >= CM4000_MAX_DEV)
+		return -ENODEV;
+
+	link = dev_table[minor];
+	if (link == NULL)
+		return -ENODEV;
+
+	dev = link->priv;
+
+	DEBUGP(2, dev, "-> cmm_close(maj/min=%d.%d)\n",
+	       imajor(inode), minor);
+
+	stop_monitor(dev);
+
+	ZERO_DEV(dev);
+
+	link->open = 0;		/* only one open per device */
+	wake_up(&dev->devq);	/* socket removed? */
+
+	DEBUGP(2, dev, "cmm_close\n");
+	return 0;
+}
+
+static void cmm_cm4000_release(struct pcmcia_device * link)
+{
+	struct cm4000_dev *dev = link->priv;
+
+	/* don't terminate the monitor, rather rely on
+	 * close doing that for us.
+	 */
+	DEBUGP(3, dev, "-> cmm_cm4000_release\n");
+	while (link->open) {
+		printk(KERN_INFO MODULE_NAME ": delaying release until "
+		       "process has terminated\n");
+		/* note: don't interrupt us:
+		 * close the applications which own
+		 * the devices _first_!
+		 */
+		wait_event(dev->devq, (link->open == 0));
+	}
+	/* dev->devq=NULL;	this cannot be zeroed earlier */
+	DEBUGP(3, dev, "<- cmm_cm4000_release\n");
+}
+
+/*==== Interface to PCMCIA Layer =======================================*/
+
+static int cm4000_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+	return pcmcia_request_io(p_dev);
+}
+
+static int cm4000_config(struct pcmcia_device * link, int devno)
+{
+	link->config_flags |= CONF_AUTO_SET_IO;
+
+	/* read the config-tuples */
+	if (pcmcia_loop_config(link, cm4000_config_check, NULL))
+		goto cs_release;
+
+	if (pcmcia_enable_device(link))
+		goto cs_release;
+
+	return 0;
+
+cs_release:
+	cm4000_release(link);
+	return -ENODEV;
+}
+
+static int cm4000_suspend(struct pcmcia_device *link)
+{
+	struct cm4000_dev *dev;
+
+	dev = link->priv;
+	stop_monitor(dev);
+
+	return 0;
+}
+
+static int cm4000_resume(struct pcmcia_device *link)
+{
+	struct cm4000_dev *dev;
+
+	dev = link->priv;
+	if (link->open)
+		start_monitor(dev);
+
+	return 0;
+}
+
+static void cm4000_release(struct pcmcia_device *link)
+{
+	cmm_cm4000_release(link);	/* delay release until device closed */
+	pcmcia_disable_device(link);
+}
+
+static int cm4000_probe(struct pcmcia_device *link)
+{
+	struct cm4000_dev *dev;
+	int i, ret;
+
+	for (i = 0; i < CM4000_MAX_DEV; i++)
+		if (dev_table[i] == NULL)
+			break;
+
+	if (i == CM4000_MAX_DEV) {
+		printk(KERN_NOTICE MODULE_NAME ": all devices in use\n");
+		return -ENODEV;
+	}
+
+	/* create a new cm4000_cs device */
+	dev = kzalloc(sizeof(struct cm4000_dev), GFP_KERNEL);
+	if (dev == NULL)
+		return -ENOMEM;
+
+	dev->p_dev = link;
+	link->priv = dev;
+	dev_table[i] = link;
+
+	init_waitqueue_head(&dev->devq);
+	init_waitqueue_head(&dev->ioq);
+	init_waitqueue_head(&dev->atrq);
+	init_waitqueue_head(&dev->readq);
+
+	ret = cm4000_config(link, i);
+	if (ret) {
+		dev_table[i] = NULL;
+		kfree(dev);
+		return ret;
+	}
+
+	device_create(cmm_class, NULL, MKDEV(major, i), NULL, "cmm%d", i);
+
+	return 0;
+}
+
+static void cm4000_detach(struct pcmcia_device *link)
+{
+	struct cm4000_dev *dev = link->priv;
+	int devno;
+
+	/* find device */
+	for (devno = 0; devno < CM4000_MAX_DEV; devno++)
+		if (dev_table[devno] == link)
+			break;
+	if (devno == CM4000_MAX_DEV)
+		return;
+
+	stop_monitor(dev);
+
+	cm4000_release(link);
+
+	dev_table[devno] = NULL;
+	kfree(dev);
+
+	device_destroy(cmm_class, MKDEV(major, devno));
+}
+
+static const struct file_operations cm4000_fops = {
+	.owner	= THIS_MODULE,
+	.read	= cmm_read,
+	.write	= cmm_write,
+	.unlocked_ioctl	= cmm_ioctl,
+	.open	= cmm_open,
+	.release = cmm_close,
+	.llseek = no_llseek,
+};
+
+static const struct pcmcia_device_id cm4000_ids[] = {
+	PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0002),
+	PCMCIA_DEVICE_PROD_ID12("CardMan", "4000", 0x2FB368CA, 0xA2BD8C39),
+	PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, cm4000_ids);
+
+static struct pcmcia_driver cm4000_driver = {
+	.owner	  = THIS_MODULE,
+	.name	  = "cm4000_cs",
+	.probe    = cm4000_probe,
+	.remove   = cm4000_detach,
+	.suspend  = cm4000_suspend,
+	.resume   = cm4000_resume,
+	.id_table = cm4000_ids,
+};
+
+static int __init cmm_init(void)
+{
+	int rc;
+
+	cmm_class = class_create(THIS_MODULE, "cardman_4000");
+	if (IS_ERR(cmm_class))
+		return PTR_ERR(cmm_class);
+
+	major = register_chrdev(0, DEVICE_NAME, &cm4000_fops);
+	if (major < 0) {
+		printk(KERN_WARNING MODULE_NAME
+			": could not get major number\n");
+		class_destroy(cmm_class);
+		return major;
+	}
+
+	rc = pcmcia_register_driver(&cm4000_driver);
+	if (rc < 0) {
+		unregister_chrdev(major, DEVICE_NAME);
+		class_destroy(cmm_class);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void __exit cmm_exit(void)
+{
+	pcmcia_unregister_driver(&cm4000_driver);
+	unregister_chrdev(major, DEVICE_NAME);
+	class_destroy(cmm_class);
+}
+
+module_init(cmm_init);
+module_exit(cmm_exit);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
new file mode 100644
index 0000000..f809654
--- /dev/null
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -0,0 +1,685 @@
+/*
+ * A driver for the Omnikey PCMCIA smartcard reader CardMan 4040
+ *
+ * (c) 2000-2004 Omnikey AG (http://www.omnikey.com/)
+ *
+ * (C) 2005-2006 Harald Welte <laforge@gnumonks.org>
+ * 	- add support for poll()
+ * 	- driver cleanup
+ * 	- add waitqueues
+ * 	- adhere to linux kernel coding style and policies
+ * 	- support 2.6.13 "new style" pcmcia interface
+ * 	- add class interface for udev device creation
+ *
+ * The device is basically a USB CCID-compliant device that has been
+ * attached to an I/O-mapped FIFO.
+ *
+ * All rights reserved, Dual BSD/GPL Licensed.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+#include "cm4040_cs.h"
+
+
+#define reader_to_dev(x)	(&(x)->p_dev->dev)
+
+/* n (debug level) is ignored */
+/* additional debug output may be enabled by re-compiling with
+ * CM4040_DEBUG set */
+/* #define CM4040_DEBUG */
+#define DEBUGP(n, rdr, x, args...) do { 		\
+		dev_dbg(reader_to_dev(rdr), "%s:" x, 	\
+			   __func__ , ## args);		\
+	} while (0)
+
+static DEFINE_MUTEX(cm4040_mutex);
+
+#define	CCID_DRIVER_BULK_DEFAULT_TIMEOUT  	(150*HZ)
+#define	CCID_DRIVER_ASYNC_POWERUP_TIMEOUT 	(35*HZ)
+#define	CCID_DRIVER_MINIMUM_TIMEOUT 		(3*HZ)
+#define READ_WRITE_BUFFER_SIZE 512
+#define POLL_LOOP_COUNT				1000
+
+/* how often to poll for fifo status change */
+#define POLL_PERIOD 				msecs_to_jiffies(10)
+
+static void reader_release(struct pcmcia_device *link);
+
+static int major;
+static struct class *cmx_class;
+
+#define		BS_READABLE	0x01
+#define		BS_WRITABLE	0x02
+
+struct reader_dev {
+	struct pcmcia_device	*p_dev;
+	wait_queue_head_t	devq;
+	wait_queue_head_t	poll_wait;
+	wait_queue_head_t	read_wait;
+	wait_queue_head_t	write_wait;
+	unsigned long 	  	buffer_status;
+	unsigned long     	timeout;
+	unsigned char     	s_buf[READ_WRITE_BUFFER_SIZE];
+	unsigned char     	r_buf[READ_WRITE_BUFFER_SIZE];
+	struct timer_list 	poll_timer;
+};
+
+static struct pcmcia_device *dev_table[CM_MAX_DEV];
+
+#ifndef CM4040_DEBUG
+#define	xoutb	outb
+#define	xinb	inb
+#else
+static inline void xoutb(unsigned char val, unsigned short port)
+{
+	pr_debug("outb(val=%.2x,port=%.4x)\n", val, port);
+	outb(val, port);
+}
+
+static inline unsigned char xinb(unsigned short port)
+{
+	unsigned char val;
+
+	val = inb(port);
+	pr_debug("%.2x=inb(%.4x)\n", val, port);
+	return val;
+}
+#endif
+
+/* poll the device fifo status register.  not to be confused with
+ * the poll syscall. */
+static void cm4040_do_poll(struct timer_list *t)
+{
+	struct reader_dev *dev = from_timer(dev, t, poll_timer);
+	unsigned int obs = xinb(dev->p_dev->resource[0]->start
+				+ REG_OFFSET_BUFFER_STATUS);
+
+	if ((obs & BSR_BULK_IN_FULL)) {
+		set_bit(BS_READABLE, &dev->buffer_status);
+		DEBUGP(4, dev, "waking up read_wait\n");
+		wake_up_interruptible(&dev->read_wait);
+	} else
+		clear_bit(BS_READABLE, &dev->buffer_status);
+
+	if (!(obs & BSR_BULK_OUT_FULL)) {
+		set_bit(BS_WRITABLE, &dev->buffer_status);
+		DEBUGP(4, dev, "waking up write_wait\n");
+		wake_up_interruptible(&dev->write_wait);
+	} else
+		clear_bit(BS_WRITABLE, &dev->buffer_status);
+
+	if (dev->buffer_status)
+		wake_up_interruptible(&dev->poll_wait);
+
+	mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD);
+}
+
+static void cm4040_stop_poll(struct reader_dev *dev)
+{
+	del_timer_sync(&dev->poll_timer);
+}
+
+static int wait_for_bulk_out_ready(struct reader_dev *dev)
+{
+	int i, rc;
+	int iobase = dev->p_dev->resource[0]->start;
+
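+	/* spin briefly on the status register first; if the FIFO stays
+	 * full, sleep until cm4040_do_poll() flags it writable */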
+	for (i = 0; i < POLL_LOOP_COUNT; i++) {
+		if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS)
+		    & BSR_BULK_OUT_FULL) == 0) {
+			DEBUGP(4, dev, "BulkOut empty (i=%d)\n", i);
+			return 1;
+		}
+	}
+
+	DEBUGP(4, dev, "wait_event_interruptible_timeout(timeout=%ld\n",
+		dev->timeout);
+	rc = wait_event_interruptible_timeout(dev->write_wait,
+					      test_and_clear_bit(BS_WRITABLE,
+						       &dev->buffer_status),
+					      dev->timeout);
+
+	if (rc > 0)
+		DEBUGP(4, dev, "woke up: BulkOut empty\n");
+	else if (rc == 0)
+		DEBUGP(4, dev, "woke up: BulkOut full, returning 0 :(\n");
+	else if (rc < 0)
+		DEBUGP(4, dev, "woke up: signal arrived\n");
+
+	return rc;
+}
+
+/* Write to Sync Control Register */
+static int write_sync_reg(unsigned char val, struct reader_dev *dev)
+{
+	int iobase = dev->p_dev->resource[0]->start;
+	int rc;
+
+	rc = wait_for_bulk_out_ready(dev);
+	if (rc <= 0)
+		return rc;
+
+	xoutb(val, iobase + REG_OFFSET_SYNC_CONTROL);
+	rc = wait_for_bulk_out_ready(dev);
+	if (rc <= 0)
+		return rc;
+
+	return 1;
+}
+
+static int wait_for_bulk_in_ready(struct reader_dev *dev)
+{
+	int i, rc;
+	int iobase = dev->p_dev->resource[0]->start;
+
+	for (i = 0; i < POLL_LOOP_COUNT; i++) {
+		if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS)
+		    & BSR_BULK_IN_FULL) == BSR_BULK_IN_FULL) {
+			DEBUGP(3, dev, "BulkIn full (i=%d)\n", i);
+			return 1;
+		}
+	}
+
+	DEBUGP(4, dev, "wait_event_interruptible_timeout(timeout=%ld\n",
+		dev->timeout);
+	rc = wait_event_interruptible_timeout(dev->read_wait,
+					      test_and_clear_bit(BS_READABLE,
+						 	&dev->buffer_status),
+					      dev->timeout);
+	if (rc > 0)
+		DEBUGP(4, dev, "woke up: BulkIn full\n");
+	else if (rc == 0)
+		DEBUGP(4, dev, "woke up: BulkIn not full, returning 0 :(\n");
+	else if (rc < 0)
+		DEBUGP(4, dev, "woke up: signal arrived\n");
+
+	return rc;
+}
+
+static ssize_t cm4040_read(struct file *filp, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	struct reader_dev *dev = filp->private_data;
+	int iobase = dev->p_dev->resource[0]->start;
+	size_t bytes_to_read;
+	unsigned long i;
+	size_t min_bytes_to_read;
+	int rc;
+	unsigned char uc;
+
+	DEBUGP(2, dev, "-> cm4040_read(%s,%d)\n", current->comm, current->pid);
+
+	if (count == 0)
+		return 0;
+
+	if (count < 10)
+		return -EFAULT;
+
+	if (filp->f_flags & O_NONBLOCK) {
+		DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n");
+		DEBUGP(2, dev, "<- cm4040_read (failure)\n");
+		return -EAGAIN;
+	}
+
+	if (!pcmcia_dev_present(dev->p_dev))
+		return -ENODEV;
+
+	for (i = 0; i < 5; i++) {
+		rc = wait_for_bulk_in_ready(dev);
+		if (rc <= 0) {
+			DEBUGP(5, dev, "wait_for_bulk_in_ready rc=%.2x\n", rc);
+			DEBUGP(2, dev, "<- cm4040_read (failed)\n");
+			if (rc == -ERESTARTSYS)
+				return rc;
+			return -EIO;
+		}
+		dev->r_buf[i] = xinb(iobase + REG_OFFSET_BULK_IN);
+#ifdef CM4040_DEBUG
+		pr_debug("%lu:%2x ", i, dev->r_buf[i]);
+	}
+	pr_debug("\n");
+#else
+	}
+#endif
+
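+	/* bytes 1-4 of the 5 byte header read above hold the
+	 * little-endian CCID payload length */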
+	bytes_to_read = 5 + le32_to_cpu(*(__le32 *)&dev->r_buf[1]);
+
+	DEBUGP(6, dev, "BytesToRead=%zu\n", bytes_to_read);
+
+	min_bytes_to_read = min(count, bytes_to_read + 5);
+	min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE);
+
+	DEBUGP(6, dev, "Min=%zu\n", min_bytes_to_read);
+
+	for (i = 0; i < (min_bytes_to_read-5); i++) {
+		rc = wait_for_bulk_in_ready(dev);
+		if (rc <= 0) {
+			DEBUGP(5, dev, "wait_for_bulk_in_ready rc=%.2x\n", rc);
+			DEBUGP(2, dev, "<- cm4040_read (failed)\n");
+			if (rc == -ERESTARTSYS)
+				return rc;
+			return -EIO;
+		}
+		dev->r_buf[i+5] = xinb(iobase + REG_OFFSET_BULK_IN);
+#ifdef CM4040_DEBUG
+		pr_debug("%lu:%2x ", i, dev->r_buf[i]);
+	}
+	pr_debug("\n");
+#else
+	}
+#endif
+
+	*ppos = min_bytes_to_read;
+	if (copy_to_user(buf, dev->r_buf, min_bytes_to_read))
+		return -EFAULT;
+
+	rc = wait_for_bulk_in_ready(dev);
+	if (rc <= 0) {
+		DEBUGP(5, dev, "wait_for_bulk_in_ready rc=%.2x\n", rc);
+		DEBUGP(2, dev, "<- cm4040_read (failed)\n");
+		if (rc == -ERESTARTSYS)
+			return rc;
+		return -EIO;
+	}
+
+	rc = write_sync_reg(SCR_READER_TO_HOST_DONE, dev);
+	if (rc <= 0) {
+		DEBUGP(5, dev, "write_sync_reg c=%.2x\n", rc);
+		DEBUGP(2, dev, "<- cm4040_read (failed)\n");
+		if (rc == -ERESTARTSYS)
+			return rc;
+		else
+			return -EIO;
+	}
+
+	uc = xinb(iobase + REG_OFFSET_BULK_IN);
+
+	DEBUGP(2, dev, "<- cm4040_read (successfully)\n");
+	return min_bytes_to_read;
+}
+
+static ssize_t cm4040_write(struct file *filp, const char __user *buf,
+			 size_t count, loff_t *ppos)
+{
+	struct reader_dev *dev = filp->private_data;
+	int iobase = dev->p_dev->resource[0]->start;
+	ssize_t rc;
+	int i;
+	unsigned int bytes_to_write;
+
+	DEBUGP(2, dev, "-> cm4040_write(%s,%d)\n", current->comm, current->pid);
+
+	if (count == 0) {
+		DEBUGP(2, dev, "<- cm4040_write empty read (successfully)\n");
+		return 0;
+	}
+
+	if ((count < 5) || (count > READ_WRITE_BUFFER_SIZE)) {
+		DEBUGP(2, dev, "<- cm4040_write buffersize=%zd < 5\n", count);
+		return -EIO;
+	}
+
+	if (filp->f_flags & O_NONBLOCK) {
+		DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n");
+		DEBUGP(4, dev, "<- cm4040_write (failure)\n");
+		return -EAGAIN;
+	}
+
+	if (!pcmcia_dev_present(dev->p_dev))
+		return -ENODEV;
+
+	bytes_to_write = count;
+	if (copy_from_user(dev->s_buf, buf, bytes_to_write))
+		return -EFAULT;
+
+	switch (dev->s_buf[0]) {
+	case CMD_PC_TO_RDR_XFRBLOCK:
+	case CMD_PC_TO_RDR_SECURE:
+	case CMD_PC_TO_RDR_TEST_SECURE:
+	case CMD_PC_TO_RDR_OK_SECURE:
+		dev->timeout = CCID_DRIVER_BULK_DEFAULT_TIMEOUT;
+		break;
+
+	case CMD_PC_TO_RDR_ICCPOWERON:
+		dev->timeout = CCID_DRIVER_ASYNC_POWERUP_TIMEOUT;
+		break;
+
+	case CMD_PC_TO_RDR_GETSLOTSTATUS:
+	case CMD_PC_TO_RDR_ICCPOWEROFF:
+	case CMD_PC_TO_RDR_GETPARAMETERS:
+	case CMD_PC_TO_RDR_RESETPARAMETERS:
+	case CMD_PC_TO_RDR_SETPARAMETERS:
+	case CMD_PC_TO_RDR_ESCAPE:
+	case CMD_PC_TO_RDR_ICCCLOCK:
+	default:
+		dev->timeout = CCID_DRIVER_MINIMUM_TIMEOUT;
+		break;
+	}
+
+	rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
+	if (rc <= 0) {
+		DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
+		DEBUGP(2, dev, "<- cm4040_write (failed)\n");
+		if (rc == -ERESTARTSYS)
+			return rc;
+		else
+			return -EIO;
+	}
+
+	DEBUGP(4, dev, "start \n");
+
+	for (i = 0; i < bytes_to_write; i++) {
+		rc = wait_for_bulk_out_ready(dev);
+		if (rc <= 0) {
+			DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
+			       rc);
+			DEBUGP(2, dev, "<- cm4040_write (failed)\n");
+			if (rc == -ERESTARTSYS)
+				return rc;
+			else
+				return -EIO;
+		}
+
+		xoutb(dev->s_buf[i],iobase + REG_OFFSET_BULK_OUT);
+	}
+	DEBUGP(4, dev, "end\n");
+
+	rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
+
+	if (rc <= 0) {
+		DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
+		DEBUGP(2, dev, "<- cm4040_write (failed)\n");
+		if (rc == -ERESTARTSYS)
+			return rc;
+		else
+			return -EIO;
+	}
+
+	DEBUGP(2, dev, "<- cm4040_write (successfully)\n");
+	return count;
+}
+
+static __poll_t cm4040_poll(struct file *filp, poll_table *wait)
+{
+	struct reader_dev *dev = filp->private_data;
+	__poll_t mask = 0;
+
+	poll_wait(filp, &dev->poll_wait, wait);
+
+	if (test_and_clear_bit(BS_READABLE, &dev->buffer_status))
+		mask |= EPOLLIN | EPOLLRDNORM;
+	if (test_and_clear_bit(BS_WRITABLE, &dev->buffer_status))
+		mask |= EPOLLOUT | EPOLLWRNORM;
+
+	DEBUGP(2, dev, "<- cm4040_poll(%u)\n", mask);
+
+	return mask;
+}
+
+static int cm4040_open(struct inode *inode, struct file *filp)
+{
+	struct reader_dev *dev;
+	struct pcmcia_device *link;
+	int minor = iminor(inode);
+	int ret;
+
+	if (minor >= CM_MAX_DEV)
+		return -ENODEV;
+
+	mutex_lock(&cm4040_mutex);
+	link = dev_table[minor];
+	if (link == NULL || !pcmcia_dev_present(link)) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (link->open) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	dev = link->priv;
+	filp->private_data = dev;
+
+	if (filp->f_flags & O_NONBLOCK) {
+		DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n");
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	link->open = 1;
+
+	mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD);
+
+	DEBUGP(2, dev, "<- cm4040_open (successfully)\n");
+	ret = nonseekable_open(inode, filp);
+out:
+	mutex_unlock(&cm4040_mutex);
+	return ret;
+}
+
+static int cm4040_close(struct inode *inode, struct file *filp)
+{
+	struct reader_dev *dev = filp->private_data;
+	struct pcmcia_device *link;
+	int minor = iminor(inode);
+
+	DEBUGP(2, dev, "-> cm4040_close(maj/min=%d.%d)\n", imajor(inode),
+	      iminor(inode));
+
+	if (minor >= CM_MAX_DEV)
+		return -ENODEV;
+
+	link = dev_table[minor];
+	if (link == NULL)
+		return -ENODEV;
+
+	cm4040_stop_poll(dev);
+
+	link->open = 0;
+	wake_up(&dev->devq);
+
+	DEBUGP(2, dev, "<- cm4040_close\n");
+	return 0;
+}
+
+static void cm4040_reader_release(struct pcmcia_device *link)
+{
+	struct reader_dev *dev = link->priv;
+
+	DEBUGP(3, dev, "-> cm4040_reader_release\n");
+	while (link->open) {
+		DEBUGP(3, dev, "delaying release "
+		       "until process has terminated\n");
+		wait_event(dev->devq, (link->open == 0));
+	}
+	DEBUGP(3, dev, "<- cm4040_reader_release\n");
+}
+
+static int cm4040_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+	return pcmcia_request_io(p_dev);
+}
+
+
+static int reader_config(struct pcmcia_device *link, int devno)
+{
+	struct reader_dev *dev;
+	int fail_rc;
+
+	link->config_flags |= CONF_AUTO_SET_IO;
+
+	if (pcmcia_loop_config(link, cm4040_config_check, NULL))
+		goto cs_release;
+
+	fail_rc = pcmcia_enable_device(link);
+	if (fail_rc != 0) {
+		dev_info(&link->dev, "pcmcia_enable_device failed 0x%x\n",
+			 fail_rc);
+		goto cs_release;
+	}
+
+	dev = link->priv;
+
+	DEBUGP(2, dev, "device " DEVICE_NAME "%d at %pR\n", devno,
+	      link->resource[0]);
+	DEBUGP(2, dev, "<- reader_config (succ)\n");
+
+	return 0;
+
+cs_release:
+	reader_release(link);
+	return -ENODEV;
+}
+
+static void reader_release(struct pcmcia_device *link)
+{
+	cm4040_reader_release(link);
+	pcmcia_disable_device(link);
+}
+
+static int reader_probe(struct pcmcia_device *link)
+{
+	struct reader_dev *dev;
+	int i, ret;
+
+	for (i = 0; i < CM_MAX_DEV; i++) {
+		if (dev_table[i] == NULL)
+			break;
+	}
+
+	if (i == CM_MAX_DEV)
+		return -ENODEV;
+
+	dev = kzalloc(sizeof(struct reader_dev), GFP_KERNEL);
+	if (dev == NULL)
+		return -ENOMEM;
+
+	dev->timeout = CCID_DRIVER_MINIMUM_TIMEOUT;
+	dev->buffer_status = 0;
+
+	link->priv = dev;
+	dev->p_dev = link;
+
+	dev_table[i] = link;
+
+	init_waitqueue_head(&dev->devq);
+	init_waitqueue_head(&dev->poll_wait);
+	init_waitqueue_head(&dev->read_wait);
+	init_waitqueue_head(&dev->write_wait);
+	timer_setup(&dev->poll_timer, cm4040_do_poll, 0);
+
+	ret = reader_config(link, i);
+	if (ret) {
+		dev_table[i] = NULL;
+		kfree(dev);
+		return ret;
+	}
+
+	device_create(cmx_class, NULL, MKDEV(major, i), NULL, "cmx%d", i);
+
+	return 0;
+}
+
+static void reader_detach(struct pcmcia_device *link)
+{
+	struct reader_dev *dev = link->priv;
+	int devno;
+
+	/* find device */
+	for (devno = 0; devno < CM_MAX_DEV; devno++) {
+		if (dev_table[devno] == link)
+			break;
+	}
+	if (devno == CM_MAX_DEV)
+		return;
+
+	reader_release(link);
+
+	dev_table[devno] = NULL;
+	kfree(dev);
+
+	device_destroy(cmx_class, MKDEV(major, devno));
+}
+
+static const struct file_operations reader_fops = {
+	.owner		= THIS_MODULE,
+	.read		= cm4040_read,
+	.write		= cm4040_write,
+	.open		= cm4040_open,
+	.release	= cm4040_close,
+	.poll		= cm4040_poll,
+	.llseek		= no_llseek,
+};
+
+static const struct pcmcia_device_id cm4040_ids[] = {
+	PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0200),
+	PCMCIA_DEVICE_PROD_ID12("OMNIKEY", "CardMan 4040",
+				0xE32CDD8C, 0x8F23318B),
+	PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, cm4040_ids);
+
+static struct pcmcia_driver reader_driver = {
+	.owner		= THIS_MODULE,
+	.name		= "cm4040_cs",
+	.probe		= reader_probe,
+	.remove		= reader_detach,
+	.id_table	= cm4040_ids,
+};
+
+static int __init cm4040_init(void)
+{
+	int rc;
+
+	cmx_class = class_create(THIS_MODULE, "cardman_4040");
+	if (IS_ERR(cmx_class))
+		return PTR_ERR(cmx_class);
+
+	major = register_chrdev(0, DEVICE_NAME, &reader_fops);
+	if (major < 0) {
+		printk(KERN_WARNING MODULE_NAME
+			": could not get major number\n");
+		class_destroy(cmx_class);
+		return major;
+	}
+
+	rc = pcmcia_register_driver(&reader_driver);
+	if (rc < 0) {
+		unregister_chrdev(major, DEVICE_NAME);
+		class_destroy(cmx_class);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void __exit cm4040_exit(void)
+{
+	pcmcia_unregister_driver(&reader_driver);
+	unregister_chrdev(major, DEVICE_NAME);
+	class_destroy(cmx_class);
+}
+
+module_init(cm4040_init);
+module_exit(cm4040_exit);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/char/pcmcia/cm4040_cs.h b/drivers/char/pcmcia/cm4040_cs.h
new file mode 100644
index 0000000..e2ffff9
--- /dev/null
+++ b/drivers/char/pcmcia/cm4040_cs.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef	_CM4040_H_
+#define	_CM4040_H_
+
+#define	CM_MAX_DEV		4
+
+#define	DEVICE_NAME		"cmx"
+#define	MODULE_NAME		"cm4040_cs"
+
+#define REG_OFFSET_BULK_OUT      0
+#define REG_OFFSET_BULK_IN       0
+#define REG_OFFSET_BUFFER_STATUS 1
+#define REG_OFFSET_SYNC_CONTROL  2
+
+#define BSR_BULK_IN_FULL  0x02
+#define BSR_BULK_OUT_FULL 0x01
+
+#define SCR_HOST_TO_READER_START 0x80
+#define SCR_ABORT                0x40
+#define SCR_EN_NOTIFY            0x20
+#define SCR_ACK_NOTIFY           0x10
+#define SCR_READER_TO_HOST_DONE  0x08
+#define SCR_HOST_TO_READER_DONE  0x04
+#define SCR_PULSE_INTERRUPT      0x02
+#define SCR_POWER_DOWN           0x01
+
+
+#define  CMD_PC_TO_RDR_ICCPOWERON       0x62
+#define  CMD_PC_TO_RDR_GETSLOTSTATUS    0x65
+#define  CMD_PC_TO_RDR_ICCPOWEROFF      0x63
+#define  CMD_PC_TO_RDR_SECURE           0x69
+#define  CMD_PC_TO_RDR_GETPARAMETERS    0x6C
+#define  CMD_PC_TO_RDR_RESETPARAMETERS  0x6D
+#define  CMD_PC_TO_RDR_SETPARAMETERS    0x61
+#define  CMD_PC_TO_RDR_XFRBLOCK         0x6F
+#define  CMD_PC_TO_RDR_ESCAPE           0x6B
+#define  CMD_PC_TO_RDR_ICCCLOCK         0x6E
+#define  CMD_PC_TO_RDR_TEST_SECURE      0x74
+#define  CMD_PC_TO_RDR_OK_SECURE        0x89
+
+
+#define  CMD_RDR_TO_PC_SLOTSTATUS         0x81
+#define  CMD_RDR_TO_PC_DATABLOCK          0x80
+#define  CMD_RDR_TO_PC_PARAMETERS         0x82
+#define  CMD_RDR_TO_PC_ESCAPE             0x83
+#define  CMD_RDR_TO_PC_OK_SECURE          0x89
+
+#endif	/* _CM4040_H_ */
diff --git a/drivers/char/pcmcia/scr24x_cs.c b/drivers/char/pcmcia/scr24x_cs.c
new file mode 100644
index 0000000..f6b43d9
--- /dev/null
+++ b/drivers/char/pcmcia/scr24x_cs.c
@@ -0,0 +1,373 @@
+/*
+ * SCR24x PCMCIA Smart Card Reader Driver
+ *
+ * Copyright (C) 2005-2006 TL Sudheendran
+ * Copyright (C) 2016 Lubomir Rintel
+ *
+ * Derived from "scr24x_v4.2.6_Release.tar.gz" driver by TL Sudheendran.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#define CCID_HEADER_SIZE	10
+#define CCID_LENGTH_OFFSET	1
+#define CCID_MAX_LEN		271
+
+#define SCR24X_DATA(n)		(1 + (n))
+#define SCR24X_CMD_STATUS	7
+#define CMD_START		0x40
+#define CMD_WRITE_BYTE		0x41
+#define CMD_READ_BYTE		0x42
+#define STATUS_BUSY		0x80
+
+struct scr24x_dev {
+	struct device *dev;
+	struct cdev c_dev;
+	unsigned char buf[CCID_MAX_LEN];
+	int devno;
+	struct mutex lock;
+	struct kref refcnt;
+	u8 __iomem *regs;
+};
+
+#define SCR24X_DEVS 8
+static DECLARE_BITMAP(scr24x_minors, SCR24X_DEVS);
+
+static struct class *scr24x_class;
+static dev_t scr24x_devt;
+
+static void scr24x_delete(struct kref *kref)
+{
+	struct scr24x_dev *dev = container_of(kref, struct scr24x_dev,
+								refcnt);
+
+	kfree(dev);
+}
+
+static int scr24x_wait_ready(struct scr24x_dev *dev)
+{
+	u_char status;
+	int timeout = 100;
+
+	do {
+		status = ioread8(dev->regs + SCR24X_CMD_STATUS);
+		if (!(status & STATUS_BUSY))
+			return 0;
+
+		msleep(20);
+	} while (--timeout);
+
+	return -EIO;
+}
+
+static int scr24x_open(struct inode *inode, struct file *filp)
+{
+	struct scr24x_dev *dev = container_of(inode->i_cdev,
+				struct scr24x_dev, c_dev);
+
+	kref_get(&dev->refcnt);
+	filp->private_data = dev;
+
+	return nonseekable_open(inode, filp);
+}
+
+static int scr24x_release(struct inode *inode, struct file *filp)
+{
+	struct scr24x_dev *dev = filp->private_data;
+
+	/* We must not take the dev->lock here as scr24x_delete()
+	 * might be called to remove the dev structure altogether.
+	 * We don't need the lock anyway, since after the reference
+	 * acquired in probe() is released in remove() the chrdev
+	 * is already unregistered and no one can possibly acquire
+	 * a reference via open() anymore. */
+	kref_put(&dev->refcnt, scr24x_delete);
+	return 0;
+}
+
+static int read_chunk(struct scr24x_dev *dev, size_t offset, size_t limit)
+{
+	size_t i, y;
+	int ret;
+
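+	/* each CMD_READ_BYTE latches up to five bytes into the
+	 * SCR24X_DATA(0)..SCR24X_DATA(4) ports */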
+	for (i = offset; i < limit; i += 5) {
+		iowrite8(CMD_READ_BYTE, dev->regs + SCR24X_CMD_STATUS);
+		ret = scr24x_wait_ready(dev);
+		if (ret < 0)
+			return ret;
+
+		for (y = 0; y < 5 && i + y < limit; y++)
+			dev->buf[i + y] = ioread8(dev->regs + SCR24X_DATA(y));
+	}
+
+	return 0;
+}
+
+static ssize_t scr24x_read(struct file *filp, char __user *buf, size_t count,
+								loff_t *ppos)
+{
+	struct scr24x_dev *dev = filp->private_data;
+	int ret;
+	int len;
+
+	if (count < CCID_HEADER_SIZE)
+		return -EINVAL;
+
+	if (mutex_lock_interruptible(&dev->lock))
+		return -ERESTARTSYS;
+
+	if (!dev->dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ret = scr24x_wait_ready(dev);
+	if (ret < 0)
+		goto out;
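+	/* fetch the fixed-size CCID header first, then the payload
+	 * length it announces */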
+	len = CCID_HEADER_SIZE;
+	ret = read_chunk(dev, 0, len);
+	if (ret < 0)
+		goto out;
+
+	len += le32_to_cpu(*(__le32 *)(&dev->buf[CCID_LENGTH_OFFSET]));
+	if (len > sizeof(dev->buf)) {
+		ret = -EIO;
+		goto out;
+	}
+	ret = read_chunk(dev, CCID_HEADER_SIZE, len);
+	if (ret < 0)
+		goto out;
+
+	if (len < count)
+		count = len;
+
+	if (copy_to_user(buf, dev->buf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = count;
+out:
+	mutex_unlock(&dev->lock);
+	return ret;
+}
+
+static ssize_t scr24x_write(struct file *filp, const char __user *buf,
+					size_t count, loff_t *ppos)
+{
+	struct scr24x_dev *dev = filp->private_data;
+	size_t i, y;
+	int ret;
+
+	if (mutex_lock_interruptible(&dev->lock))
+		return -ERESTARTSYS;
+
+	if (!dev->dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (count > sizeof(dev->buf)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (copy_from_user(dev->buf, buf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = scr24x_wait_ready(dev);
+	if (ret < 0)
+		goto out;
+
+	iowrite8(CMD_START, dev->regs + SCR24X_CMD_STATUS);
+	ret = scr24x_wait_ready(dev);
+	if (ret < 0)
+		goto out;
+
+	for (i = 0; i < count; i += 5) {
+		for (y = 0; y < 5 && i + y < count; y++)
+			iowrite8(dev->buf[i + y], dev->regs + SCR24X_DATA(y));
+
+		iowrite8(CMD_WRITE_BYTE, dev->regs + SCR24X_CMD_STATUS);
+		ret = scr24x_wait_ready(dev);
+		if (ret < 0)
+			goto out;
+	}
+
+	ret = count;
+out:
+	mutex_unlock(&dev->lock);
+	return ret;
+}
+
+static const struct file_operations scr24x_fops = {
+	.owner		= THIS_MODULE,
+	.read		= scr24x_read,
+	.write		= scr24x_write,
+	.open		= scr24x_open,
+	.release	= scr24x_release,
+	.llseek		= no_llseek,
+};
+
+static int scr24x_config_check(struct pcmcia_device *link, void *priv_data)
+{
+	if (resource_size(link->resource[PCMCIA_IOPORT_0]) != 0x11)
+		return -ENODEV;
+	return pcmcia_request_io(link);
+}
+
+static int scr24x_probe(struct pcmcia_device *link)
+{
+	struct scr24x_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->devno = find_first_zero_bit(scr24x_minors, SCR24X_DEVS);
+	if (dev->devno >= SCR24X_DEVS) {
+		ret = -EBUSY;
+		goto err;
+	}
+	/* reserve the minor; remove() and the error path clear it again */
+	set_bit(dev->devno, scr24x_minors);
+
+	mutex_init(&dev->lock);
+	kref_init(&dev->refcnt);
+
+	link->priv = dev;
+	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+
+	ret = pcmcia_loop_config(link, scr24x_config_check, NULL);
+	if (ret < 0)
+		goto err;
+
+	dev->dev = &link->dev;
+	dev->regs = devm_ioport_map(&link->dev,
+				link->resource[PCMCIA_IOPORT_0]->start,
+				resource_size(link->resource[PCMCIA_IOPORT_0]));
+	if (!dev->regs) {
+		ret = -EIO;
+		goto err;
+	}
+
+	cdev_init(&dev->c_dev, &scr24x_fops);
+	dev->c_dev.owner = THIS_MODULE;
+	dev->c_dev.ops = &scr24x_fops;
+	ret = cdev_add(&dev->c_dev, MKDEV(MAJOR(scr24x_devt), dev->devno), 1);
+	if (ret < 0)
+		goto err;
+
+	ret = pcmcia_enable_device(link);
+	if (ret < 0) {
+		pcmcia_disable_device(link);
+		goto err;
+	}
+
+	device_create(scr24x_class, NULL, MKDEV(MAJOR(scr24x_devt), dev->devno),
+		      NULL, "scr24x%d", dev->devno);
+
+	dev_info(&link->dev, "SCR24x Chip Card Interface\n");
+	return 0;
+
+err:
+	if (dev->devno < SCR24X_DEVS)
+		clear_bit(dev->devno, scr24x_minors);
+	kfree(dev);
+	return ret;
+}
+
+static void scr24x_remove(struct pcmcia_device *link)
+{
+	struct scr24x_dev *dev = (struct scr24x_dev *)link->priv;
+
+	device_destroy(scr24x_class, MKDEV(MAJOR(scr24x_devt), dev->devno));
+	mutex_lock(&dev->lock);
+	pcmcia_disable_device(link);
+	cdev_del(&dev->c_dev);
+	clear_bit(dev->devno, scr24x_minors);
+	dev->dev = NULL;
+	mutex_unlock(&dev->lock);
+
+	kref_put(&dev->refcnt, scr24x_delete);
+}
+
+static const struct pcmcia_device_id scr24x_ids[] = {
+	PCMCIA_DEVICE_PROD_ID12("HP", "PC Card Smart Card Reader",
+					0x53cb94f9, 0xbfdf89a5),
+	PCMCIA_DEVICE_PROD_ID1("SCR241 PCMCIA", 0x6271efa3),
+	PCMCIA_DEVICE_PROD_ID1("SCR243 PCMCIA", 0x2054e8de),
+	PCMCIA_DEVICE_PROD_ID1("SCR24x PCMCIA", 0x54a33665),
+	PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, scr24x_ids);
+
+static struct pcmcia_driver scr24x_driver = {
+	.owner		= THIS_MODULE,
+	.name		= "scr24x_cs",
+	.probe		= scr24x_probe,
+	.remove		= scr24x_remove,
+	.id_table	= scr24x_ids,
+};
+
+static int __init scr24x_init(void)
+{
+	int ret;
+
+	scr24x_class = class_create(THIS_MODULE, "scr24x");
+	if (IS_ERR(scr24x_class))
+		return PTR_ERR(scr24x_class);
+
+	ret = alloc_chrdev_region(&scr24x_devt, 0, SCR24X_DEVS, "scr24x");
+	if (ret < 0)  {
+		class_destroy(scr24x_class);
+		return ret;
+	}
+
+	ret = pcmcia_register_driver(&scr24x_driver);
+	if (ret < 0) {
+		unregister_chrdev_region(scr24x_devt, SCR24X_DEVS);
+		class_destroy(scr24x_class);
+	}
+
+	return ret;
+}
+
+static void __exit scr24x_exit(void)
+{
+	pcmcia_unregister_driver(&scr24x_driver);
+	unregister_chrdev_region(scr24x_devt, SCR24X_DEVS);
+	class_destroy(scr24x_class);
+}
+
+module_init(scr24x_init);
+module_exit(scr24x_exit);
+
+MODULE_AUTHOR("Lubomir Rintel");
+MODULE_DESCRIPTION("SCR24x PCMCIA Smart Card Reader Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
new file mode 100644
index 0000000..66b0419
--- /dev/null
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -0,0 +1,4305 @@
+/*
+ * linux/drivers/char/pcmcia/synclink_cs.c
+ *
+ * $Id: synclink_cs.c,v 4.34 2005/09/08 13:20:54 paulkf Exp $
+ *
+ * Device driver for Microgate SyncLink PC Card
+ * multiprotocol serial adapter.
+ *
+ * written by Paul Fulghum for Microgate Corporation
+ * paulkf@microgate.com
+ *
+ * Microgate and SyncLink are trademarks of Microgate Corporation
+ *
+ * This code is released under the GNU General Public License (GPL)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define VERSION(ver,rel,seq) (((ver)<<16) | ((rel)<<8) | (seq))
+#if defined(__i386__)
+#  define BREAKPOINT() asm("   int $3");
+#else
+#  define BREAKPOINT() { }
+#endif
+
+#define MAX_DEVICE_COUNT 4
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/ioctl.h>
+#include <linux/synclink.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <linux/bitops.h>
+#include <asm/types.h>
+#include <linux/termios.h>
+#include <linux/workqueue.h>
+#include <linux/hdlc.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ds.h>
+
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_CS_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
+#endif
+
+#define GET_USER(error,value,addr) error = get_user(value,addr)
+#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
+#define PUT_USER(error,value,addr) error = put_user(value,addr)
+#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
+
+#include <linux/uaccess.h>
+
+static MGSL_PARAMS default_params = {
+	MGSL_MODE_HDLC,			/* unsigned long mode */
+	0,				/* unsigned char loopback; */
+	HDLC_FLAG_UNDERRUN_ABORT15,	/* unsigned short flags; */
+	HDLC_ENCODING_NRZI_SPACE,	/* unsigned char encoding; */
+	0,				/* unsigned long clock_speed; */
+	0xff,				/* unsigned char addr_filter; */
+	HDLC_CRC_16_CCITT,		/* unsigned short crc_type; */
+	HDLC_PREAMBLE_LENGTH_8BITS,	/* unsigned char preamble_length; */
+	HDLC_PREAMBLE_PATTERN_NONE,	/* unsigned char preamble; */
+	9600,				/* unsigned long data_rate; */
+	8,				/* unsigned char data_bits; */
+	1,				/* unsigned char stop_bits; */
+	ASYNC_PARITY_NONE		/* unsigned char parity; */
+};
+
+typedef struct {
+	int count;
+	unsigned char status;
+	char data[1];
+} RXBUF;
+
+/* The queue of BH actions to be performed */
+
+#define BH_RECEIVE  1
+#define BH_TRANSMIT 2
+#define BH_STATUS   4
+
+#define IO_PIN_SHUTDOWN_LIMIT 100
+
+#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
+
+struct _input_signal_events {
+	int	ri_up;
+	int	ri_down;
+	int	dsr_up;
+	int	dsr_down;
+	int	dcd_up;
+	int	dcd_down;
+	int	cts_up;
+	int	cts_down;
+};
+
+
+/*
+ * Device instance data structure
+ */
+
+typedef struct _mgslpc_info {
+	struct tty_port		port;
+	void *if_ptr;	/* General purpose pointer (used by SPPP) */
+	int			magic;
+	int			line;
+
+	struct mgsl_icount	icount;
+
+	int			timeout;
+	int			x_char;		/* xon/xoff character */
+	unsigned char		read_status_mask;
+	unsigned char		ignore_status_mask;
+
+	unsigned char *tx_buf;
+	int            tx_put;
+	int            tx_get;
+	int            tx_count;
+
+	/* circular list of fixed length rx buffers */
+
+	unsigned char  *rx_buf;        /* memory allocated for all rx buffers */
+	int            rx_buf_total_size; /* size of memory allocated for rx buffers */
+	int            rx_put;         /* index of next empty rx buffer */
+	int            rx_get;         /* index of next full rx buffer */
+	int            rx_buf_size;    /* size in bytes of single rx buffer */
+	int            rx_buf_count;   /* total number of rx buffers */
+	int            rx_frame_count; /* number of full rx buffers */
+
+	wait_queue_head_t	status_event_wait_q;
+	wait_queue_head_t	event_wait_q;
+	struct timer_list	tx_timer;	/* HDLC transmit timeout timer */
+	struct _mgslpc_info	*next_device;	/* device list link */
+
+	unsigned short imra_value;
+	unsigned short imrb_value;
+	unsigned char  pim_value;
+
+	spinlock_t lock;
+	struct work_struct task;		/* task structure for scheduling bh */
+
+	u32 max_frame_size;
+
+	u32 pending_bh;
+
+	bool bh_running;
+	bool bh_requested;
+
+	int dcd_chkcount; /* check counts to prevent */
+	int cts_chkcount; /* too many IRQs if a signal */
+	int dsr_chkcount; /* is floating */
+	int ri_chkcount;
+
+	bool rx_enabled;
+	bool rx_overflow;
+
+	bool tx_enabled;
+	bool tx_active;
+	bool tx_aborting;
+	u32 idle_mode;
+
+	int if_mode; /* serial interface selection (RS-232, v.35 etc) */
+
+	char device_name[25];		/* device instance name */
+
+	unsigned int io_base;	/* base I/O address of adapter */
+	unsigned int irq_level;
+
+	MGSL_PARAMS params;		/* communications parameters */
+
+	unsigned char serial_signals;	/* current serial signal states */
+
+	bool irq_occurred;		/* for diagnostics use */
+	char testing_irq;
+	unsigned int init_error;	/* startup error (DIAGS)	*/
+
+	char *flag_buf;
+	bool drop_rts_on_tx_done;
+
+	struct	_input_signal_events	input_signal_events;
+
+	/* PCMCIA support */
+	struct pcmcia_device	*p_dev;
+	int		      stop;
+
+	/* SPPP/Cisco HDLC device parts */
+	int netcount;
+	spinlock_t netlock;
+
+#if SYNCLINK_GENERIC_HDLC
+	struct net_device *netdev;
+#endif
+
+} MGSLPC_INFO;
+
+#define MGSLPC_MAGIC 0x5402
+
+/*
+ * The size of the serial xmit buffer is 1 page, or 4096 bytes
+ */
+#define TXBUFSIZE 4096
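+
+/*
+ * Because TXBUFSIZE is a power of two, the circular buffer indices
+ * wrap with a mask instead of a modulo, as in tx_ready():
+ *
+ *   info->tx_get = (info->tx_get + c) & (TXBUFSIZE - 1);
+ */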
+
+
+#define CHA     0x00   /* channel A offset */
+#define CHB     0x40   /* channel B offset */
+
+/*
+ *  FIXME: PPC has PVR defined in asm/reg.h.  For now we just undef it.
+ */
+#undef PVR
+
+#define RXFIFO  0
+#define TXFIFO  0
+#define STAR    0x20
+#define CMDR    0x20
+#define RSTA    0x21
+#define PRE     0x21
+#define MODE    0x22
+#define TIMR    0x23
+#define XAD1    0x24
+#define XAD2    0x25
+#define RAH1    0x26
+#define RAH2    0x27
+#define DAFO    0x27
+#define RAL1    0x28
+#define RFC     0x28
+#define RHCR    0x29
+#define RAL2    0x29
+#define RBCL    0x2a
+#define XBCL    0x2a
+#define RBCH    0x2b
+#define XBCH    0x2b
+#define CCR0    0x2c
+#define CCR1    0x2d
+#define CCR2    0x2e
+#define CCR3    0x2f
+#define VSTR    0x34
+#define BGR     0x34
+#define RLCR    0x35
+#define AML     0x36
+#define AMH     0x37
+#define GIS     0x38
+#define IVA     0x38
+#define IPC     0x39
+#define ISR     0x3a
+#define IMR     0x3a
+#define PVR     0x3c
+#define PIS     0x3d
+#define PIM     0x3d
+#define PCR     0x3e
+#define CCR4    0x3f
+
+// IMR/ISR
+
+#define IRQ_BREAK_ON    BIT15   // rx break detected
+#define IRQ_DATAOVERRUN BIT14	// receive data overflow
+#define IRQ_ALLSENT     BIT13	// all sent
+#define IRQ_UNDERRUN    BIT12	// transmit data underrun
+#define IRQ_TIMER       BIT11	// timer interrupt
+#define IRQ_CTS         BIT10	// CTS status change
+#define IRQ_TXREPEAT    BIT9	// tx message repeat
+#define IRQ_TXFIFO      BIT8	// transmit pool ready
+#define IRQ_RXEOM       BIT7	// receive message end
+#define IRQ_EXITHUNT    BIT6	// receive frame start
+#define IRQ_RXTIME      BIT6    // rx char timeout
+#define IRQ_DCD         BIT2	// carrier detect status change
+#define IRQ_OVERRUN     BIT1	// receive frame overflow
+#define IRQ_RXFIFO      BIT0	// receive pool full
+
+// STAR
+
+#define XFW   BIT6		// transmit FIFO write enable
+#define CEC   BIT2		// command executing
+#define CTS   BIT1		// CTS state
+
+#define PVR_DTR      BIT0
+#define PVR_DSR      BIT1
+#define PVR_RI       BIT2
+#define PVR_AUTOCTS  BIT3
+#define PVR_RS232    0x20   /* 0010b */
+#define PVR_V35      0xe0   /* 1110b */
+#define PVR_RS422    0x40   /* 0100b */
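+
+/*
+ * The low PVR nibble carries the modem-signal bits above; the high
+ * nibble selects the electrical interface, so set_interface() does a
+ * read-modify-write:
+ *
+ *   val = read_reg(info, PVR) & 0x0f;
+ *   write_reg(info, PVR, val | PVR_V35);
+ */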
+
+/* Register access functions */
+
+#define write_reg(info, reg, val) outb((val),(info)->io_base + (reg))
+#define read_reg(info, reg) inb((info)->io_base + (reg))
+
+#define read_reg16(info, reg) inw((info)->io_base + (reg))
+#define write_reg16(info, reg, val) outw((val), (info)->io_base + (reg))
+
+#define set_reg_bits(info, reg, mask) \
+	write_reg(info, (reg), \
+		 (unsigned char) (read_reg(info, (reg)) | (mask)))
+#define clear_reg_bits(info, reg, mask) \
+	write_reg(info, (reg), \
+		 (unsigned char) (read_reg(info, (reg)) & ~(mask)))
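+
+/*
+ * Example: mgslpc_break() toggles the transmit-break bit with these
+ * helpers:
+ *
+ *   set_reg_bits(info, CHA + DAFO, BIT6);    (start break)
+ *   clear_reg_bits(info, CHA + DAFO, BIT6);  (end break)
+ */
+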
+/*
+ * interrupt enable/disable routines
+ */
+static void irq_disable(MGSLPC_INFO *info, unsigned char channel, unsigned short mask)
+{
+	if (channel == CHA) {
+		info->imra_value |= mask;
+		write_reg16(info, CHA + IMR, info->imra_value);
+	} else {
+		info->imrb_value |= mask;
+		write_reg16(info, CHB + IMR, info->imrb_value);
+	}
+}
+static void irq_enable(MGSLPC_INFO *info, unsigned char channel, unsigned short mask)
+{
+	if (channel == CHA) {
+		info->imra_value &= ~mask;
+		write_reg16(info, CHA + IMR, info->imra_value);
+	} else {
+		info->imrb_value &= ~mask;
+		write_reg16(info, CHB + IMR, info->imrb_value);
+	}
+}
+
+#define port_irq_disable(info, mask) \
+	{ info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); }
+
+#define port_irq_enable(info, mask) \
+	{ info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); }
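+
+/*
+ * Note the inverted sense: IMR/PIM bits are interrupt masks, so
+ * "disable" sets bits and "enable" clears them. mgslpc_program_hw()
+ * unmasks the modem-signal interrupts with:
+ *
+ *   irq_enable(info, CHB, IRQ_DCD | IRQ_CTS);
+ *   port_irq_enable(info, (unsigned char) PVR_DSR | PVR_RI);
+ */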
+
+static void rx_start(MGSLPC_INFO *info);
+static void rx_stop(MGSLPC_INFO *info);
+
+static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty);
+static void tx_stop(MGSLPC_INFO *info);
+static void tx_set_idle(MGSLPC_INFO *info);
+
+static void get_signals(MGSLPC_INFO *info);
+static void set_signals(MGSLPC_INFO *info);
+
+static void reset_device(MGSLPC_INFO *info);
+
+static void hdlc_mode(MGSLPC_INFO *info);
+static void async_mode(MGSLPC_INFO *info);
+
+static void tx_timeout(struct timer_list *t);
+
+static int carrier_raised(struct tty_port *port);
+static void dtr_rts(struct tty_port *port, int onoff);
+
+#if SYNCLINK_GENERIC_HDLC
+#define dev_to_port(D) (dev_to_hdlc(D)->priv)
+static void hdlcdev_tx_done(MGSLPC_INFO *info);
+static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size);
+static int  hdlcdev_init(MGSLPC_INFO *info);
+static void hdlcdev_exit(MGSLPC_INFO *info);
+#endif
+
+static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit);
+
+static bool register_test(MGSLPC_INFO *info);
+static bool irq_test(MGSLPC_INFO *info);
+static int adapter_test(MGSLPC_INFO *info);
+
+static int claim_resources(MGSLPC_INFO *info);
+static void release_resources(MGSLPC_INFO *info);
+static int mgslpc_add_device(MGSLPC_INFO *info);
+static void mgslpc_remove_device(MGSLPC_INFO *info);
+
+static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty);
+static void rx_reset_buffers(MGSLPC_INFO *info);
+static int  rx_alloc_buffers(MGSLPC_INFO *info);
+static void rx_free_buffers(MGSLPC_INFO *info);
+
+static irqreturn_t mgslpc_isr(int irq, void *dev_id);
+
+/*
+ * Bottom half interrupt handlers
+ */
+static void bh_handler(struct work_struct *work);
+static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty);
+static void bh_status(MGSLPC_INFO *info);
+
+/*
+ * ioctl handlers
+ */
+static int tiocmget(struct tty_struct *tty);
+static int tiocmset(struct tty_struct *tty,
+					unsigned int set, unsigned int clear);
+static int get_stats(MGSLPC_INFO *info, struct mgsl_icount __user *user_icount);
+static int get_params(MGSLPC_INFO *info, MGSL_PARAMS __user *user_params);
+static int set_params(MGSLPC_INFO *info, MGSL_PARAMS __user *new_params, struct tty_struct *tty);
+static int get_txidle(MGSLPC_INFO *info, int __user *idle_mode);
+static int set_txidle(MGSLPC_INFO *info, int idle_mode);
+static int set_txenable(MGSLPC_INFO *info, int enable, struct tty_struct *tty);
+static int tx_abort(MGSLPC_INFO *info);
+static int set_rxenable(MGSLPC_INFO *info, int enable);
+static int wait_events(MGSLPC_INFO *info, int __user *mask);
+
+static MGSLPC_INFO *mgslpc_device_list = NULL;
+static int mgslpc_device_count = 0;
+
+/*
+ * Set this param to non-zero to load eax with the .text section
+ * address and trigger a breakpoint on module load. This is useful
+ * with gdb and the add-symbol-file command.
+ */
+static bool break_on_load;
+
+/*
+ * Driver major number, defaults to zero to get auto
+ * assigned major number. May be forced as module parameter.
+ */
+static int ttymajor=0;
+
+static int debug_level = 0;
+static int maxframe[MAX_DEVICE_COUNT] = {0,};
+
+module_param(break_on_load, bool, 0);
+module_param(ttymajor, int, 0);
+module_param(debug_level, int, 0);
+module_param_array(maxframe, int, NULL, 0);
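+
+/*
+ * Example module load (illustrative values only):
+ *
+ *   modprobe synclink_cs ttymajor=240 debug_level=1 maxframe=4096
+ */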
+
+MODULE_LICENSE("GPL");
+
+static char *driver_name = "SyncLink PC Card driver";
+static char *driver_version = "$Revision: 4.34 $";
+
+static struct tty_driver *serial_driver;
+
+/* number of characters left in xmit buffer before we ask for more */
+#define WAKEUP_CHARS 256
+
+static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty);
+static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout);
+
+/* PCMCIA prototypes */
+
+static int mgslpc_config(struct pcmcia_device *link);
+static void mgslpc_release(u_long arg);
+static void mgslpc_detach(struct pcmcia_device *p_dev);
+
+/*
+ * 1st function defined in .text section. Calling this function in
+ * init_module() followed by a breakpoint allows a remote debugger
+ * (gdb) to get the .text address for the add-symbol-file command.
+ * This allows remote debugging of dynamically loadable modules.
+ */
+static void* mgslpc_get_text_ptr(void)
+{
+	return mgslpc_get_text_ptr;
+}
+
+/**
+ * line discipline callback wrappers
+ *
+ * The wrappers maintain line discipline references
+ * while calling into the line discipline.
+ *
+ * ldisc_receive_buf  - pass receive data to line discipline
+ */
+
+static void ldisc_receive_buf(struct tty_struct *tty,
+			      const __u8 *data, char *flags, int count)
+{
+	struct tty_ldisc *ld;
+	if (!tty)
+		return;
+	ld = tty_ldisc_ref(tty);
+	if (ld) {
+		if (ld->ops->receive_buf)
+			ld->ops->receive_buf(tty, data, flags, count);
+		tty_ldisc_deref(ld);
+	}
+}
+
+static const struct tty_port_operations mgslpc_port_ops = {
+	.carrier_raised = carrier_raised,
+	.dtr_rts = dtr_rts
+};
+
+static int mgslpc_probe(struct pcmcia_device *link)
+{
+	MGSLPC_INFO *info;
+	int ret;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("mgslpc_attach\n");
+
+	info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
+	if (!info) {
+		printk(KERN_ERR "Error: can't allocate device instance data\n");
+		return -ENOMEM;
+	}
+
+	info->magic = MGSLPC_MAGIC;
+	tty_port_init(&info->port);
+	info->port.ops = &mgslpc_port_ops;
+	INIT_WORK(&info->task, bh_handler);
+	info->max_frame_size = 4096;
+	info->port.close_delay = 5*HZ/10;
+	info->port.closing_wait = 30*HZ;
+	init_waitqueue_head(&info->status_event_wait_q);
+	init_waitqueue_head(&info->event_wait_q);
+	spin_lock_init(&info->lock);
+	spin_lock_init(&info->netlock);
+	memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
+	info->idle_mode = HDLC_TXIDLE_FLAGS;
+	info->imra_value = 0xffff;
+	info->imrb_value = 0xffff;
+	info->pim_value = 0xff;
+
+	info->p_dev = link;
+	link->priv = info;
+
+	/* Initialize the struct pcmcia_device structure */
+
+	ret = mgslpc_config(link);
+	if (ret != 0)
+		goto failed;
+
+	ret = mgslpc_add_device(info);
+	if (ret != 0)
+		goto failed_release;
+
+	return 0;
+
+failed_release:
+	mgslpc_release((u_long)link);
+failed:
+	tty_port_destroy(&info->port);
+	kfree(info);
+	return ret;
+}
+
+/* Card has been inserted.
+ */
+
+static int mgslpc_ioprobe(struct pcmcia_device *p_dev, void *priv_data)
+{
+	return pcmcia_request_io(p_dev);
+}
+
+static int mgslpc_config(struct pcmcia_device *link)
+{
+	MGSLPC_INFO *info = link->priv;
+	int ret;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("mgslpc_config(0x%p)\n", link);
+
+	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+
+	ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
+	if (ret != 0)
+		goto failed;
+
+	link->config_index = 8;
+	link->config_regs = PRESENT_OPTION;
+
+	ret = pcmcia_request_irq(link, mgslpc_isr);
+	if (ret)
+		goto failed;
+	ret = pcmcia_enable_device(link);
+	if (ret)
+		goto failed;
+
+	info->io_base = link->resource[0]->start;
+	info->irq_level = link->irq;
+	return 0;
+
+failed:
+	mgslpc_release((u_long)link);
+	return -ENODEV;
+}
+
+/* Release the PCMCIA configuration.
+ * Called on card removal and on configuration failure.
+ */
+static void mgslpc_release(u_long arg)
+{
+	struct pcmcia_device *link = (struct pcmcia_device *)arg;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("mgslpc_release(0x%p)\n", link);
+
+	pcmcia_disable_device(link);
+}
+
+static void mgslpc_detach(struct pcmcia_device *link)
+{
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("mgslpc_detach(0x%p)\n", link);
+
+	((MGSLPC_INFO *)link->priv)->stop = 1;
+	mgslpc_release((u_long)link);
+
+	mgslpc_remove_device((MGSLPC_INFO *)link->priv);
+}
+
+static int mgslpc_suspend(struct pcmcia_device *link)
+{
+	MGSLPC_INFO *info = link->priv;
+
+	info->stop = 1;
+
+	return 0;
+}
+
+static int mgslpc_resume(struct pcmcia_device *link)
+{
+	MGSLPC_INFO *info = link->priv;
+
+	info->stop = 0;
+
+	return 0;
+}
+
+
+static inline bool mgslpc_paranoia_check(MGSLPC_INFO *info,
+					char *name, const char *routine)
+{
+#ifdef MGSLPC_PARANOIA_CHECK
+	static const char *badmagic =
+		"Warning: bad magic number for mgsl struct (%s) in %s\n";
+	static const char *badinfo =
+		"Warning: null mgslpc_info for (%s) in %s\n";
+
+	if (!info) {
+		printk(badinfo, name, routine);
+		return true;
+	}
+	if (info->magic != MGSLPC_MAGIC) {
+		printk(badmagic, name, routine);
+		return true;
+	}
+#else
+	if (!info)
+		return true;
+#endif
+	return false;
+}
+
+
+#define CMD_RXFIFO      BIT7	// release current rx FIFO
+#define CMD_RXRESET     BIT6	// receiver reset
+#define CMD_RXFIFO_READ BIT5
+#define CMD_START_TIMER BIT4
+#define CMD_TXFIFO      BIT3	// release current tx FIFO
+#define CMD_TXEOM       BIT1	// transmit end message
+#define CMD_TXRESET     BIT0	// transmit reset
+
+static bool wait_command_complete(MGSLPC_INFO *info, unsigned char channel)
+{
+	int i = 0;
+	/* wait for command completion */
+	while (read_reg(info, (unsigned char)(channel+STAR)) & BIT2) {
+		udelay(1);
+		if (i++ == 1000)
+			return false;
+	}
+	return true;
+}
+
+static void issue_command(MGSLPC_INFO *info, unsigned char channel, unsigned char cmd)
+{
+	wait_command_complete(info, channel);
+	write_reg(info, (unsigned char) (channel + CMDR), cmd);
+}
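+
+/*
+ * Command bits may be combined in a single write; tx_ready() ends an
+ * HDLC frame with:
+ *
+ *   issue_command(info, CHA, CMD_TXFIFO + CMD_TXEOM);
+ */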
+
+static void tx_pause(struct tty_struct *tty)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	unsigned long flags;
+
+	if (mgslpc_paranoia_check(info, tty->name, "tx_pause"))
+		return;
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("tx_pause(%s)\n", info->device_name);
+
+	spin_lock_irqsave(&info->lock, flags);
+	if (info->tx_enabled)
+		tx_stop(info);
+	spin_unlock_irqrestore(&info->lock, flags);
+}
+
+static void tx_release(struct tty_struct *tty)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	unsigned long flags;
+
+	if (mgslpc_paranoia_check(info, tty->name, "tx_release"))
+		return;
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("tx_release(%s)\n", info->device_name);
+
+	spin_lock_irqsave(&info->lock, flags);
+	if (!info->tx_enabled)
+		tx_start(info, tty);
+	spin_unlock_irqrestore(&info->lock, flags);
+}
+
+/* Return next bottom half action to perform,
+ * or 0 if nothing to do.
+ */
+static int bh_action(MGSLPC_INFO *info)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&info->lock, flags);
+
+	if (info->pending_bh & BH_RECEIVE) {
+		info->pending_bh &= ~BH_RECEIVE;
+		rc = BH_RECEIVE;
+	} else if (info->pending_bh & BH_TRANSMIT) {
+		info->pending_bh &= ~BH_TRANSMIT;
+		rc = BH_TRANSMIT;
+	} else if (info->pending_bh & BH_STATUS) {
+		info->pending_bh &= ~BH_STATUS;
+		rc = BH_STATUS;
+	}
+
+	if (!rc) {
+		/* Mark BH routine as complete */
+		info->bh_running = false;
+		info->bh_requested = false;
+	}
+
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	return rc;
+}
+
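+/*
+ * mgslpc_isr() sets bits in info->pending_bh and schedules info->task;
+ * bh_handler() then drains one action at a time via bh_action() until
+ * it returns 0, which marks the BH idle again.
+ */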
+static void bh_handler(struct work_struct *work)
+{
+	MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task);
+	struct tty_struct *tty;
+	int action;
+
+	if (debug_level >= DEBUG_LEVEL_BH)
+		printk("%s(%d):bh_handler(%s) entry\n",
+			__FILE__,__LINE__,info->device_name);
+
+	info->bh_running = true;
+	tty = tty_port_tty_get(&info->port);
+
+	while((action = bh_action(info)) != 0) {
+
+		/* Process work item */
+		if (debug_level >= DEBUG_LEVEL_BH)
+			printk("%s(%d):bh_handler() work item action=%d\n",
+				__FILE__,__LINE__,action);
+
+		switch (action) {
+
+		case BH_RECEIVE:
+			while(rx_get_frame(info, tty));
+			break;
+		case BH_TRANSMIT:
+			bh_transmit(info, tty);
+			break;
+		case BH_STATUS:
+			bh_status(info);
+			break;
+		default:
+			/* unknown work item ID */
+			printk("Unknown work item ID=%08X!\n", action);
+			break;
+		}
+	}
+
+	tty_kref_put(tty);
+	if (debug_level >= DEBUG_LEVEL_BH)
+		printk("%s(%d):bh_handler(%s) exit\n",
+			__FILE__,__LINE__,info->device_name);
+}
+
+static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+	if (debug_level >= DEBUG_LEVEL_BH)
+		printk("bh_transmit() entry on %s\n", info->device_name);
+
+	if (tty)
+		tty_wakeup(tty);
+}
+
+static void bh_status(MGSLPC_INFO *info)
+{
+	info->ri_chkcount = 0;
+	info->dsr_chkcount = 0;
+	info->dcd_chkcount = 0;
+	info->cts_chkcount = 0;
+}
+
+/* eom: non-zero = end of frame */
+static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
+{
+	unsigned char data[2];
+	unsigned char fifo_count, read_count, i;
+	RXBUF *buf = (RXBUF*)(info->rx_buf + (info->rx_put * info->rx_buf_size));
+
+	if (debug_level >= DEBUG_LEVEL_ISR)
+		printk("%s(%d):rx_ready_hdlc(eom=%d)\n", __FILE__, __LINE__, eom);
+
+	if (!info->rx_enabled)
+		return;
+
+	if (info->rx_frame_count >= info->rx_buf_count) {
+		/* no more free buffers */
+		issue_command(info, CHA, CMD_RXRESET);
+		info->pending_bh |= BH_RECEIVE;
+		info->rx_overflow = true;
+		info->icount.buf_overrun++;
+		return;
+	}
+
+	if (eom) {
+		/* end of frame, get FIFO count from RBCL register */
+		fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f);
+		if (fifo_count == 0)
+			fifo_count = 32;
+	} else
+		fifo_count = 32;
+
+	do {
+		if (fifo_count == 1) {
+			read_count = 1;
+			data[0] = read_reg(info, CHA + RXFIFO);
+		} else {
+			read_count = 2;
+			*((unsigned short *) data) = read_reg16(info, CHA + RXFIFO);
+		}
+		fifo_count -= read_count;
+		if (!fifo_count && eom)
+			buf->status = data[--read_count];
+
+		for (i = 0; i < read_count; i++) {
+			if (buf->count >= info->max_frame_size) {
+				/* frame too large, reset receiver and reset current buffer */
+				issue_command(info, CHA, CMD_RXRESET);
+				buf->count = 0;
+				return;
+			}
+			*(buf->data + buf->count) = data[i];
+			buf->count++;
+		}
+	} while (fifo_count);
+
+	if (eom) {
+		info->pending_bh |= BH_RECEIVE;
+		info->rx_frame_count++;
+		info->rx_put++;
+		if (info->rx_put >= info->rx_buf_count)
+			info->rx_put = 0;
+	}
+	issue_command(info, CHA, CMD_RXFIFO);
+}
+
+static void rx_ready_async(MGSLPC_INFO *info, int tcd)
+{
+	struct tty_port *port = &info->port;
+	unsigned char data, status, flag;
+	int fifo_count;
+	int work = 0;
+	struct mgsl_icount *icount = &info->icount;
+
+	if (tcd) {
+		/* early termination, get FIFO count from RBCL register */
+		fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f);
+
+		/* Zero fifo count could mean 0 or 32 bytes available.
+		 * If BIT5 of STAR is set then at least 1 byte is available.
+		 */
+		if (!fifo_count && (read_reg(info,CHA+STAR) & BIT5))
+			fifo_count = 32;
+	} else
+		fifo_count = 32;
+
+	tty_buffer_request_room(port, fifo_count);
+	/* Flush received async data to receive data buffer. */
+	while (fifo_count) {
+		data   = read_reg(info, CHA + RXFIFO);
+		status = read_reg(info, CHA + RXFIFO);
+		fifo_count -= 2;
+
+		icount->rx++;
+		flag = TTY_NORMAL;
+
+		// if no parity/framing error then save data
+		// BIT7:parity error
+		// BIT6:framing error
+
+		if (status & (BIT7 + BIT6)) {
+			if (status & BIT7)
+				icount->parity++;
+			else
+				icount->frame++;
+
+			/* discard char if tty control flags say so */
+			if (status & info->ignore_status_mask)
+				continue;
+
+			status &= info->read_status_mask;
+
+			if (status & BIT7)
+				flag = TTY_PARITY;
+			else if (status & BIT6)
+				flag = TTY_FRAME;
+		}
+		work += tty_insert_flip_char(port, data, flag);
+	}
+	issue_command(info, CHA, CMD_RXFIFO);
+
+	if (debug_level >= DEBUG_LEVEL_ISR) {
+		printk("%s(%d):rx_ready_async rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
+			__FILE__, __LINE__, icount->rx, icount->brk,
+			icount->parity, icount->frame, icount->overrun);
+	}
+
+	if (work)
+		tty_flip_buffer_push(port);
+}
+
+
+static void tx_done(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+	if (!info->tx_active)
+		return;
+
+	info->tx_active = false;
+	info->tx_aborting = false;
+
+	if (info->params.mode == MGSL_MODE_ASYNC)
+		return;
+
+	info->tx_count = info->tx_put = info->tx_get = 0;
+	del_timer(&info->tx_timer);
+
+	if (info->drop_rts_on_tx_done) {
+		get_signals(info);
+		if (info->serial_signals & SerialSignal_RTS) {
+			info->serial_signals &= ~SerialSignal_RTS;
+			set_signals(info);
+		}
+		info->drop_rts_on_tx_done = false;
+	}
+
+#if SYNCLINK_GENERIC_HDLC
+	if (info->netcount)
+		hdlcdev_tx_done(info);
+	else
+#endif
+	{
+		if (tty && (tty->stopped || tty->hw_stopped)) {
+			tx_stop(info);
+			return;
+		}
+		info->pending_bh |= BH_TRANSMIT;
+	}
+}
+
+static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+	unsigned char fifo_count = 32;
+	int c;
+
+	if (debug_level >= DEBUG_LEVEL_ISR)
+		printk("%s(%d):tx_ready(%s)\n", __FILE__, __LINE__, info->device_name);
+
+	if (info->params.mode == MGSL_MODE_HDLC) {
+		if (!info->tx_active)
+			return;
+	} else {
+		if (tty && (tty->stopped || tty->hw_stopped)) {
+			tx_stop(info);
+			return;
+		}
+		if (!info->tx_count)
+			info->tx_active = false;
+	}
+
+	if (!info->tx_count)
+		return;
+
+	while (info->tx_count && fifo_count) {
+		c = min(2, min_t(int, fifo_count, min(info->tx_count, TXBUFSIZE - info->tx_get)));
+
+		if (c == 1) {
+			write_reg(info, CHA + TXFIFO, *(info->tx_buf + info->tx_get));
+		} else {
+			write_reg16(info, CHA + TXFIFO,
+					  *((unsigned short*)(info->tx_buf + info->tx_get)));
+		}
+		info->tx_count -= c;
+		info->tx_get = (info->tx_get + c) & (TXBUFSIZE - 1);
+		fifo_count -= c;
+	}
+
+	if (info->params.mode == MGSL_MODE_ASYNC) {
+		if (info->tx_count < WAKEUP_CHARS)
+			info->pending_bh |= BH_TRANSMIT;
+		issue_command(info, CHA, CMD_TXFIFO);
+	} else {
+		if (info->tx_count)
+			issue_command(info, CHA, CMD_TXFIFO);
+		else
+			issue_command(info, CHA, CMD_TXFIFO + CMD_TXEOM);
+	}
+}
+
+static void cts_change(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+	get_signals(info);
+	if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
+		irq_disable(info, CHB, IRQ_CTS);
+	info->icount.cts++;
+	if (info->serial_signals & SerialSignal_CTS)
+		info->input_signal_events.cts_up++;
+	else
+		info->input_signal_events.cts_down++;
+	wake_up_interruptible(&info->status_event_wait_q);
+	wake_up_interruptible(&info->event_wait_q);
+
+	if (tty && tty_port_cts_enabled(&info->port)) {
+		if (tty->hw_stopped) {
+			if (info->serial_signals & SerialSignal_CTS) {
+				if (debug_level >= DEBUG_LEVEL_ISR)
+					printk("CTS tx start...");
+				tty->hw_stopped = 0;
+				tx_start(info, tty);
+				info->pending_bh |= BH_TRANSMIT;
+				return;
+			}
+		} else {
+			if (!(info->serial_signals & SerialSignal_CTS)) {
+				if (debug_level >= DEBUG_LEVEL_ISR)
+					printk("CTS tx stop...");
+				tty->hw_stopped = 1;
+				tx_stop(info);
+			}
+		}
+	}
+	info->pending_bh |= BH_STATUS;
+}
+
+static void dcd_change(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+	get_signals(info);
+	if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
+		irq_disable(info, CHB, IRQ_DCD);
+	info->icount.dcd++;
+	if (info->serial_signals & SerialSignal_DCD)
+		info->input_signal_events.dcd_up++;
+	else
+		info->input_signal_events.dcd_down++;
+#if SYNCLINK_GENERIC_HDLC
+	if (info->netcount) {
+		if (info->serial_signals & SerialSignal_DCD)
+			netif_carrier_on(info->netdev);
+		else
+			netif_carrier_off(info->netdev);
+	}
+#endif
+	wake_up_interruptible(&info->status_event_wait_q);
+	wake_up_interruptible(&info->event_wait_q);
+
+	if (tty_port_check_carrier(&info->port)) {
+		if (debug_level >= DEBUG_LEVEL_ISR)
+			printk("%s CD now %s...", info->device_name,
+			       (info->serial_signals & SerialSignal_DCD) ? "on" : "off");
+		if (info->serial_signals & SerialSignal_DCD)
+			wake_up_interruptible(&info->port.open_wait);
+		else {
+			if (debug_level >= DEBUG_LEVEL_ISR)
+				printk("doing serial hangup...");
+			if (tty)
+				tty_hangup(tty);
+		}
+	}
+	info->pending_bh |= BH_STATUS;
+}
+
+static void dsr_change(MGSLPC_INFO *info)
+{
+	get_signals(info);
+	if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
+		port_irq_disable(info, PVR_DSR);
+	info->icount.dsr++;
+	if (info->serial_signals & SerialSignal_DSR)
+		info->input_signal_events.dsr_up++;
+	else
+		info->input_signal_events.dsr_down++;
+	wake_up_interruptible(&info->status_event_wait_q);
+	wake_up_interruptible(&info->event_wait_q);
+	info->pending_bh |= BH_STATUS;
+}
+
+static void ri_change(MGSLPC_INFO *info)
+{
+	get_signals(info);
+	if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
+		port_irq_disable(info, PVR_RI);
+	info->icount.rng++;
+	if (info->serial_signals & SerialSignal_RI)
+		info->input_signal_events.ri_up++;
+	else
+		info->input_signal_events.ri_down++;
+	wake_up_interruptible(&info->status_event_wait_q);
+	wake_up_interruptible(&info->event_wait_q);
+	info->pending_bh |= BH_STATUS;
+}
+
+/* Interrupt service routine entry point.
+ *
+ * Arguments:
+ *
+ * irq     interrupt number that caused interrupt (unused)
+ * dev_id  device ID supplied during interrupt registration
+ */
+static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
+{
+	MGSLPC_INFO *info = dev_id;
+	struct tty_struct *tty;
+	unsigned short isr;
+	unsigned char gis, pis;
+	int count=0;
+
+	if (debug_level >= DEBUG_LEVEL_ISR)
+		printk("mgslpc_isr(%d) entry.\n", info->irq_level);
+
+	if (!(info->p_dev->_locked))
+		return IRQ_HANDLED;
+
+	tty = tty_port_tty_get(&info->port);
+
+	spin_lock(&info->lock);
+
+	while ((gis = read_reg(info, CHA + GIS))) {
+		if (debug_level >= DEBUG_LEVEL_ISR)
+			printk("mgslpc_isr %s gis=%04X\n", info->device_name,gis);
+
+		if ((gis & 0x70) || count > 1000) {
+			printk("synclink_cs: hardware failed or ejected\n");
+			break;
+		}
+		count++;
+
+		if (gis & (BIT1 | BIT0)) {
+			isr = read_reg16(info, CHB + ISR);
+			if (isr & IRQ_DCD)
+				dcd_change(info, tty);
+			if (isr & IRQ_CTS)
+				cts_change(info, tty);
+		}
+		if (gis & (BIT3 | BIT2)) {
+			isr = read_reg16(info, CHA + ISR);
+			if (isr & IRQ_TIMER) {
+				info->irq_occurred = true;
+				irq_disable(info, CHA, IRQ_TIMER);
+			}
+
+			/* receive IRQs */
+			if (isr & IRQ_EXITHUNT) {
+				info->icount.exithunt++;
+				wake_up_interruptible(&info->event_wait_q);
+			}
+			if (isr & IRQ_BREAK_ON) {
+				info->icount.brk++;
+				if (info->port.flags & ASYNC_SAK)
+					do_SAK(tty);
+			}
+			if (isr & IRQ_RXTIME) {
+				issue_command(info, CHA, CMD_RXFIFO_READ);
+			}
+			if (isr & (IRQ_RXEOM | IRQ_RXFIFO)) {
+				if (info->params.mode == MGSL_MODE_HDLC)
+					rx_ready_hdlc(info, isr & IRQ_RXEOM);
+				else
+					rx_ready_async(info, isr & IRQ_RXEOM);
+			}
+
+			/* transmit IRQs */
+			if (isr & IRQ_UNDERRUN) {
+				if (info->tx_aborting)
+					info->icount.txabort++;
+				else
+					info->icount.txunder++;
+				tx_done(info, tty);
+			}
+			else if (isr & IRQ_ALLSENT) {
+				info->icount.txok++;
+				tx_done(info, tty);
+			}
+			else if (isr & IRQ_TXFIFO)
+				tx_ready(info, tty);
+		}
+		if (gis & BIT7) {
+			pis = read_reg(info, CHA + PIS);
+			if (pis & BIT1)
+				dsr_change(info);
+			if (pis & BIT2)
+				ri_change(info);
+		}
+	}
+
+	/* Request bottom half processing if there's something
+	 * for it to do and the bh is not already running
+	 */
+
+	if (info->pending_bh && !info->bh_running && !info->bh_requested) {
+		if (debug_level >= DEBUG_LEVEL_ISR)
+			printk("%s(%d):%s queueing bh task.\n",
+				__FILE__,__LINE__,info->device_name);
+		schedule_work(&info->task);
+		info->bh_requested = true;
+	}
+
+	spin_unlock(&info->lock);
+	tty_kref_put(tty);
+
+	if (debug_level >= DEBUG_LEVEL_ISR)
+		printk("%s(%d):mgslpc_isr(%d)exit.\n",
+		       __FILE__, __LINE__, info->irq_level);
+
+	return IRQ_HANDLED;
+}
+
+/* Initialize and start device.
+ */
+static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
+{
+	int retval = 0;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):startup(%s)\n", __FILE__, __LINE__, info->device_name);
+
+	if (tty_port_initialized(&info->port))
+		return 0;
+
+	if (!info->tx_buf) {
+		/* allocate a page of memory for a transmit buffer */
+		info->tx_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
+		if (!info->tx_buf) {
+			printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
+				__FILE__, __LINE__, info->device_name);
+			return -ENOMEM;
+		}
+	}
+
+	info->pending_bh = 0;
+
+	memset(&info->icount, 0, sizeof(info->icount));
+
+	timer_setup(&info->tx_timer, tx_timeout, 0);
+
+	/* Allocate and claim adapter resources */
+	retval = claim_resources(info);
+
+	/* perform existence check and diagnostics */
+	if (!retval)
+		retval = adapter_test(info);
+
+	if (retval) {
+		if (capable(CAP_SYS_ADMIN) && tty)
+			set_bit(TTY_IO_ERROR, &tty->flags);
+		release_resources(info);
+		return retval;
+	}
+
+	/* program hardware for current parameters */
+	mgslpc_change_params(info, tty);
+
+	if (tty)
+		clear_bit(TTY_IO_ERROR, &tty->flags);
+
+	tty_port_set_initialized(&info->port, 1);
+
+	return 0;
+}
+
+/* Called by mgslpc_close() and mgslpc_hangup() to shutdown hardware
+ */
+static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
+{
+	unsigned long flags;
+
+	if (!tty_port_initialized(&info->port))
+		return;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_shutdown(%s)\n",
+			 __FILE__, __LINE__, info->device_name);
+
+	/* clear status wait queue because status changes */
+	/* can't happen after shutting down the hardware */
+	wake_up_interruptible(&info->status_event_wait_q);
+	wake_up_interruptible(&info->event_wait_q);
+
+	del_timer_sync(&info->tx_timer);
+
+	if (info->tx_buf) {
+		free_page((unsigned long) info->tx_buf);
+		info->tx_buf = NULL;
+	}
+
+	spin_lock_irqsave(&info->lock, flags);
+
+	rx_stop(info);
+	tx_stop(info);
+
+	/* TODO:disable interrupts instead of reset to preserve signal states */
+	reset_device(info);
+
+	if (!tty || C_HUPCL(tty)) {
+		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
+		set_signals(info);
+	}
+
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	release_resources(info);
+
+	if (tty)
+		set_bit(TTY_IO_ERROR, &tty->flags);
+
+	tty_port_set_initialized(&info->port, 0);
+}
+
+static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->lock, flags);
+
+	rx_stop(info);
+	tx_stop(info);
+	info->tx_count = info->tx_put = info->tx_get = 0;
+
+	if (info->params.mode == MGSL_MODE_HDLC || info->netcount)
+		hdlc_mode(info);
+	else
+		async_mode(info);
+
+	set_signals(info);
+
+	info->dcd_chkcount = 0;
+	info->cts_chkcount = 0;
+	info->ri_chkcount = 0;
+	info->dsr_chkcount = 0;
+
+	irq_enable(info, CHB, IRQ_DCD | IRQ_CTS);
+	port_irq_enable(info, (unsigned char) PVR_DSR | PVR_RI);
+	get_signals(info);
+
+	if (info->netcount || (tty && C_CREAD(tty)))
+		rx_start(info);
+
+	spin_unlock_irqrestore(&info->lock, flags);
+}
+
+/* Reconfigure adapter based on new parameters
+ */
+static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+	unsigned cflag;
+	int bits_per_char;
+
+	if (!tty)
+		return;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_change_params(%s)\n",
+			 __FILE__, __LINE__, info->device_name);
+
+	cflag = tty->termios.c_cflag;
+
+	/* if B0 rate (hangup) specified then negate RTS and DTR */
+	/* otherwise assert RTS and DTR */
+	if (cflag & CBAUD)
+		info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
+	else
+		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
+
+	/* byte size and parity */
+
+	switch (cflag & CSIZE) {
+	case CS5: info->params.data_bits = 5; break;
+	case CS6: info->params.data_bits = 6; break;
+	case CS7: info->params.data_bits = 7; break;
+	case CS8: info->params.data_bits = 8; break;
+	default:  info->params.data_bits = 7; break;
+	}
+
+	if (cflag & CSTOPB)
+		info->params.stop_bits = 2;
+	else
+		info->params.stop_bits = 1;
+
+	info->params.parity = ASYNC_PARITY_NONE;
+	if (cflag & PARENB) {
+		if (cflag & PARODD)
+			info->params.parity = ASYNC_PARITY_ODD;
+		else
+			info->params.parity = ASYNC_PARITY_EVEN;
+#ifdef CMSPAR
+		if (cflag & CMSPAR)
+			info->params.parity = ASYNC_PARITY_SPACE;
+#endif
+	}
+
+	/* calculate number of jiffies to transmit a full
+	 * FIFO (32 bytes) at specified data rate
+	 */
+	bits_per_char = info->params.data_bits +
+			info->params.stop_bits + 1;
+
+	/* if port data rate is set to 460800 or less then
+	 * allow tty settings to override, otherwise keep the
+	 * current data rate.
+	 */
+	if (info->params.data_rate <= 460800) {
+		info->params.data_rate = tty_get_baud_rate(tty);
+	}
+
+	if (info->params.data_rate) {
+		info->timeout = (32*HZ*bits_per_char) /
+				info->params.data_rate;
+	}
+	info->timeout += HZ/50;		/* Add .02 seconds of slop */
+
+	tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
+	tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
+
+	/* process tty input control flags */
+
+	info->read_status_mask = 0;
+	if (I_INPCK(tty))
+		info->read_status_mask |= BIT7 | BIT6;
+	if (I_IGNPAR(tty))
+		info->ignore_status_mask |= BIT7 | BIT6;
+
+	mgslpc_program_hw(info, tty);
+}
+
+/* Add a character to the transmit buffer
+ */
+static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO) {
+		printk("%s(%d):mgslpc_put_char(%d) on %s\n",
+			__FILE__, __LINE__, ch, info->device_name);
+	}
+
+	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char"))
+		return 0;
+
+	if (!info->tx_buf)
+		return 0;
+
+	spin_lock_irqsave(&info->lock, flags);
+
+	if (info->params.mode == MGSL_MODE_ASYNC || !info->tx_active) {
+		if (info->tx_count < TXBUFSIZE - 1) {
+			info->tx_buf[info->tx_put++] = ch;
+			info->tx_put &= TXBUFSIZE-1;
+			info->tx_count++;
+		}
+	}
+
+	spin_unlock_irqrestore(&info->lock, flags);
+	return 1;
+}
+
+/* Enable transmitter so remaining characters in the
+ * transmit buffer are sent.
+ */
+static void mgslpc_flush_chars(struct tty_struct *tty)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n",
+			__FILE__, __LINE__, info->device_name, info->tx_count);
+
+	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars"))
+		return;
+
+	if (info->tx_count <= 0 || tty->stopped ||
+	    tty->hw_stopped || !info->tx_buf)
+		return;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n",
+			__FILE__, __LINE__, info->device_name);
+
+	spin_lock_irqsave(&info->lock, flags);
+	if (!info->tx_active)
+		tx_start(info, tty);
+	spin_unlock_irqrestore(&info->lock, flags);
+}
+
+/* Send a block of data
+ *
+ * Arguments:
+ *
+ * tty        pointer to tty information structure
+ * buf	      pointer to buffer containing send data
+ * count      size of send data in bytes
+ *
+ * Returns: number of characters written
+ */
+static int mgslpc_write(struct tty_struct * tty,
+			const unsigned char *buf, int count)
+{
+	int c, ret = 0;
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_write(%s) count=%d\n",
+			__FILE__, __LINE__, info->device_name, count);
+
+	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") ||
+		!info->tx_buf)
+		goto cleanup;
+
+	if (info->params.mode == MGSL_MODE_HDLC) {
+		if (count > TXBUFSIZE) {
+			ret = -EIO;
+			goto cleanup;
+		}
+		if (info->tx_active)
+			goto cleanup;
+		else if (info->tx_count)
+			goto start;
+	}
+
+	for (;;) {
+		c = min(count,
+			min(TXBUFSIZE - info->tx_count - 1,
+			    TXBUFSIZE - info->tx_put));
+		if (c <= 0)
+			break;
+
+		memcpy(info->tx_buf + info->tx_put, buf, c);
+
+		spin_lock_irqsave(&info->lock, flags);
+		info->tx_put = (info->tx_put + c) & (TXBUFSIZE-1);
+		info->tx_count += c;
+		spin_unlock_irqrestore(&info->lock, flags);
+
+		buf += c;
+		count -= c;
+		ret += c;
+	}
+start:
+	if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
+		spin_lock_irqsave(&info->lock, flags);
+		if (!info->tx_active)
+			tx_start(info, tty);
+		spin_unlock_irqrestore(&info->lock, flags);
+	}
+cleanup:
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_write(%s) returning=%d\n",
+			__FILE__, __LINE__, info->device_name, ret);
+	return ret;
+}
+
+/* Return the count of free bytes in transmit buffer
+ */
+static int mgslpc_write_room(struct tty_struct *tty)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	int ret;
+
+	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write_room"))
+		return 0;
+
+	if (info->params.mode == MGSL_MODE_HDLC) {
+		/* HDLC (frame oriented) mode */
+		if (info->tx_active)
+			return 0;
+		else
+			return HDLC_MAX_FRAME_SIZE;
+	} else {
+		ret = TXBUFSIZE - info->tx_count - 1;
+		if (ret < 0)
+			ret = 0;
+	}
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_write_room(%s)=%d\n",
+			 __FILE__, __LINE__, info->device_name, ret);
+	return ret;
+}
+
+/* Return the count of bytes in transmit buffer
+ */
+static int mgslpc_chars_in_buffer(struct tty_struct *tty)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	int rc;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_chars_in_buffer(%s)\n",
+			 __FILE__, __LINE__, info->device_name);
+
+	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_chars_in_buffer"))
+		return 0;
+
+	if (info->params.mode == MGSL_MODE_HDLC)
+		rc = info->tx_active ? info->max_frame_size : 0;
+	else
+		rc = info->tx_count;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_chars_in_buffer(%s)=%d\n",
+			 __FILE__, __LINE__, info->device_name, rc);
+
+	return rc;
+}
+
+/* Discard all data in the send buffer
+ */
+static void mgslpc_flush_buffer(struct tty_struct *tty)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_flush_buffer(%s) entry\n",
+			 __FILE__, __LINE__, info->device_name);
+
+	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_buffer"))
+		return;
+
+	spin_lock_irqsave(&info->lock, flags);
+	info->tx_count = info->tx_put = info->tx_get = 0;
+	del_timer(&info->tx_timer);
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	wake_up_interruptible(&tty->write_wait);
+	tty_wakeup(tty);
+}
+
+/* Send a high-priority XON/XOFF character
+ */
+static void mgslpc_send_xchar(struct tty_struct *tty, char ch)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_send_xchar(%s,%d)\n",
+			 __FILE__, __LINE__, info->device_name, ch);
+
+	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_send_xchar"))
+		return;
+
+	info->x_char = ch;
+	if (ch) {
+		spin_lock_irqsave(&info->lock, flags);
+		if (!info->tx_enabled)
+			tx_start(info, tty);
+		spin_unlock_irqrestore(&info->lock, flags);
+	}
+}
+
+/* Signal remote device to throttle send data (our receive data)
+ */
+static void mgslpc_throttle(struct tty_struct * tty)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_throttle(%s) entry\n",
+			 __FILE__, __LINE__, info->device_name);
+
+	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_throttle"))
+		return;
+
+	if (I_IXOFF(tty))
+		mgslpc_send_xchar(tty, STOP_CHAR(tty));
+
+	if (C_CRTSCTS(tty)) {
+		spin_lock_irqsave(&info->lock, flags);
+		info->serial_signals &= ~SerialSignal_RTS;
+		set_signals(info);
+		spin_unlock_irqrestore(&info->lock, flags);
+	}
+}
+
+/* Signal remote device to stop throttling send data (our receive data)
+ */
+static void mgslpc_unthrottle(struct tty_struct * tty)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_unthrottle(%s) entry\n",
+			 __FILE__, __LINE__, info->device_name);
+
+	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_unthrottle"))
+		return;
+
+	if (I_IXOFF(tty)) {
+		if (info->x_char)
+			info->x_char = 0;
+		else
+			mgslpc_send_xchar(tty, START_CHAR(tty));
+	}
+
+	if (C_CRTSCTS(tty)) {
+		spin_lock_irqsave(&info->lock, flags);
+		info->serial_signals |= SerialSignal_RTS;
+		set_signals(info);
+		spin_unlock_irqrestore(&info->lock, flags);
+	}
+}
+
+/* get the current serial statistics
+ */
+static int get_stats(MGSLPC_INFO * info, struct mgsl_icount __user *user_icount)
+{
+	int err;
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("get_stats(%s)\n", info->device_name);
+	if (!user_icount) {
+		memset(&info->icount, 0, sizeof(info->icount));
+	} else {
+		COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
+		if (err)
+			return -EFAULT;
+	}
+	return 0;
+}
+
+/* get the current serial parameters
+ */
+static int get_params(MGSLPC_INFO * info, MGSL_PARAMS __user *user_params)
+{
+	int err;
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("get_params(%s)\n", info->device_name);
+	COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
+	if (err)
+		return -EFAULT;
+	return 0;
+}
+
+/* set the serial parameters
+ *
+ * Arguments:
+ *
+ *	info		pointer to device instance data
+ *	new_params	user buffer containing new serial params
+ *
+ * Returns:	0 if success, otherwise error code
+ */
+static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params, struct tty_struct *tty)
+{
+	unsigned long flags;
+	MGSL_PARAMS tmp_params;
+	int err;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):set_params %s\n", __FILE__,__LINE__,
+			info->device_name);
+	COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
+	if (err) {
+		if (debug_level >= DEBUG_LEVEL_INFO)
+			printk("%s(%d):set_params(%s) user buffer copy failed\n",
+				__FILE__, __LINE__, info->device_name);
+		return -EFAULT;
+	}
+
+	spin_lock_irqsave(&info->lock, flags);
+	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	mgslpc_change_params(info, tty);
+
+	return 0;
+}
+
+static int get_txidle(MGSLPC_INFO * info, int __user *idle_mode)
+{
+	int err;
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("get_txidle(%s)=%d\n", info->device_name, info->idle_mode);
+	COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
+	if (err)
+		return -EFAULT;
+	return 0;
+}
+
+static int set_txidle(MGSLPC_INFO * info, int idle_mode)
+{
+	unsigned long flags;
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("set_txidle(%s,%d)\n", info->device_name, idle_mode);
+	spin_lock_irqsave(&info->lock, flags);
+	info->idle_mode = idle_mode;
+	tx_set_idle(info);
+	spin_unlock_irqrestore(&info->lock, flags);
+	return 0;
+}
+
+static int get_interface(MGSLPC_INFO * info, int __user *if_mode)
+{
+	int err;
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("get_interface(%s)=%d\n", info->device_name, info->if_mode);
+	COPY_TO_USER(err,if_mode, &info->if_mode, sizeof(int));
+	if (err)
+		return -EFAULT;
+	return 0;
+}
+
+static int set_interface(MGSLPC_INFO * info, int if_mode)
+{
+	unsigned long flags;
+	unsigned char val;
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("set_interface(%s,%d)\n", info->device_name, if_mode);
+	spin_lock_irqsave(&info->lock, flags);
+	info->if_mode = if_mode;
+
+	val = read_reg(info, PVR) & 0x0f;
+	switch (info->if_mode) {
+	case MGSL_INTERFACE_RS232: val |= PVR_RS232; break;
+	case MGSL_INTERFACE_V35:   val |= PVR_V35;   break;
+	case MGSL_INTERFACE_RS422: val |= PVR_RS422; break;
+	}
+	write_reg(info, PVR, val);
+
+	spin_unlock_irqrestore(&info->lock, flags);
+	return 0;
+}
+
+static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty)
+{
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("set_txenable(%s,%d)\n", info->device_name, enable);
+
+	spin_lock_irqsave(&info->lock, flags);
+	if (enable) {
+		if (!info->tx_enabled)
+			tx_start(info, tty);
+	} else {
+		if (info->tx_enabled)
+			tx_stop(info);
+	}
+	spin_unlock_irqrestore(&info->lock, flags);
+	return 0;
+}
+
+static int tx_abort(MGSLPC_INFO * info)
+{
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("tx_abort(%s)\n", info->device_name);
+
+	spin_lock_irqsave(&info->lock, flags);
+	if (info->tx_active && info->tx_count &&
+	    info->params.mode == MGSL_MODE_HDLC) {
+		/* clear data count so FIFO is not filled on next IRQ.
+		 * This causes an underrun, which aborts the transmission.
+		 */
+		info->tx_count = info->tx_put = info->tx_get = 0;
+		info->tx_aborting = true;
+	}
+	spin_unlock_irqrestore(&info->lock, flags);
+	return 0;
+}
+
+static int set_rxenable(MGSLPC_INFO * info, int enable)
+{
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("set_rxenable(%s,%d)\n", info->device_name, enable);
+
+	spin_lock_irqsave(&info->lock, flags);
+	if (enable) {
+		if (!info->rx_enabled)
+			rx_start(info);
+	} else {
+		if (info->rx_enabled)
+			rx_stop(info);
+	}
+	spin_unlock_irqrestore(&info->lock, flags);
+	return 0;
+}
+
+/* wait for specified event to occur
+ *
+ * Arguments:		info	pointer to device instance data
+ *			mask	pointer to bitmask of events to wait for
+ * Return Value:	0	if successful, with the bit mask updated
+ *				to the events triggered,
+ *			otherwise error code
+ */
+static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
+{
+	unsigned long flags;
+	int s;
+	int rc=0;
+	struct mgsl_icount cprev, cnow;
+	int events;
+	int mask;
+	struct	_input_signal_events oldsigs, newsigs;
+	DECLARE_WAITQUEUE(wait, current);
+
+	COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
+	if (rc)
+		return  -EFAULT;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("wait_events(%s,%d)\n", info->device_name, mask);
+
+	spin_lock_irqsave(&info->lock, flags);
+
+	/* return immediately if state matches requested events */
+	get_signals(info);
+	s = info->serial_signals;
+	events = mask &
+		( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
+		  ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
+		  ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
+		  ((s & SerialSignal_RI)  ? MgslEvent_RiActive :MgslEvent_RiInactive) );
+	if (events) {
+		spin_unlock_irqrestore(&info->lock, flags);
+		goto exit;
+	}
+
+	/* save current irq counts */
+	cprev = info->icount;
+	oldsigs = info->input_signal_events;
+
+	if ((info->params.mode == MGSL_MODE_HDLC) &&
+	    (mask & MgslEvent_ExitHuntMode))
+		irq_enable(info, CHA, IRQ_EXITHUNT);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	add_wait_queue(&info->event_wait_q, &wait);
+
+	spin_unlock_irqrestore(&info->lock, flags);
+
+
+	for(;;) {
+		schedule();
+		if (signal_pending(current)) {
+			rc = -ERESTARTSYS;
+			break;
+		}
+
+		/* get current irq counts */
+		spin_lock_irqsave(&info->lock, flags);
+		cnow = info->icount;
+		newsigs = info->input_signal_events;
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_unlock_irqrestore(&info->lock, flags);
+
+		/* if no change, wait aborted for some reason */
+		if (newsigs.dsr_up   == oldsigs.dsr_up   &&
+		    newsigs.dsr_down == oldsigs.dsr_down &&
+		    newsigs.dcd_up   == oldsigs.dcd_up   &&
+		    newsigs.dcd_down == oldsigs.dcd_down &&
+		    newsigs.cts_up   == oldsigs.cts_up   &&
+		    newsigs.cts_down == oldsigs.cts_down &&
+		    newsigs.ri_up    == oldsigs.ri_up    &&
+		    newsigs.ri_down  == oldsigs.ri_down  &&
+		    cnow.exithunt    == cprev.exithunt   &&
+		    cnow.rxidle      == cprev.rxidle) {
+			rc = -EIO;
+			break;
+		}
+
+		events = mask &
+			( (newsigs.dsr_up   != oldsigs.dsr_up   ? MgslEvent_DsrActive:0)   +
+			  (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
+			  (newsigs.dcd_up   != oldsigs.dcd_up   ? MgslEvent_DcdActive:0)   +
+			  (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
+			  (newsigs.cts_up   != oldsigs.cts_up   ? MgslEvent_CtsActive:0)   +
+			  (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
+			  (newsigs.ri_up    != oldsigs.ri_up    ? MgslEvent_RiActive:0)    +
+			  (newsigs.ri_down  != oldsigs.ri_down  ? MgslEvent_RiInactive:0)  +
+			  (cnow.exithunt    != cprev.exithunt   ? MgslEvent_ExitHuntMode:0) +
+			  (cnow.rxidle      != cprev.rxidle     ? MgslEvent_IdleReceived:0) );
+		if (events)
+			break;
+
+		cprev = cnow;
+		oldsigs = newsigs;
+	}
+
+	remove_wait_queue(&info->event_wait_q, &wait);
+	set_current_state(TASK_RUNNING);
+
+	if (mask & MgslEvent_ExitHuntMode) {
+		spin_lock_irqsave(&info->lock, flags);
+		if (!waitqueue_active(&info->event_wait_q))
+			irq_disable(info, CHA, IRQ_EXITHUNT);
+		spin_unlock_irqrestore(&info->lock, flags);
+	}
+exit:
+	if (rc == 0)
+		PUT_USER(rc, events, mask_ptr);
+	return rc;
+}
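+
+/*
+ * Illustrative user-space caller (hypothetical fd; constants from
+ * <linux/synclink.h>):
+ *
+ *   int mask = MgslEvent_DcdActive | MgslEvent_DcdInactive;
+ *   rc = ioctl(fd, MGSL_IOCWAITEVENT, &mask);
+ *
+ * On success (rc == 0) mask holds the events that occurred.
+ */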
+
+static int modem_input_wait(MGSLPC_INFO *info,int arg)
+{
+	unsigned long flags;
+	int rc;
+	struct mgsl_icount cprev, cnow;
+	DECLARE_WAITQUEUE(wait, current);
+
+	/* save current irq counts */
+	spin_lock_irqsave(&info->lock, flags);
+	cprev = info->icount;
+	add_wait_queue(&info->status_event_wait_q, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	for(;;) {
+		schedule();
+		if (signal_pending(current)) {
+			rc = -ERESTARTSYS;
+			break;
+		}
+
+		/* get new irq counts */
+		spin_lock_irqsave(&info->lock, flags);
+		cnow = info->icount;
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_unlock_irqrestore(&info->lock, flags);
+
+		/* if no change, wait aborted for some reason */
+		if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+		    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
+			rc = -EIO;
+			break;
+		}
+
+		/* check for change in caller specified modem input */
+		if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
+		    (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
+		    (arg & TIOCM_CD  && cnow.dcd != cprev.dcd) ||
+		    (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
+			rc = 0;
+			break;
+		}
+
+		cprev = cnow;
+	}
+	remove_wait_queue(&info->status_event_wait_q, &wait);
+	set_current_state(TASK_RUNNING);
+	return rc;
+}
+
+/* return the state of the serial control and status signals
+ */
+static int tiocmget(struct tty_struct *tty)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	unsigned int result;
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->lock, flags);
+	get_signals(info);
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
+		((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
+		((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
+		((info->serial_signals & SerialSignal_RI)  ? TIOCM_RNG:0) +
+		((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
+		((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):%s tiocmget() value=%08X\n",
+			 __FILE__, __LINE__, info->device_name, result);
+	return result;
+}
+
+/* set modem control signals (DTR/RTS)
+ */
+static int tiocmset(struct tty_struct *tty,
+		    unsigned int set, unsigned int clear)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):%s tiocmset(%x,%x)\n",
+			__FILE__, __LINE__, info->device_name, set, clear);
+
+	if (set & TIOCM_RTS)
+		info->serial_signals |= SerialSignal_RTS;
+	if (set & TIOCM_DTR)
+		info->serial_signals |= SerialSignal_DTR;
+	if (clear & TIOCM_RTS)
+		info->serial_signals &= ~SerialSignal_RTS;
+	if (clear & TIOCM_DTR)
+		info->serial_signals &= ~SerialSignal_DTR;
+
+	spin_lock_irqsave(&info->lock, flags);
+	set_signals(info);
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	return 0;
+}
+
+/* Set or clear transmit break condition
+ *
+ * Arguments:		tty		pointer to tty instance data
+ *			break_state	-1=set break condition, 0=clear
+ */
+static int mgslpc_break(struct tty_struct *tty, int break_state)
+{
+	MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_break(%s,%d)\n",
+			 __FILE__, __LINE__, info->device_name, break_state);
+
+	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_break"))
+		return -EINVAL;
+
+	spin_lock_irqsave(&info->lock, flags);
+	if (break_state == -1)
+		set_reg_bits(info, CHA+DAFO, BIT6);
+	else
+		clear_reg_bits(info, CHA+DAFO, BIT6);
+	spin_unlock_irqrestore(&info->lock, flags);
+	return 0;
+}
+
+static int mgslpc_get_icount(struct tty_struct *tty,
+				struct serial_icounter_struct *icount)
+{
+	MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
+	struct mgsl_icount cnow;	/* kernel counter temps */
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->lock, flags);
+	cnow = info->icount;
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	icount->cts = cnow.cts;
+	icount->dsr = cnow.dsr;
+	icount->rng = cnow.rng;
+	icount->dcd = cnow.dcd;
+	icount->rx = cnow.rx;
+	icount->tx = cnow.tx;
+	icount->frame = cnow.frame;
+	icount->overrun = cnow.overrun;
+	icount->parity = cnow.parity;
+	icount->brk = cnow.brk;
+	icount->buf_overrun = cnow.buf_overrun;
+
+	return 0;
+}
+
+/* Service an IOCTL request
+ *
+ * Arguments:
+ *
+ *	tty	pointer to tty instance data
+ *	cmd	IOCTL command code
+ *	arg	command argument/context
+ *
+ * Return Value:	0 if success, otherwise error code
+ */
+static int mgslpc_ioctl(struct tty_struct *tty,
+			unsigned int cmd, unsigned long arg)
+{
+	MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
+	void __user *argp = (void __user *)arg;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__, __LINE__,
+			info->device_name, cmd);
+
+	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_ioctl"))
+		return -ENODEV;
+
+	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+	    (cmd != TIOCMIWAIT)) {
+		if (tty_io_error(tty))
+		    return -EIO;
+	}
+
+	switch (cmd) {
+	case MGSL_IOCGPARAMS:
+		return get_params(info, argp);
+	case MGSL_IOCSPARAMS:
+		return set_params(info, argp, tty);
+	case MGSL_IOCGTXIDLE:
+		return get_txidle(info, argp);
+	case MGSL_IOCSTXIDLE:
+		return set_txidle(info, (int)arg);
+	case MGSL_IOCGIF:
+		return get_interface(info, argp);
+	case MGSL_IOCSIF:
+		return set_interface(info,(int)arg);
+	case MGSL_IOCTXENABLE:
+		return set_txenable(info,(int)arg, tty);
+	case MGSL_IOCRXENABLE:
+		return set_rxenable(info,(int)arg);
+	case MGSL_IOCTXABORT:
+		return tx_abort(info);
+	case MGSL_IOCGSTATS:
+		return get_stats(info, argp);
+	case MGSL_IOCWAITEVENT:
+		return wait_events(info, argp);
+	case TIOCMIWAIT:
+		return modem_input_wait(info,(int)arg);
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return 0;
+}
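+
+/*
+ * Illustrative user-space parameter update (hypothetical fd):
+ *
+ *   MGSL_PARAMS params;
+ *   ioctl(fd, MGSL_IOCGPARAMS, &params);
+ *   params.mode = MGSL_MODE_HDLC;
+ *   ioctl(fd, MGSL_IOCSPARAMS, &params);
+ */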
+
+/* Set new termios settings
+ *
+ * Arguments:
+ *
+ *	tty		pointer to tty structure
+ *	termios		pointer to buffer to hold returned old termios
+ */
+static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_set_termios %s\n", __FILE__, __LINE__,
+			tty->driver->name);
+
+	/* just return if nothing has changed */
+	if ((tty->termios.c_cflag == old_termios->c_cflag)
+	    && (RELEVANT_IFLAG(tty->termios.c_iflag)
+		== RELEVANT_IFLAG(old_termios->c_iflag)))
+	  return;
+
+	mgslpc_change_params(info, tty);
+
+	/* Handle transition to B0 status */
+	if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
+		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
+		spin_lock_irqsave(&info->lock, flags);
+		set_signals(info);
+		spin_unlock_irqrestore(&info->lock, flags);
+	}
+
+	/* Handle transition away from B0 status */
+	if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
+		info->serial_signals |= SerialSignal_DTR;
+		if (!C_CRTSCTS(tty) || !tty_throttled(tty))
+			info->serial_signals |= SerialSignal_RTS;
+		spin_lock_irqsave(&info->lock, flags);
+		set_signals(info);
+		spin_unlock_irqrestore(&info->lock, flags);
+	}
+
+	/* Handle turning off CRTSCTS */
+	if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) {
+		tty->hw_stopped = 0;
+		tx_release(tty);
+	}
+}
+
+static void mgslpc_close(struct tty_struct *tty, struct file *filp)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	struct tty_port *port = &info->port;
+
+	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_close"))
+		return;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
+			 __FILE__, __LINE__, info->device_name, port->count);
+
+	if (tty_port_close_start(port, tty, filp) == 0)
+		goto cleanup;
+
+	if (tty_port_initialized(port))
+		mgslpc_wait_until_sent(tty, info->timeout);
+
+	mgslpc_flush_buffer(tty);
+
+	tty_ldisc_flush(tty);
+	shutdown(info, tty);
+
+	tty_port_close_end(port, tty);
+	tty_port_tty_set(port, NULL);
+cleanup:
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
+			tty->driver->name, port->count);
+}
+
+/* Wait until the transmitter is empty.
+ */
+static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+	unsigned long orig_jiffies, char_time;
+
+	if (!info)
+		return;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_wait_until_sent(%s) entry\n",
+			 __FILE__, __LINE__, info->device_name);
+
+	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent"))
+		return;
+
+	if (!tty_port_initialized(&info->port))
+		goto exit;
+
+	orig_jiffies = jiffies;
+
+	/* Set check interval to 1/5 of estimated time to
+	 * send a character, and make it at least 1. The check
+	 * interval should also be less than the timeout.
+	 * Note: use tight timings here to satisfy the NIST-PCTS.
+	 */
+
+	if (info->params.data_rate) {
+		char_time = info->timeout / (32 * 5);
+		if (!char_time)
+			char_time++;
+	} else
+		char_time = 1;
+
+	if (timeout)
+		char_time = min_t(unsigned long, char_time, timeout);
+
+	if (info->params.mode == MGSL_MODE_HDLC) {
+		while (info->tx_active) {
+			msleep_interruptible(jiffies_to_msecs(char_time));
+			if (signal_pending(current))
+				break;
+			if (timeout && time_after(jiffies, orig_jiffies + timeout))
+				break;
+		}
+	} else {
+		while ((info->tx_count || info->tx_active) &&
+			info->tx_enabled) {
+			msleep_interruptible(jiffies_to_msecs(char_time));
+			if (signal_pending(current))
+				break;
+			if (timeout && time_after(jiffies, orig_jiffies + timeout))
+				break;
+		}
+	}
+
+exit:
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_wait_until_sent(%s) exit\n",
+			 __FILE__, __LINE__, info->device_name);
+}
+
+/* Called by tty_hangup() when a hangup is signaled.
+ * This is the same as closing all open files for the port.
+ */
+static void mgslpc_hangup(struct tty_struct *tty)
+{
+	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_hangup(%s)\n",
+			 __FILE__, __LINE__, info->device_name);
+
+	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_hangup"))
+		return;
+
+	mgslpc_flush_buffer(tty);
+	shutdown(info, tty);
+	tty_port_hangup(&info->port);
+}
+
+static int carrier_raised(struct tty_port *port)
+{
+	MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->lock, flags);
+	get_signals(info);
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	if (info->serial_signals & SerialSignal_DCD)
+		return 1;
+	return 0;
+}
+
+static void dtr_rts(struct tty_port *port, int onoff)
+{
+	MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->lock, flags);
+	if (onoff)
+		info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
+	else
+		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
+	set_signals(info);
+	spin_unlock_irqrestore(&info->lock, flags);
+}
+
+
+static int mgslpc_open(struct tty_struct *tty, struct file *filp)
+{
+	MGSLPC_INFO	*info;
+	struct tty_port *port;
+	int		retval, line;
+	unsigned long	flags;
+
+	/* verify range of specified line number */
+	line = tty->index;
+	if (line >= mgslpc_device_count) {
+		printk("%s(%d):mgslpc_open with invalid line #%d.\n",
+			__FILE__, __LINE__, line);
+		return -ENODEV;
+	}
+
+	/* find the info structure for the specified line */
+	info = mgslpc_device_list;
+	while (info && info->line != line)
+		info = info->next_device;
+	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_open"))
+		return -ENODEV;
+
+	port = &info->port;
+	tty->driver_data = info;
+	tty_port_tty_set(port, tty);
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
+			 __FILE__, __LINE__, tty->driver->name, port->count);
+
+	port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+
+	spin_lock_irqsave(&info->netlock, flags);
+	if (info->netcount) {
+		retval = -EBUSY;
+		spin_unlock_irqrestore(&info->netlock, flags);
+		goto cleanup;
+	}
+	spin_lock(&port->lock);
+	port->count++;
+	spin_unlock(&port->lock);
+	spin_unlock_irqrestore(&info->netlock, flags);
+
+	if (port->count == 1) {
+		/* 1st open on this device, init hardware */
+		retval = startup(info, tty);
+		if (retval < 0)
+			goto cleanup;
+	}
+
+	retval = tty_port_block_til_ready(&info->port, tty, filp);
+	if (retval) {
+		if (debug_level >= DEBUG_LEVEL_INFO)
+			printk("%s(%d):block_til_ready(%s) returned %d\n",
+				 __FILE__, __LINE__, info->device_name, retval);
+		goto cleanup;
+	}
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgslpc_open(%s) success\n",
+			 __FILE__, __LINE__, info->device_name);
+	retval = 0;
+
+cleanup:
+	return retval;
+}
+
+/*
+ * /proc fs routines....
+ */
+
+static inline void line_info(struct seq_file *m, MGSLPC_INFO *info)
+{
+	char	stat_buf[30];
+	unsigned long flags;
+
+	seq_printf(m, "%s:io:%04X irq:%d",
+		      info->device_name, info->io_base, info->irq_level);
+
+	/* output current serial signal states */
+	spin_lock_irqsave(&info->lock, flags);
+	get_signals(info);
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	stat_buf[0] = 0;
+	stat_buf[1] = 0;
+	if (info->serial_signals & SerialSignal_RTS)
+		strcat(stat_buf, "|RTS");
+	if (info->serial_signals & SerialSignal_CTS)
+		strcat(stat_buf, "|CTS");
+	if (info->serial_signals & SerialSignal_DTR)
+		strcat(stat_buf, "|DTR");
+	if (info->serial_signals & SerialSignal_DSR)
+		strcat(stat_buf, "|DSR");
+	if (info->serial_signals & SerialSignal_DCD)
+		strcat(stat_buf, "|CD");
+	if (info->serial_signals & SerialSignal_RI)
+		strcat(stat_buf, "|RI");
+
+	if (info->params.mode == MGSL_MODE_HDLC) {
+		seq_printf(m, " HDLC txok:%d rxok:%d",
+			      info->icount.txok, info->icount.rxok);
+		if (info->icount.txunder)
+			seq_printf(m, " txunder:%d", info->icount.txunder);
+		if (info->icount.txabort)
+			seq_printf(m, " txabort:%d", info->icount.txabort);
+		if (info->icount.rxshort)
+			seq_printf(m, " rxshort:%d", info->icount.rxshort);
+		if (info->icount.rxlong)
+			seq_printf(m, " rxlong:%d", info->icount.rxlong);
+		if (info->icount.rxover)
+			seq_printf(m, " rxover:%d", info->icount.rxover);
+		if (info->icount.rxcrc)
+			seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
+	} else {
+		seq_printf(m, " ASYNC tx:%d rx:%d",
+			      info->icount.tx, info->icount.rx);
+		if (info->icount.frame)
+			seq_printf(m, " fe:%d", info->icount.frame);
+		if (info->icount.parity)
+			seq_printf(m, " pe:%d", info->icount.parity);
+		if (info->icount.brk)
+			seq_printf(m, " brk:%d", info->icount.brk);
+		if (info->icount.overrun)
+			seq_printf(m, " oe:%d", info->icount.overrun);
+	}
+
+	/* Append serial signal status to end */
+	seq_printf(m, " %s\n", stat_buf+1);
+
+	seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
+		       info->tx_active,info->bh_requested,info->bh_running,
+		       info->pending_bh);
+}
+
+/* Called to print information about devices
+ */
+static int mgslpc_proc_show(struct seq_file *m, void *v)
+{
+	MGSLPC_INFO *info;
+
+	seq_printf(m, "synclink driver:%s\n", driver_version);
+
+	info = mgslpc_device_list;
+	while (info) {
+		line_info(m, info);
+		info = info->next_device;
+	}
+	return 0;
+}
+
+static int rx_alloc_buffers(MGSLPC_INFO *info)
+{
+	/* each buffer has header and data */
+	info->rx_buf_size = sizeof(RXBUF) + info->max_frame_size;
+
+	/* calculate total allocation size for 8 buffers */
+	info->rx_buf_total_size = info->rx_buf_size * 8;
+
+	/* limit total allocated memory */
+	if (info->rx_buf_total_size > 0x10000)
+		info->rx_buf_total_size = 0x10000;
+
+	/* calculate number of buffers */
+	info->rx_buf_count = info->rx_buf_total_size / info->rx_buf_size;
+
+	info->rx_buf = kmalloc(info->rx_buf_total_size, GFP_KERNEL);
+	if (info->rx_buf == NULL)
+		return -ENOMEM;
+
+	/* unused flag buffer to satisfy receive_buf calling interface */
+	info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
+	if (!info->flag_buf) {
+		kfree(info->rx_buf);
+		info->rx_buf = NULL;
+		return -ENOMEM;
+	}
+
+	rx_reset_buffers(info);
+	return 0;
+}
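+
+/* Sizing walk-through (illustrative): with the default max_frame_size
+ * of 4096 and a small RXBUF header, rx_buf_size is a little over 4KB,
+ * so eight buffers total roughly 32KB, comfortably under the 0x10000
+ * cap, and rx_buf_count stays 8.  At max_frame_size = 65535 a single
+ * buffer already exceeds the cap, and the integer division above can
+ * yield zero usable buffers.
+ */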
+
+static void rx_free_buffers(MGSLPC_INFO *info)
+{
+	kfree(info->rx_buf);
+	info->rx_buf = NULL;
+	kfree(info->flag_buf);
+	info->flag_buf = NULL;
+}
+
+static int claim_resources(MGSLPC_INFO *info)
+{
+	if (rx_alloc_buffers(info) < 0) {
+		printk("Can't allocate rx buffer %s\n", info->device_name);
+		release_resources(info);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static void release_resources(MGSLPC_INFO *info)
+{
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("release_resources(%s)\n", info->device_name);
+	rx_free_buffers(info);
+}
+
+/* Add the specified device instance data structure to the
+ * global linked list of devices and increment the device count.
+ *
+ * Arguments:		info	pointer to device instance data
+ */
+static int mgslpc_add_device(MGSLPC_INFO *info)
+{
+	MGSLPC_INFO *current_dev = NULL;
+	struct device *tty_dev;
+	int ret;
+
+	info->next_device = NULL;
+	info->line = mgslpc_device_count;
+	sprintf(info->device_name,"ttySLP%d",info->line);
+
+	if (info->line < MAX_DEVICE_COUNT) {
+		if (maxframe[info->line])
+			info->max_frame_size = maxframe[info->line];
+	}
+
+	mgslpc_device_count++;
+
+	if (!mgslpc_device_list)
+		mgslpc_device_list = info;
+	else {
+		current_dev = mgslpc_device_list;
+		while (current_dev->next_device)
+			current_dev = current_dev->next_device;
+		current_dev->next_device = info;
+	}
+
+	if (info->max_frame_size < 4096)
+		info->max_frame_size = 4096;
+	else if (info->max_frame_size > 65535)
+		info->max_frame_size = 65535;
+
+	printk("SyncLink PC Card %s:IO=%04X IRQ=%d\n",
+		info->device_name, info->io_base, info->irq_level);
+
+#if SYNCLINK_GENERIC_HDLC
+	ret = hdlcdev_init(info);
+	if (ret != 0)
+		goto failed;
+#endif
+
+	tty_dev = tty_port_register_device(&info->port, serial_driver, info->line,
+			&info->p_dev->dev);
+	if (IS_ERR(tty_dev)) {
+		ret = PTR_ERR(tty_dev);
+#if SYNCLINK_GENERIC_HDLC
+		hdlcdev_exit(info);
+#endif
+		goto failed;
+	}
+
+	return 0;
+
+failed:
+	if (current_dev)
+		current_dev->next_device = NULL;
+	else
+		mgslpc_device_list = NULL;
+	mgslpc_device_count--;
+	return ret;
+}
+
+static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
+{
+	MGSLPC_INFO *info = mgslpc_device_list;
+	MGSLPC_INFO *last = NULL;
+
+	while (info) {
+		if (info == remove_info) {
+			if (last)
+				last->next_device = info->next_device;
+			else
+				mgslpc_device_list = info->next_device;
+			tty_unregister_device(serial_driver, info->line);
+#if SYNCLINK_GENERIC_HDLC
+			hdlcdev_exit(info);
+#endif
+			release_resources(info);
+			tty_port_destroy(&info->port);
+			kfree(info);
+			mgslpc_device_count--;
+			return;
+		}
+		last = info;
+		info = info->next_device;
+	}
+}
+
+static const struct pcmcia_device_id mgslpc_ids[] = {
+	PCMCIA_DEVICE_MANF_CARD(0x02c5, 0x0050),
+	PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, mgslpc_ids);
+
+static struct pcmcia_driver mgslpc_driver = {
+	.owner		= THIS_MODULE,
+	.name		= "synclink_cs",
+	.probe		= mgslpc_probe,
+	.remove		= mgslpc_detach,
+	.id_table	= mgslpc_ids,
+	.suspend	= mgslpc_suspend,
+	.resume		= mgslpc_resume,
+};
+
+static const struct tty_operations mgslpc_ops = {
+	.open = mgslpc_open,
+	.close = mgslpc_close,
+	.write = mgslpc_write,
+	.put_char = mgslpc_put_char,
+	.flush_chars = mgslpc_flush_chars,
+	.write_room = mgslpc_write_room,
+	.chars_in_buffer = mgslpc_chars_in_buffer,
+	.flush_buffer = mgslpc_flush_buffer,
+	.ioctl = mgslpc_ioctl,
+	.throttle = mgslpc_throttle,
+	.unthrottle = mgslpc_unthrottle,
+	.send_xchar = mgslpc_send_xchar,
+	.break_ctl = mgslpc_break,
+	.wait_until_sent = mgslpc_wait_until_sent,
+	.set_termios = mgslpc_set_termios,
+	.stop = tx_pause,
+	.start = tx_release,
+	.hangup = mgslpc_hangup,
+	.tiocmget = tiocmget,
+	.tiocmset = tiocmset,
+	.get_icount = mgslpc_get_icount,
+	.proc_show = mgslpc_proc_show,
+};
+
+static int __init synclink_cs_init(void)
+{
+	int rc;
+
+	if (break_on_load) {
+		mgslpc_get_text_ptr();
+		BREAKPOINT();
+	}
+
+	serial_driver = tty_alloc_driver(MAX_DEVICE_COUNT,
+			TTY_DRIVER_REAL_RAW |
+			TTY_DRIVER_DYNAMIC_DEV);
+	if (IS_ERR(serial_driver)) {
+		rc = PTR_ERR(serial_driver);
+		goto err;
+	}
+
+	/* Initialize the tty_driver structure */
+	serial_driver->driver_name = "synclink_cs";
+	serial_driver->name = "ttySLP";
+	serial_driver->major = ttymajor;
+	serial_driver->minor_start = 64;
+	serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	serial_driver->subtype = SERIAL_TYPE_NORMAL;
+	serial_driver->init_termios = tty_std_termios;
+	serial_driver->init_termios.c_cflag =
+		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+	tty_set_operations(serial_driver, &mgslpc_ops);
+
+	rc = tty_register_driver(serial_driver);
+	if (rc < 0) {
+		printk(KERN_ERR "%s(%d):Couldn't register serial driver\n",
+				__FILE__, __LINE__);
+		goto err_put_tty;
+	}
+
+	rc = pcmcia_register_driver(&mgslpc_driver);
+	if (rc < 0)
+		goto err_unreg_tty;
+
+	printk(KERN_INFO "%s %s, tty major#%d\n", driver_name, driver_version,
+			serial_driver->major);
+
+	return 0;
+err_unreg_tty:
+	tty_unregister_driver(serial_driver);
+err_put_tty:
+	put_tty_driver(serial_driver);
+err:
+	return rc;
+}
+
+static void __exit synclink_cs_exit(void)
+{
+	pcmcia_unregister_driver(&mgslpc_driver);
+	tty_unregister_driver(serial_driver);
+	put_tty_driver(serial_driver);
+}
+
+module_init(synclink_cs_init);
+module_exit(synclink_cs_exit);
+
+static void mgslpc_set_rate(MGSLPC_INFO *info, unsigned char channel, unsigned int rate)
+{
+	unsigned int M, N;
+	unsigned char val;
+
+	/* note:standard BRG mode is broken in V3.2 chip
+	 * so enhanced mode is always used
+	 */
+
+	if (rate) {
+		N = 3686400 / rate;
+		if (!N)
+			N = 1;
+		N >>= 1;
+		for (M = 1; N > 64 && M < 16; M++)
+			N >>= 1;
+		N--;
+
+		/* BGR[5..0] = N
+		 * BGR[9..6] = M
+		 * BGR[7..0] contained in BGR register
+		 * BGR[9..8] contained in CCR2[7..6]
+		 * divisor = (N+1)*2^M
+		 *
+		 * Note: M *must* not be zero (causes asymmetric duty cycle)
+		 */
+		write_reg(info, (unsigned char) (channel + BGR),
+				  (unsigned char) ((M << 6) + N));
+		val = read_reg(info, (unsigned char) (channel + CCR2)) & 0x3f;
+		val |= ((M << 4) & 0xc0);
+		write_reg(info, (unsigned char) (channel + CCR2), val);
+	}
+}
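+
+/* Worked example (illustrative) of the math above: for rate = 9600,
+ * N = 3686400/9600 = 384, halved to 192 to compensate for M starting
+ * at 1; the loop then halves N to 96 and 48, leaving M = 3, and N--
+ * gives N = 47.  The programmed divisor is (N+1)*2^M = 48*8 = 384, so
+ * the BRG output is 3686400/384 = 9600 as requested.
+ */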
+
+/* Enable the AUX clock output at the specified frequency.
+ */
+static void enable_auxclk(MGSLPC_INFO *info)
+{
+	unsigned char val;
+
+	/* MODE
+	 *
+	 * 07..06  MDS[1..0] 10 = transparent HDLC mode
+	 * 05      ADM Address Mode, 0 = no addr recognition
+	 * 04      TMD Timer Mode, 0 = external
+	 * 03      RAC Receiver Active, 0 = inactive
+	 * 02      RTS 0=RTS active during xmit, 1=RTS always active
+	 * 01      TRS Timer Resolution, 1=512
+	 * 00      TLP Test Loop, 0 = no loop
+	 *
+	 * 1000 0010
+	 */
+	val = 0x82;
+
+	/* channel B RTS is used to enable AUXCLK driver on SP505 */
+	if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed)
+		val |= BIT2;
+	write_reg(info, CHB + MODE, val);
+
+	/* CCR0
+	 *
+	 * 07      PU Power Up, 1=active, 0=power down
+	 * 06      MCE Master Clock Enable, 1=enabled
+	 * 05      Reserved, 0
+	 * 04..02  SC[2..0] Encoding
+	 * 01..00  SM[1..0] Serial Mode, 00=HDLC
+	 *
+	 * 1100 0000
+	 */
+	write_reg(info, CHB + CCR0, 0xc0);
+
+	/* CCR1
+	 *
+	 * 07      SFLG Shared Flag, 0 = disable shared flags
+	 * 06      GALP Go Active On Loop, 0 = not used
+	 * 05      GLP Go On Loop, 0 = not used
+	 * 04      ODS Output Driver Select, 1=TxD is push-pull output
+	 * 03      ITF Interframe Time Fill, 0=mark, 1=flag
+	 * 02..00  CM[2..0] Clock Mode
+	 *
+	 * 0001 0111
+	 */
+	write_reg(info, CHB + CCR1, 0x17);
+
+	/* CCR2 (Channel B)
+	 *
+	 * 07..06  BGR[9..8] Baud rate bits 9..8
+	 * 05      BDF Baud rate divisor factor, 0=1, 1=BGR value
+	 * 04      SSEL Clock source select, 1=submode b
+	 * 03      TOE 0=TxCLK is input, 1=TxCLK is output
+	 * 02      RWX Read/Write Exchange 0=disabled
+	 * 01      C32, CRC select, 0=CRC-16, 1=CRC-32
+	 * 00      DIV, data inversion 0=disabled, 1=enabled
+	 *
+	 * 0011 1000
+	 */
+	if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed)
+		write_reg(info, CHB + CCR2, 0x38);
+	else
+		write_reg(info, CHB + CCR2, 0x30);
+
+	/* CCR4
+	 *
+	 * 07      MCK4 Master Clock Divide by 4, 1=enabled
+	 * 06      EBRG Enhanced Baud Rate Generator Mode, 1=enabled
+	 * 05      TST1 Test Pin, 0=normal operation
+	 * 04      ICD Invert Carrier Detect, 1=enabled (active low)
+	 * 03..02  Reserved, must be 0
+	 * 01..00  RFT[1..0] RxFIFO Threshold 00=32 bytes
+	 *
+	 * 0101 0000
+	 */
+	write_reg(info, CHB + CCR4, 0x50);
+
+	/* if auxclk not enabled, set internal BRG so
+	 * CTS transitions can be detected (requires TxC)
+	 */
+	if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed)
+		mgslpc_set_rate(info, CHB, info->params.clock_speed);
+	else
+		mgslpc_set_rate(info, CHB, 921600);
+}
+
+static void loopback_enable(MGSLPC_INFO *info)
+{
+	unsigned char val;
+
+	/* CCR1:02..00  CM[2..0] Clock Mode = 111 (clock mode 7) */
+	val = read_reg(info, CHA + CCR1) | (BIT2 | BIT1 | BIT0);
+	write_reg(info, CHA + CCR1, val);
+
+	/* CCR2:04 SSEL Clock source select, 1=submode b */
+	val = read_reg(info, CHA + CCR2) | (BIT4 | BIT5);
+	write_reg(info, CHA + CCR2, val);
+
+	/* set LinkSpeed if available, otherwise default to 2Mbps */
+	if (info->params.clock_speed)
+		mgslpc_set_rate(info, CHA, info->params.clock_speed);
+	else
+		mgslpc_set_rate(info, CHA, 1843200);
+
+	/* MODE:00 TLP Test Loop, 1=loopback enabled */
+	val = read_reg(info, CHA + MODE) | BIT0;
+	write_reg(info, CHA + MODE, val);
+}
+
+static void hdlc_mode(MGSLPC_INFO *info)
+{
+	unsigned char val;
+	unsigned char clkmode, clksubmode;
+
+	/* disable all interrupts */
+	irq_disable(info, CHA, 0xffff);
+	irq_disable(info, CHB, 0xffff);
+	port_irq_disable(info, 0xff);
+
+	/* assume clock mode 0a, rcv=RxC xmt=TxC */
+	clkmode = clksubmode = 0;
+	if (info->params.flags & HDLC_FLAG_RXC_DPLL
+	    && info->params.flags & HDLC_FLAG_TXC_DPLL) {
+		/* clock mode 7a, rcv = DPLL, xmt = DPLL */
+		clkmode = 7;
+	} else if (info->params.flags & HDLC_FLAG_RXC_BRG
+		 && info->params.flags & HDLC_FLAG_TXC_BRG) {
+		/* clock mode 7b, rcv = BRG, xmt = BRG */
+		clkmode = 7;
+		clksubmode = 1;
+	} else if (info->params.flags & HDLC_FLAG_RXC_DPLL) {
+		if (info->params.flags & HDLC_FLAG_TXC_BRG) {
+			/* clock mode 6b, rcv = DPLL, xmt = BRG/16 */
+			clkmode = 6;
+			clksubmode = 1;
+		} else {
+			/* clock mode 6a, rcv = DPLL, xmt = TxC */
+			clkmode = 6;
+		}
+	} else if (info->params.flags & HDLC_FLAG_TXC_BRG) {
+		/* clock mode 0b, rcv = RxC, xmt = BRG */
+		clksubmode = 1;
+	}
+
+	/* MODE
+	 *
+	 * 07..06  MDS[1..0] 10 = transparent HDLC mode
+	 * 05      ADM Address Mode, 0 = no addr recognition
+	 * 04      TMD Timer Mode, 0 = external
+	 * 03      RAC Receiver Active, 0 = inactive
+	 * 02      RTS 0=RTS active during xmit, 1=RTS always active
+	 * 01      TRS Timer Resolution, 1=512
+	 * 00      TLP Test Loop, 0 = no loop
+	 *
+	 * 1000 0010
+	 */
+	val = 0x82;
+	if (info->params.loopback)
+		val |= BIT0;
+
+	/* preserve RTS state */
+	if (info->serial_signals & SerialSignal_RTS)
+		val |= BIT2;
+	write_reg(info, CHA + MODE, val);
+
+	/* CCR0
+	 *
+	 * 07      PU Power Up, 1=active, 0=power down
+	 * 06      MCE Master Clock Enable, 1=enabled
+	 * 05      Reserved, 0
+	 * 04..02  SC[2..0] Encoding
+	 * 01..00  SM[1..0] Serial Mode, 00=HDLC
+	 *
+	 * 1100 0000
+	 */
+	val = 0xc0;
+	switch (info->params.encoding)
+	{
+	case HDLC_ENCODING_NRZI:
+		val |= BIT3;
+		break;
+	case HDLC_ENCODING_BIPHASE_SPACE:
+		val |= BIT4;
+		break;		// FM0
+	case HDLC_ENCODING_BIPHASE_MARK:
+		val |= BIT4 | BIT2;
+		break;		// FM1
+	case HDLC_ENCODING_BIPHASE_LEVEL:
+		val |= BIT4 | BIT3;
+		break;		// Manchester
+	}
+	write_reg(info, CHA + CCR0, val);
+
+	/* CCR1
+	 *
+	 * 07      SFLG Shared Flag, 0 = disable shared flags
+	 * 06      GALP Go Active On Loop, 0 = not used
+	 * 05      GLP Go On Loop, 0 = not used
+	 * 04      ODS Output Driver Select, 1=TxD is push-pull output
+	 * 03      ITF Interframe Time Fill, 0=mark, 1=flag
+	 * 02..00  CM[2..0] Clock Mode
+	 *
+	 * 0001 0000
+	 */
+	val = 0x10 + clkmode;
+	write_reg(info, CHA + CCR1, val);
+
+	/* CCR2
+	 *
+	 * 07..06  BGR[9..8] Baud rate bits 9..8
+	 * 05      BDF Baud rate divisor factor, 0=1, 1=BGR value
+	 * 04      SSEL Clock source select, 1=submode b
+	 * 03      TOE 0=TxCLK is input, 1=TxCLK is output
+	 * 02      RWX Read/Write Exchange 0=disabled
+	 * 01      C32, CRC select, 0=CRC-16, 1=CRC-32
+	 * 00      DIV, data inversion 0=disabled, 1=enabled
+	 *
+	 * 0000 0000
+	 */
+	val = 0x00;
+	if (clkmode == 2 || clkmode == 3 || clkmode == 6
+	    || clkmode == 7 || (clkmode == 0 && clksubmode == 1))
+		val |= BIT5;
+	if (clksubmode)
+		val |= BIT4;
+	if (info->params.crc_type == HDLC_CRC_32_CCITT)
+		val |= BIT1;
+	if (info->params.encoding == HDLC_ENCODING_NRZB)
+		val |= BIT0;
+	write_reg(info, CHA + CCR2, val);
+
+	/* CCR3
+	 *
+	 * 07..06  PRE[1..0] Preamble count 00=1, 01=2, 10=4, 11=8
+	 * 05      EPT Enable preamble transmission, 1=enabled
+	 * 04      RADD Receive address pushed to FIFO, 0=disabled
+	 * 03      CRL CRC Reset Level, 0=FFFF
+	 * 02      RCRC Rx CRC 0=On 1=Off
+	 * 01      TCRC Tx CRC 0=On 1=Off
+	 * 00      PSD DPLL Phase Shift Disable
+	 *
+	 * 0000 0000
+	 */
+	val = 0x00;
+	if (info->params.crc_type == HDLC_CRC_NONE)
+		val |= BIT2 | BIT1;
+	if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE)
+		val |= BIT5;
+	switch (info->params.preamble_length)
+	{
+	case HDLC_PREAMBLE_LENGTH_16BITS:
+		val |= BIT6;
+		break;
+	case HDLC_PREAMBLE_LENGTH_32BITS:
+		val |= BIT7;	/* PRE[1..0] = 10 -> 4 bytes (32 bits) */
+		break;
+	case HDLC_PREAMBLE_LENGTH_64BITS:
+		val |= BIT7 | BIT6;
+		break;
+	}
+	write_reg(info, CHA + CCR3, val);
+
+	/* PRE - Preamble pattern */
+	val = 0;
+	switch (info->params.preamble)
+	{
+	case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break;
+	case HDLC_PREAMBLE_PATTERN_10:    val = 0xaa; break;
+	case HDLC_PREAMBLE_PATTERN_01:    val = 0x55; break;
+	case HDLC_PREAMBLE_PATTERN_ONES:  val = 0xff; break;
+	}
+	write_reg(info, CHA + PRE, val);
+
+	/* CCR4
+	 *
+	 * 07      MCK4 Master Clock Divide by 4, 1=enabled
+	 * 06      EBRG Enhanced Baud Rate Generator Mode, 1=enabled
+	 * 05      TST1 Test Pin, 0=normal operation
+	 * 04      ICD Invert Carrier Detect, 1=enabled (active low)
+	 * 03..02  Reserved, must be 0
+	 * 01..00  RFT[1..0] RxFIFO Threshold 00=32 bytes
+	 *
+	 * 0101 0000
+	 */
+	val = 0x50;
+	write_reg(info, CHA + CCR4, val);
+	if (info->params.flags & HDLC_FLAG_RXC_DPLL)
+		mgslpc_set_rate(info, CHA, info->params.clock_speed * 16);
+	else
+		mgslpc_set_rate(info, CHA, info->params.clock_speed);
+
+	/* RLCR Receive length check register
+	 *
+	 * 7     1=enable receive length check
+	 * 6..0  Max frame length = (RL + 1) * 32
+	 */
+	write_reg(info, CHA + RLCR, 0);
+
+	/* XBCH Transmit Byte Count High
+	 *
+	 * 07      DMA mode, 0 = interrupt driven
+	 * 06      NRM, 0=ABM (ignored)
+	 * 05      CAS Carrier Auto Start
+	 * 04      XC Transmit Continuously (ignored)
+	 * 03..00  XBC[10..8] Transmit byte count bits 10..8
+	 *
+	 * 0000 0000
+	 */
+	val = 0x00;
+	if (info->params.flags & HDLC_FLAG_AUTO_DCD)
+		val |= BIT5;
+	write_reg(info, CHA + XBCH, val);
+	enable_auxclk(info);
+	if (info->params.loopback || info->testing_irq)
+		loopback_enable(info);
+	if (info->params.flags & HDLC_FLAG_AUTO_CTS)
+	{
+		irq_enable(info, CHB, IRQ_CTS);
+		/* PVR[3] 1=AUTO CTS active */
+		set_reg_bits(info, CHA + PVR, BIT3);
+	} else
+		clear_reg_bits(info, CHA + PVR, BIT3);
+
+	irq_enable(info, CHA,
+			 IRQ_RXEOM | IRQ_RXFIFO | IRQ_ALLSENT |
+			 IRQ_UNDERRUN | IRQ_TXFIFO);
+	issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET);
+	wait_command_complete(info, CHA);
+	read_reg16(info, CHA + ISR);	/* clear pending IRQs */
+
+	/* Master clock mode enabled above to allow reset commands
+	 * to complete even if no data clocks are present.
+	 *
+	 * Disable master clock mode for normal communications because
+	 * V3.2 of the ESCC2 has a bug that prevents the transmit all sent
+	 * IRQ when in master clock mode.
+	 *
+	 * Leave master clock mode enabled for IRQ test because the
+	 * timer IRQ used by the test can only happen in master clock mode.
+	 */
+	if (!info->testing_irq)
+		clear_reg_bits(info, CHA + CCR0, BIT6);
+
+	tx_set_idle(info);
+
+	tx_stop(info);
+	rx_stop(info);
+}
+
+static void rx_stop(MGSLPC_INFO *info)
+{
+	if (debug_level >= DEBUG_LEVEL_ISR)
+		printk("%s(%d):rx_stop(%s)\n",
+			 __FILE__, __LINE__, info->device_name);
+
+	/* MODE:03 RAC Receiver Active, 0=inactive */
+	clear_reg_bits(info, CHA + MODE, BIT3);
+
+	info->rx_enabled = false;
+	info->rx_overflow = false;
+}
+
+static void rx_start(MGSLPC_INFO *info)
+{
+	if (debug_level >= DEBUG_LEVEL_ISR)
+		printk("%s(%d):rx_start(%s)\n",
+			 __FILE__, __LINE__, info->device_name);
+
+	rx_reset_buffers(info);
+	info->rx_enabled = false;
+	info->rx_overflow = false;
+
+	/* MODE:03 RAC Receiver Active, 1=active */
+	set_reg_bits(info, CHA + MODE, BIT3);
+
+	info->rx_enabled = true;
+}
+
+static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+	if (debug_level >= DEBUG_LEVEL_ISR)
+		printk("%s(%d):tx_start(%s)\n",
+			 __FILE__, __LINE__, info->device_name);
+
+	if (info->tx_count) {
+		/* If auto RTS enabled and RTS is inactive, then assert */
+		/* RTS and set a flag indicating that the driver should */
+		/* negate RTS when the transmission completes. */
+		info->drop_rts_on_tx_done = false;
+
+		if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
+			get_signals(info);
+			if (!(info->serial_signals & SerialSignal_RTS)) {
+				info->serial_signals |= SerialSignal_RTS;
+				set_signals(info);
+				info->drop_rts_on_tx_done = true;
+			}
+		}
+
+		if (info->params.mode == MGSL_MODE_ASYNC) {
+			if (!info->tx_active) {
+				info->tx_active = true;
+				tx_ready(info, tty);
+			}
+		} else {
+			info->tx_active = true;
+			tx_ready(info, tty);
+			mod_timer(&info->tx_timer, jiffies +
+					msecs_to_jiffies(5000));
+		}
+	}
+
+	if (!info->tx_enabled)
+		info->tx_enabled = true;
+}
+
+static void tx_stop(MGSLPC_INFO *info)
+{
+	if (debug_level >= DEBUG_LEVEL_ISR)
+		printk("%s(%d):tx_stop(%s)\n",
+			 __FILE__, __LINE__, info->device_name);
+
+	del_timer(&info->tx_timer);
+
+	info->tx_enabled = false;
+	info->tx_active = false;
+}
+
+/* Reset the adapter to a known state and prepare it for further use.
+ */
+static void reset_device(MGSLPC_INFO *info)
+{
+	/* power up both channels (set BIT7) */
+	write_reg(info, CHA + CCR0, 0x80);
+	write_reg(info, CHB + CCR0, 0x80);
+	write_reg(info, CHA + MODE, 0);
+	write_reg(info, CHB + MODE, 0);
+
+	/* disable all interrupts */
+	irq_disable(info, CHA, 0xffff);
+	irq_disable(info, CHB, 0xffff);
+	port_irq_disable(info, 0xff);
+
+	/* PCR Port Configuration Register
+	 *
+	 * 07..04  DEC[3..0] Serial I/F select outputs
+	 * 03      output, 1=AUTO CTS control enabled
+	 * 02      RI Ring Indicator input 0=active
+	 * 01      DSR input 0=active
+	 * 00      DTR output 0=active
+	 *
+	 * 0000 0110
+	 */
+	write_reg(info, PCR, 0x06);
+
+	/* PVR Port Value Register
+	 *
+	 * 07..04  DEC[3..0] Serial I/F select (0000=disabled)
+	 * 03      AUTO CTS output 1=enabled
+	 * 02      RI Ring Indicator input
+	 * 01      DSR input
+	 * 00      DTR output (1=inactive)
+	 *
+	 * 0000 0001
+	 */
+//	write_reg(info, PVR, PVR_DTR);
+
+	/* IPC Interrupt Port Configuration
+	 *
+	 * 07      VIS 1=Masked interrupts visible
+	 * 06..05  Reserved, 0
+	 * 04..03  SLA Slave address, 00 ignored
+	 * 02      CASM Cascading Mode, 1=daisy chain
+	 * 01..00  IC[1..0] Interrupt Config, 01=push-pull output, active low
+	 *
+	 * 0000 0101
+	 */
+	write_reg(info, IPC, 0x05);
+}
+
+static void async_mode(MGSLPC_INFO *info)
+{
+	unsigned char val;
+
+	/* disable all interrupts */
+	irq_disable(info, CHA, 0xffff);
+	irq_disable(info, CHB, 0xffff);
+	port_irq_disable(info, 0xff);
+
+	/* MODE
+	 *
+	 * 07      Reserved, 0
+	 * 06      FRTS RTS State, 0=active
+	 * 05      FCTS Flow Control on CTS
+	 * 04      FLON Flow Control Enable
+	 * 03      RAC Receiver Active, 0 = inactive
+	 * 02      RTS 0=Auto RTS, 1=manual RTS
+	 * 01      TRS Timer Resolution, 1=512
+	 * 00      TLP Test Loop, 0 = no loop
+	 *
+	 * 0000 0110
+	 */
+	val = 0x06;
+	if (info->params.loopback)
+		val |= BIT0;
+
+	/* preserve RTS state */
+	if (!(info->serial_signals & SerialSignal_RTS))
+		val |= BIT6;
+	write_reg(info, CHA + MODE, val);
+
+	/* CCR0
+	 *
+	 * 07      PU Power Up, 1=active, 0=power down
+	 * 06      MCE Master Clock Enable, 1=enabled
+	 * 05      Reserved, 0
+	 * 04..02  SC[2..0] Encoding, 000=NRZ
+	 * 01..00  SM[1..0] Serial Mode, 11=Async
+	 *
+	 * 1000 0011
+	 */
+	write_reg(info, CHA + CCR0, 0x83);
+
+	/* CCR1
+	 *
+	 * 07..05  Reserved, 0
+	 * 04      ODS Output Driver Select, 1=TxD is push-pull output
+	 * 03      BCR Bit Clock Rate, 1=16x
+	 * 02..00  CM[2..0] Clock Mode, 111=BRG
+	 *
+	 * 0001 1111
+	 */
+	write_reg(info, CHA + CCR1, 0x1f);
+
+	/* CCR2 (channel A)
+	 *
+	 * 07..06  BGR[9..8] Baud rate bits 9..8
+	 * 05      BDF Baud rate divisor factor, 0=1, 1=BGR value
+	 * 04      SSEL Clock source select, 1=submode b
+	 * 03      TOE 0=TxCLK is input, 1=TxCLK is output
+	 * 02      RWX Read/Write Exchange 0=disabled
+	 * 01      Reserved, 0
+	 * 00      DIV, data inversion 0=disabled, 1=enabled
+	 *
+	 * 0001 0000
+	 */
+	write_reg(info, CHA + CCR2, 0x10);
+
+	/* CCR3
+	 *
+	 * 07..01  Reserved, 0
+	 * 00      PSD DPLL Phase Shift Disable
+	 *
+	 * 0000 0000
+	 */
+	write_reg(info, CHA + CCR3, 0);
+
+	/* CCR4
+	 *
+	 * 07      MCK4 Master Clock Divide by 4, 1=enabled
+	 * 06      EBRG Enhanced Baud Rate Generator Mode, 1=enabled
+	 * 05      TST1 Test Pin, 0=normal operation
+	 * 04      ICD Invert Carrier Detect, 1=enabled (active low)
+	 * 03..00  Reserved, must be 0
+	 *
+	 * 0101 0000
+	 */
+	write_reg(info, CHA + CCR4, 0x50);
+	mgslpc_set_rate(info, CHA, info->params.data_rate * 16);
+
+	/* DAFO Data Format
+	 *
+	 * 07      Reserved, 0
+	 * 06      XBRK transmit break, 0=normal operation
+	 * 05      Stop bits (0=1, 1=2)
+	 * 04..03  PAR[1..0] Parity (01=odd, 10=even)
+	 * 02      PAREN Parity Enable
+	 * 01..00  CHL[1..0] Character Length (00=8, 01=7)
+	 *
+	 */
+	val = 0x00;
+	if (info->params.data_bits != 8)
+		val |= BIT0;	/* 7 bits */
+	if (info->params.stop_bits != 1)
+		val |= BIT5;
+	if (info->params.parity != ASYNC_PARITY_NONE)
+	{
+		val |= BIT2;	/* Parity enable */
+		if (info->params.parity == ASYNC_PARITY_ODD)
+			val |= BIT3;
+		else
+			val |= BIT4;
+	}
+	write_reg(info, CHA + DAFO, val);
+
+	/* RFC Rx FIFO Control
+	 *
+	 * 07      Reserved, 0
+	 * 06      DPS, 1=parity bit not stored in data byte
+	 * 05      DXS, 0=all data stored in FIFO (including XON/XOFF)
+	 * 04      RFDF Rx FIFO Data Format, 1=status byte stored in FIFO
+	 * 03..02  RFTH[1..0], rx threshold, 11=16 status + 16 data byte
+	 * 01      Reserved, 0
+	 * 00      TCDE Terminate Char Detect Enable, 0=disabled
+	 *
+	 * 0101 1100
+	 */
+	write_reg(info, CHA + RFC, 0x5c);
+
+	/* RLCR Receive length check register
+	 *
+	 * Max frame length = (RL + 1) * 32
+	 */
+	write_reg(info, CHA + RLCR, 0);
+
+	/* XBCH Transmit Byte Count High
+	 *
+	 * 07      DMA mode, 0 = interrupt driven
+	 * 06      NRM, 0=ABM (ignored)
+	 * 05      CAS Carrier Auto Start
+	 * 04      XC Transmit Continuously (ignored)
+	 * 03..00  XBC[10..8] Transmit byte count bits 10..8
+	 *
+	 * 0000 0000
+	 */
+	val = 0x00;
+	if (info->params.flags & HDLC_FLAG_AUTO_DCD)
+		val |= BIT5;
+	write_reg(info, CHA + XBCH, val);
+	if (info->params.flags & HDLC_FLAG_AUTO_CTS)
+		irq_enable(info, CHA, IRQ_CTS);
+
+	/* MODE:03 RAC Receiver Active, 1=active */
+	set_reg_bits(info, CHA + MODE, BIT3);
+	enable_auxclk(info);
+	if (info->params.flags & HDLC_FLAG_AUTO_CTS) {
+		irq_enable(info, CHB, IRQ_CTS);
+		/* PVR[3] 1=AUTO CTS active */
+		set_reg_bits(info, CHA + PVR, BIT3);
+	} else
+		clear_reg_bits(info, CHA + PVR, BIT3);
+	irq_enable(info, CHA,
+			  IRQ_RXEOM | IRQ_RXFIFO | IRQ_BREAK_ON | IRQ_RXTIME |
+			  IRQ_ALLSENT | IRQ_TXFIFO);
+	issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET);
+	wait_command_complete(info, CHA);
+	read_reg16(info, CHA + ISR);	/* clear pending IRQs */
+}
+
+/* Set the HDLC idle mode for the transmitter.
+ */
+static void tx_set_idle(MGSLPC_INFO *info)
+{
+	/* Note: the ESCC2 only supports the flag and ones (mark) idle modes */
+	if (info->idle_mode == HDLC_TXIDLE_FLAGS)
+		set_reg_bits(info, CHA + CCR1, BIT3);
+	else
+		clear_reg_bits(info, CHA + CCR1, BIT3);
+}
+
+/* get state of the V24 status (input) signals.
+ */
+static void get_signals(MGSLPC_INFO *info)
+{
+	unsigned char status = 0;
+
+	/* preserve RTS and DTR */
+	info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
+
+	if (read_reg(info, CHB + VSTR) & BIT7)
+		info->serial_signals |= SerialSignal_DCD;
+	if (read_reg(info, CHB + STAR) & BIT1)
+		info->serial_signals |= SerialSignal_CTS;
+
+	status = read_reg(info, CHA + PVR);
+	if (!(status & PVR_RI))
+		info->serial_signals |= SerialSignal_RI;
+	if (!(status & PVR_DSR))
+		info->serial_signals |= SerialSignal_DSR;
+}
+
+/* Set the state of RTS and DTR based on contents of
+ * serial_signals member of device extension.
+ */
+static void set_signals(MGSLPC_INFO *info)
+{
+	unsigned char val;
+
+	val = read_reg(info, CHA + MODE);
+	if (info->params.mode == MGSL_MODE_ASYNC) {
+		if (info->serial_signals & SerialSignal_RTS)
+			val &= ~BIT6;
+		else
+			val |= BIT6;
+	} else {
+		if (info->serial_signals & SerialSignal_RTS)
+			val |= BIT2;
+		else
+			val &= ~BIT2;
+	}
+	write_reg(info, CHA + MODE, val);
+
+	if (info->serial_signals & SerialSignal_DTR)
+		clear_reg_bits(info, CHA + PVR, PVR_DTR);
+	else
+		set_reg_bits(info, CHA + PVR, PVR_DTR);
+}
+
+static void rx_reset_buffers(MGSLPC_INFO *info)
+{
+	RXBUF *buf;
+	int i;
+
+	info->rx_put = 0;
+	info->rx_get = 0;
+	info->rx_frame_count = 0;
+	for (i = 0; i < info->rx_buf_count; i++) {
+		buf = (RXBUF *)(info->rx_buf + (i * info->rx_buf_size));
+		buf->status = buf->count = 0;
+	}
+}
+
+/* Attempt to return a received HDLC frame
+ * Only frames received without errors are returned.
+ *
+ * Returns true if frame returned, otherwise false
+ */
+static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+	unsigned short status;
+	RXBUF *buf;
+	unsigned int framesize = 0;
+	unsigned long flags;
+	bool return_frame = false;
+
+	if (info->rx_frame_count == 0)
+		return false;
+
+	buf = (RXBUF *)(info->rx_buf + (info->rx_get * info->rx_buf_size));
+
+	status = buf->status;
+
+	/* 07  VFR  1=valid frame
+	 * 06  RDO  1=data overrun
+	 * 05  CRC  1=OK, 0=error
+	 * 04  RAB  1=frame aborted
+	 */
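+	/* For example, status 0xA0 (VFR set, CRC ok) is a clean frame;
+	 * 0xE0 is counted below as an overrun and 0x80 as a CRC error,
+	 * while a clear VFR or a set RAB bit counts as an abort.
+	 */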
+	if ((status & 0xf0) != 0xA0) {
+		if (!(status & BIT7) || (status & BIT4))
+			info->icount.rxabort++;
+		else if (status & BIT6)
+			info->icount.rxover++;
+		else if (!(status & BIT5)) {
+			info->icount.rxcrc++;
+			if (info->params.crc_type & HDLC_CRC_RETURN_EX)
+				return_frame = true;
+		}
+		framesize = 0;
+#if SYNCLINK_GENERIC_HDLC
+		{
+			info->netdev->stats.rx_errors++;
+			info->netdev->stats.rx_frame_errors++;
+		}
+#endif
+	} else
+		return_frame = true;
+
+	if (return_frame)
+		framesize = buf->count;
+
+	if (debug_level >= DEBUG_LEVEL_BH)
+		printk("%s(%d):rx_get_frame(%s) status=%04X size=%d\n",
+			__FILE__, __LINE__, info->device_name, status, framesize);
+
+	if (debug_level >= DEBUG_LEVEL_DATA)
+		trace_block(info, buf->data, framesize, 0);
+
+	if (framesize) {
+		if ((info->params.crc_type & HDLC_CRC_RETURN_EX &&
+		      framesize+1 > info->max_frame_size) ||
+		    framesize > info->max_frame_size)
+			info->icount.rxlong++;
+		else {
+			if (status & BIT5)
+				info->icount.rxok++;
+
+			if (info->params.crc_type & HDLC_CRC_RETURN_EX) {
+				*(buf->data + framesize) = status & BIT5 ? RX_OK:RX_CRC_ERROR;
+				++framesize;
+			}
+
+#if SYNCLINK_GENERIC_HDLC
+			if (info->netcount)
+				hdlcdev_rx(info, buf->data, framesize);
+			else
+#endif
+				ldisc_receive_buf(tty, buf->data, info->flag_buf, framesize);
+		}
+	}
+
+	spin_lock_irqsave(&info->lock, flags);
+	buf->status = buf->count = 0;
+	info->rx_frame_count--;
+	info->rx_get++;
+	if (info->rx_get >= info->rx_buf_count)
+		info->rx_get = 0;
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	return true;
+}
+
+static bool register_test(MGSLPC_INFO *info)
+{
+	static unsigned char patterns[] =
+	    { 0x00, 0xff, 0xaa, 0x55, 0x69, 0x96, 0x0f };
+	static unsigned int count = ARRAY_SIZE(patterns);
+	unsigned int i;
+	bool rc = true;
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->lock, flags);
+	reset_device(info);
+
+	for (i = 0; i < count; i++) {
+		write_reg(info, XAD1, patterns[i]);
+		write_reg(info, XAD2, patterns[(i + 1) % count]);
+		if ((read_reg(info, XAD1) != patterns[i]) ||
+		    (read_reg(info, XAD2) != patterns[(i + 1) % count])) {
+			rc = false;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&info->lock, flags);
+	return rc;
+}
+
+static bool irq_test(MGSLPC_INFO *info)
+{
+	unsigned long end_time;
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->lock, flags);
+	reset_device(info);
+
+	info->testing_irq = true;
+	hdlc_mode(info);
+
+	info->irq_occurred = false;
+
+	/* init hdlc mode */
+
+	irq_enable(info, CHA, IRQ_TIMER);
+	write_reg(info, CHA + TIMR, 0);	/* 512 cycles */
+	issue_command(info, CHA, CMD_START_TIMER);
+
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	end_time = 100;
+	while (end_time-- && !info->irq_occurred)
+		msleep_interruptible(10);
+
+	info->testing_irq = false;
+
+	spin_lock_irqsave(&info->lock, flags);
+	reset_device(info);
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	return info->irq_occurred;
+}
+
+static int adapter_test(MGSLPC_INFO *info)
+{
+	if (!register_test(info)) {
+		info->init_error = DiagStatus_AddressFailure;
+		printk("%s(%d):Register test failure for device %s Addr=%04X\n",
+			__FILE__, __LINE__, info->device_name, (unsigned short)(info->io_base));
+		return -ENODEV;
+	}
+
+	if (!irq_test(info)) {
+		info->init_error = DiagStatus_IrqFailure;
+		printk("%s(%d):Interrupt test failure for device %s IRQ=%d\n",
+			__FILE__, __LINE__, info->device_name, (unsigned short)(info->irq_level));
+		return -ENODEV;
+	}
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):device %s passed diagnostics\n",
+			__FILE__, __LINE__, info->device_name);
+	return 0;
+}
+
+static void trace_block(MGSLPC_INFO *info, const char *data, int count, int xmit)
+{
+	int i;
+	int linecount;
+	if (xmit)
+		printk("%s tx data:\n", info->device_name);
+	else
+		printk("%s rx data:\n", info->device_name);
+
+	while (count) {
+		if (count > 16)
+			linecount = 16;
+		else
+			linecount = count;
+
+		for (i = 0; i < linecount; i++)
+			printk("%02X ", (unsigned char)data[i]);
+		for (; i < 17; i++)
+			printk("   ");
+		for (i = 0; i < linecount; i++) {
+			if (data[i] >= 040 && data[i] <= 0176)
+				printk("%c", data[i]);
+			else
+				printk(".");
+		}
+		printk("\n");
+
+		data  += linecount;
+		count -= linecount;
+	}
+}
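+
+/* Illustrative sample of the dump format produced above: each line
+ * shows up to 16 bytes in hex, padded to a fixed width, followed by a
+ * printable-ASCII rendering of the same bytes, e.g.
+ *
+ *   ttySLP0 rx data:
+ *   7E FF 03 C0 21 01 00           ~...!..
+ */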
+
+/* HDLC frame time out
+ * update stats and do tx completion processing
+ */
+static void tx_timeout(struct timer_list *t)
+{
+	MGSLPC_INFO *info = from_timer(info, t, tx_timer);
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):tx_timeout(%s)\n",
+			__FILE__, __LINE__, info->device_name);
+	if (info->tx_active &&
+	    info->params.mode == MGSL_MODE_HDLC) {
+		info->icount.txtimeout++;
+	}
+	spin_lock_irqsave(&info->lock, flags);
+	info->tx_active = false;
+	info->tx_count = info->tx_put = info->tx_get = 0;
+
+	spin_unlock_irqrestore(&info->lock, flags);
+
+#if SYNCLINK_GENERIC_HDLC
+	if (info->netcount)
+		hdlcdev_tx_done(info);
+	else
+#endif
+	{
+		struct tty_struct *tty = tty_port_tty_get(&info->port);
+		bh_transmit(info, tty);
+		tty_kref_put(tty);
+	}
+}
+
+#if SYNCLINK_GENERIC_HDLC
+
+/**
+ * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
+ * set encoding and frame check sequence (FCS) options
+ *
+ * dev       pointer to network device structure
+ * encoding  serial encoding setting
+ * parity    FCS setting
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
+			  unsigned short parity)
+{
+	MGSLPC_INFO *info = dev_to_port(dev);
+	struct tty_struct *tty;
+	unsigned char  new_encoding;
+	unsigned short new_crctype;
+
+	/* return error if TTY interface open */
+	if (info->port.count)
+		return -EBUSY;
+
+	switch (encoding)
+	{
+	case ENCODING_NRZ:        new_encoding = HDLC_ENCODING_NRZ; break;
+	case ENCODING_NRZI:       new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
+	case ENCODING_FM_MARK:    new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
+	case ENCODING_FM_SPACE:   new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
+	case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
+	default: return -EINVAL;
+	}
+
+	switch (parity)
+	{
+	case PARITY_NONE:            new_crctype = HDLC_CRC_NONE; break;
+	case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
+	case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
+	default: return -EINVAL;
+	}
+
+	info->params.encoding = new_encoding;
+	info->params.crc_type = new_crctype;
+
+	/* if network interface up, reprogram hardware */
+	if (info->netcount) {
+		tty = tty_port_tty_get(&info->port);
+		mgslpc_program_hw(info, tty);
+		tty_kref_put(tty);
+	}
+
+	return 0;
+}
+
+/**
+ * called by generic HDLC layer to send frame
+ *
+ * skb  socket buffer containing HDLC frame
+ * dev  pointer to network device structure
+ */
+static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
+				      struct net_device *dev)
+{
+	MGSLPC_INFO *info = dev_to_port(dev);
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk(KERN_INFO "%s:hdlc_xmit(%s)\n", __FILE__, dev->name);
+
+	/* stop sending until this frame completes */
+	netif_stop_queue(dev);
+
+	/* copy data to device buffers */
+	skb_copy_from_linear_data(skb, info->tx_buf, skb->len);
+	info->tx_get = 0;
+	info->tx_put = info->tx_count = skb->len;
+
+	/* update network statistics */
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+
+	/* done with socket buffer, so free it */
+	dev_kfree_skb(skb);
+
+	/* save start time for transmit timeout detection */
+	netif_trans_update(dev);
+
+	/* start hardware transmitter if necessary */
+	spin_lock_irqsave(&info->lock, flags);
+	if (!info->tx_active) {
+		struct tty_struct *tty = tty_port_tty_get(&info->port);
+		tx_start(info, tty);
+		tty_kref_put(tty);
+	}
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	return NETDEV_TX_OK;
+}
+
+/**
+ * called by network layer when interface enabled
+ * claim resources and initialize hardware
+ *
+ * dev  pointer to network device structure
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_open(struct net_device *dev)
+{
+	MGSLPC_INFO *info = dev_to_port(dev);
+	struct tty_struct *tty;
+	int rc;
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s:hdlcdev_open(%s)\n", __FILE__, dev->name);
+
+	/* generic HDLC layer open processing */
+	rc = hdlc_open(dev);
+	if (rc != 0)
+		return rc;
+
+	/* arbitrate between network and tty opens */
+	spin_lock_irqsave(&info->netlock, flags);
+	if (info->port.count != 0 || info->netcount != 0) {
+		printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
+		spin_unlock_irqrestore(&info->netlock, flags);
+		return -EBUSY;
+	}
+	info->netcount = 1;
+	spin_unlock_irqrestore(&info->netlock, flags);
+
+	tty = tty_port_tty_get(&info->port);
+	/* claim resources and init adapter */
+	rc = startup(info, tty);
+	if (rc != 0) {
+		tty_kref_put(tty);
+		spin_lock_irqsave(&info->netlock, flags);
+		info->netcount = 0;
+		spin_unlock_irqrestore(&info->netlock, flags);
+		return rc;
+	}
+	/* assert RTS and DTR, apply hardware settings */
+	info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
+	mgslpc_program_hw(info, tty);
+	tty_kref_put(tty);
+
+	/* enable network layer transmit */
+	netif_trans_update(dev);
+	netif_start_queue(dev);
+
+	/* inform generic HDLC layer of current DCD status */
+	spin_lock_irqsave(&info->lock, flags);
+	get_signals(info);
+	spin_unlock_irqrestore(&info->lock, flags);
+	if (info->serial_signals & SerialSignal_DCD)
+		netif_carrier_on(dev);
+	else
+		netif_carrier_off(dev);
+	return 0;
+}
+
+/**
+ * called by network layer when interface is disabled
+ * shutdown hardware and release resources
+ *
+ * dev  pointer to network device structure
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_close(struct net_device *dev)
+{
+	MGSLPC_INFO *info = dev_to_port(dev);
+	struct tty_struct *tty = tty_port_tty_get(&info->port);
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s:hdlcdev_close(%s)\n", __FILE__, dev->name);
+
+	netif_stop_queue(dev);
+
+	/* shutdown adapter and release resources */
+	shutdown(info, tty);
+	tty_kref_put(tty);
+	hdlc_close(dev);
+
+	spin_lock_irqsave(&info->netlock, flags);
+	info->netcount = 0;
+	spin_unlock_irqrestore(&info->netlock, flags);
+
+	return 0;
+}
+
+/**
+ * called by network layer to process IOCTL call to network device
+ *
+ * dev  pointer to network device structure
+ * ifr  pointer to network interface request structure
+ * cmd  IOCTL command code
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	const size_t size = sizeof(sync_serial_settings);
+	sync_serial_settings new_line;
+	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+	MGSLPC_INFO *info = dev_to_port(dev);
+	unsigned int flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
+
+	/* return error if TTY interface open */
+	if (info->port.count)
+		return -EBUSY;
+
+	if (cmd != SIOCWANDEV)
+		return hdlc_ioctl(dev, ifr, cmd);
+
+	memset(&new_line, 0, size);
+
+	switch (ifr->ifr_settings.type) {
+	case IF_GET_IFACE: /* return current sync_serial_settings */
+
+		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
+		if (ifr->ifr_settings.size < size) {
+			ifr->ifr_settings.size = size; /* data size wanted */
+			return -ENOBUFS;
+		}
+
+		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
+					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
+
+		switch (flags) {
+		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
+		case (HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_INT; break;
+		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
+		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
+		default: new_line.clock_type = CLOCK_DEFAULT;
+		}
+
+		new_line.clock_rate = info->params.clock_speed;
+		new_line.loopback   = info->params.loopback ? 1 : 0;
+
+		if (copy_to_user(line, &new_line, size))
+			return -EFAULT;
+		return 0;
+
+	case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (copy_from_user(&new_line, line, size))
+			return -EFAULT;
+
+		switch (new_line.clock_type)
+		{
+		case CLOCK_EXT:      flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
+		case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
+		case CLOCK_INT:      flags = HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG;    break;
+		case CLOCK_TXINT:    flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG;    break;
+		case CLOCK_DEFAULT:  flags = info->params.flags &
+					     (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
+					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN); break;
+		default: return -EINVAL;
+		}
+
+		if (new_line.loopback != 0 && new_line.loopback != 1)
+			return -EINVAL;
+
+		info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+					HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
+					HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+					HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
+		info->params.flags |= flags;
+
+		info->params.loopback = new_line.loopback;
+
+		if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
+			info->params.clock_speed = new_line.clock_rate;
+		else
+			info->params.clock_speed = 0;
+
+		/* if network interface up, reprogram hardware */
+		if (info->netcount) {
+			struct tty_struct *tty = tty_port_tty_get(&info->port);
+			mgslpc_program_hw(info, tty);
+			tty_kref_put(tty);
+		}
+		return 0;
+
+	default:
+		return hdlc_ioctl(dev, ifr, cmd);
+	}
+}
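+
+/* Usage sketch (assumes the userspace "sethdlc" utility from the
+ * generic-HDLC tools, which issues the SIOCWANDEV ioctl handled above):
+ *
+ *   sethdlc hdlc0 clock int rate 9600   (CLOCK_INT: BRG on both clocks)
+ *   sethdlc hdlc0 clock ext             (CLOCK_EXT: RxC/TxC pins)
+ */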
+
+/**
+ * called by network layer when transmit timeout is detected
+ *
+ * dev  pointer to network device structure
+ */
+static void hdlcdev_tx_timeout(struct net_device *dev)
+{
+	MGSLPC_INFO *info = dev_to_port(dev);
+	unsigned long flags;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("hdlcdev_tx_timeout(%s)\n", dev->name);
+
+	dev->stats.tx_errors++;
+	dev->stats.tx_aborted_errors++;
+
+	spin_lock_irqsave(&info->lock, flags);
+	tx_stop(info);
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	netif_wake_queue(dev);
+}
+
+/**
+ * called by device driver when transmit completes
+ * reenable network layer transmit if stopped
+ *
+ * info  pointer to device instance information
+ */
+static void hdlcdev_tx_done(MGSLPC_INFO *info)
+{
+	if (netif_queue_stopped(info->netdev))
+		netif_wake_queue(info->netdev);
+}
+
+/**
+ * called by device driver when frame received
+ * pass frame to network layer
+ *
+ * info  pointer to device instance information
+ * buf   pointer to buffer containing frame data
+ * size  count of data bytes in buf
+ */
+static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size)
+{
+	struct sk_buff *skb = dev_alloc_skb(size);
+	struct net_device *dev = info->netdev;
+
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("hdlcdev_rx(%s)\n", dev->name);
+
+	if (skb == NULL) {
+		printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
+		dev->stats.rx_dropped++;
+		return;
+	}
+
+	skb_put_data(skb, buf, size);
+
+	skb->protocol = hdlc_type_trans(skb, dev);
+
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += size;
+
+	netif_rx(skb);
+}
+
+static const struct net_device_ops hdlcdev_ops = {
+	.ndo_open       = hdlcdev_open,
+	.ndo_stop       = hdlcdev_close,
+	.ndo_start_xmit = hdlc_start_xmit,
+	.ndo_do_ioctl   = hdlcdev_ioctl,
+	.ndo_tx_timeout = hdlcdev_tx_timeout,
+};
+
+/**
+ * called by device driver when adding device instance
+ * do generic HDLC initialization
+ *
+ * info  pointer to device instance information
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_init(MGSLPC_INFO *info)
+{
+	int rc;
+	struct net_device *dev;
+	hdlc_device *hdlc;
+
+	/* allocate and initialize network and HDLC layer objects */
+
+	dev = alloc_hdlcdev(info);
+	if (dev == NULL) {
+		printk(KERN_ERR "%s:hdlc device allocation failure\n", __FILE__);
+		return -ENOMEM;
+	}
+
+	/* for network layer reporting purposes only */
+	dev->base_addr = info->io_base;
+	dev->irq       = info->irq_level;
+
+	/* network layer callbacks and settings */
+	dev->netdev_ops	    = &hdlcdev_ops;
+	dev->watchdog_timeo = 10 * HZ;
+	dev->tx_queue_len   = 50;
+
+	/* generic HDLC layer callbacks and settings */
+	hdlc         = dev_to_hdlc(dev);
+	hdlc->attach = hdlcdev_attach;
+	hdlc->xmit   = hdlcdev_xmit;
+
+	/* register objects with HDLC layer */
+	rc = register_hdlc_device(dev);
+	if (rc) {
+		printk(KERN_WARNING "%s:unable to register hdlc device\n", __FILE__);
+		free_netdev(dev);
+		return rc;
+	}
+
+	info->netdev = dev;
+	return 0;
+}
+
+/**
+ * called by device driver when removing device instance
+ * do generic HDLC cleanup
+ *
+ * info  pointer to device instance information
+ */
+static void hdlcdev_exit(MGSLPC_INFO *info)
+{
+	unregister_hdlc_device(info->netdev);
+	free_netdev(info->netdev);
+	info->netdev = NULL;
+}
+
+#endif /* SYNCLINK_GENERIC_HDLC */
+
diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c
new file mode 100644
index 0000000..a45dabc
--- /dev/null
+++ b/drivers/char/powernv-op-panel.c
@@ -0,0 +1,223 @@
+/*
+ * OPAL Operator Panel Display Driver
+ *
+ * Copyright 2016, Suraj Jitindar Singh, IBM Corporation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+
+#include <asm/opal.h>
+
+/*
+ * This driver creates a character device (/dev/op_panel) which exposes the
+ * operator panel (character LCD display) on IBM Power Systems machines
+ * with FSPs.
+ * A character buffer written to the device will be displayed on the
+ * operator panel.
+ */
+
+static DEFINE_MUTEX(oppanel_mutex);
+
+static u32		num_lines, oppanel_size;
+static oppanel_line_t	*oppanel_lines;
+static char		*oppanel_data;
+
+static loff_t oppanel_llseek(struct file *filp, loff_t offset, int whence)
+{
+	return fixed_size_llseek(filp, offset, whence, oppanel_size);
+}
+
+static ssize_t oppanel_read(struct file *filp, char __user *userbuf, size_t len,
+			    loff_t *f_pos)
+{
+	return simple_read_from_buffer(userbuf, len, f_pos, oppanel_data,
+			oppanel_size);
+}
+
+static int __op_panel_update_display(void)
+{
+	struct opal_msg msg;
+	int rc, token;
+
+	token = opal_async_get_token_interruptible();
+	if (token < 0) {
+		if (token != -ERESTARTSYS)
+			pr_debug("Couldn't get OPAL async token [token=%d]\n",
+				token);
+		return token;
+	}
+
+	rc = opal_write_oppanel_async(token, oppanel_lines, num_lines);
+	switch (rc) {
+	case OPAL_ASYNC_COMPLETION:
+		rc = opal_async_wait_response(token, &msg);
+		if (rc) {
+			pr_debug("Failed to wait for async response [rc=%d]\n",
+				rc);
+			break;
+		}
+		rc = opal_get_async_rc(msg);
+		if (rc != OPAL_SUCCESS) {
+			pr_debug("OPAL async call returned failed [rc=%d]\n",
+				rc);
+			break;
+		}
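+		/* fall through */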
+	case OPAL_SUCCESS:
+		break;
+	default:
+		pr_debug("OPAL write op-panel call failed [rc=%d]\n", rc);
+	}
+
+	opal_async_release_token(token);
+	return rc;
+}
+
+static ssize_t oppanel_write(struct file *filp, const char __user *userbuf,
+			     size_t len, loff_t *f_pos)
+{
+	loff_t f_pos_prev = *f_pos;
+	ssize_t ret;
+	int rc;
+
+	if (!*f_pos)
+		memset(oppanel_data, ' ', oppanel_size);
+	else if (*f_pos >= oppanel_size)
+		return -EFBIG;
+
+	ret = simple_write_to_buffer(oppanel_data, oppanel_size, f_pos, userbuf,
+			len);
+	if (ret > 0) {
+		rc = __op_panel_update_display();
+		if (rc != OPAL_SUCCESS) {
+			pr_err_ratelimited("OPAL call failed to write to op panel display [rc=%d]\n",
+				rc);
+			*f_pos = f_pos_prev;
+			return -EIO;
+		}
+	}
+	return ret;
+}
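+
+/* Usage sketch (illustrative): the buffer is cleared to spaces on each
+ * write that starts at offset 0, so a plain shell redirect replaces the
+ * whole display:
+ *
+ *   echo "system booting" > /dev/op_panel
+ */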
+
+static int oppanel_open(struct inode *inode, struct file *filp)
+{
+	if (!mutex_trylock(&oppanel_mutex)) {
+		pr_debug("Device Busy\n");
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static int oppanel_release(struct inode *inode, struct file *filp)
+{
+	mutex_unlock(&oppanel_mutex);
+	return 0;
+}
+
+static const struct file_operations oppanel_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= oppanel_llseek,
+	.read		= oppanel_read,
+	.write		= oppanel_write,
+	.open		= oppanel_open,
+	.release	= oppanel_release
+};
+
+static struct miscdevice oppanel_dev = {
+	.minor		= MISC_DYNAMIC_MINOR,
+	.name		= "op_panel",
+	.fops		= &oppanel_fops
+};
+
+static int oppanel_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	u32 line_len;
+	int rc, i;
+
+	rc = of_property_read_u32(np, "#length", &line_len);
+	if (rc) {
+		pr_err_ratelimited("Operator panel length property not found\n");
+		return rc;
+	}
+	rc = of_property_read_u32(np, "#lines", &num_lines);
+	if (rc) {
+		pr_err_ratelimited("Operator panel lines property not found\n");
+		return rc;
+	}
+	oppanel_size = line_len * num_lines;
+
+	pr_devel("Operator panel of size %u found with %u lines of length %u\n",
+			oppanel_size, num_lines, line_len);
+
+	oppanel_data = kcalloc(oppanel_size, sizeof(*oppanel_data), GFP_KERNEL);
+	if (!oppanel_data)
+		return -ENOMEM;
+
+	oppanel_lines = kcalloc(num_lines, sizeof(oppanel_line_t), GFP_KERNEL);
+	if (!oppanel_lines) {
+		rc = -ENOMEM;
+		goto free_oppanel_data;
+	}
+
+	memset(oppanel_data, ' ', oppanel_size);
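+	/*
+	 * Point each line descriptor at its slice of the buffer; OPAL
+	 * expects big-endian physical addresses and lengths.
+	 */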
+	for (i = 0; i < num_lines; i++) {
+		oppanel_lines[i].line_len = cpu_to_be64(line_len);
+		oppanel_lines[i].line = cpu_to_be64(__pa(&oppanel_data[i *
+						line_len]));
+	}
+
+	rc = misc_register(&oppanel_dev);
+	if (rc) {
+		pr_err_ratelimited("Failed to register as misc device\n");
+		goto free_oppanel;
+	}
+
+	return 0;
+
+free_oppanel:
+	kfree(oppanel_lines);
+free_oppanel_data:
+	kfree(oppanel_data);
+	return rc;
+}
+
+static int oppanel_remove(struct platform_device *pdev)
+{
+	misc_deregister(&oppanel_dev);
+	kfree(oppanel_lines);
+	kfree(oppanel_data);
+	return 0;
+}
+
+static const struct of_device_id oppanel_match[] = {
+	{ .compatible = "ibm,opal-oppanel" },
+	{ },
+};
+
+static struct platform_driver oppanel_driver = {
+	.driver	= {
+		.name		= "powernv-op-panel",
+		.of_match_table	= oppanel_match,
+	},
+	.probe	= oppanel_probe,
+	.remove	= oppanel_remove,
+};
+
+module_platform_driver(oppanel_driver);
+
+MODULE_DEVICE_TABLE(of, oppanel_match);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PowerNV Operator Panel LCD Display Driver");
+MODULE_AUTHOR("Suraj Jitindar Singh <sjitindarsingh@gmail.com>");
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
new file mode 100644
index 0000000..1ae77b4
--- /dev/null
+++ b/drivers/char/ppdev.c
@@ -0,0 +1,888 @@
+/*
+ * linux/drivers/char/ppdev.c
+ *
+ * This is the code behind /dev/parport* -- it allows a user-space
+ * application to use the parport subsystem.
+ *
+ * Copyright (C) 1998-2000, 2002 Tim Waugh <tim@cyberelk.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * A /dev/parportx device node represents an arbitrary device
+ * on port 'x'.  The following operations are possible:
+ *
+ * open		do nothing, set up default IEEE 1284 protocol to be COMPAT
+ * close	release port and unregister device (if necessary)
+ * ioctl
+ *   EXCL	register device exclusively (may fail)
+ *   CLAIM	(register device first time) parport_claim_or_block
+ *   RELEASE	parport_release
+ *   SETMODE	set the IEEE 1284 protocol to use for read/write
+ *   SETPHASE	set the IEEE 1284 phase of a particular mode.  Not to be
+ *              confused with ioctl(fd, SETPHASER, &stun). ;-)
+ *   DATADIR	data_forward / data_reverse
+ *   WDATA	write_data
+ *   RDATA	read_data
+ *   WCONTROL	write_control
+ *   RCONTROL	read_control
+ *   FCONTROL	frob_control
+ *   RSTATUS	read_status
+ *   NEGOT	parport_negotiate
+ *   YIELD	parport_yield_blocking
+ *   WCTLONIRQ	on interrupt, set control lines
+ *   CLRIRQ	clear (and return) interrupt count
+ *   SETTIME	sets device timeout (struct timeval)
+ *   GETTIME	gets device timeout (struct timeval)
+ *   GETMODES	gets hardware supported modes (unsigned int)
+ *   GETMODE	gets the current IEEE1284 mode
+ *   GETPHASE   gets the current IEEE1284 phase
+ *   GETFLAGS   gets current (user-visible) flags
+ *   SETFLAGS   sets current (user-visible) flags
+ * read/write	read or write in current IEEE 1284 protocol
+ * select	wait for interrupt (in readfds)
+ *
+ * Changes:
+ * Added SETTIME/GETTIME ioctl, Fred Barnes, 1999.
+ *
+ * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 2000/08/25
+ * - On error, copy_from_user and copy_to_user do not return -EFAULT;
+ *   they return the positive number of bytes *not* copied due to address
+ *   space errors.
+ *
+ * Added GETMODES/GETMODE/GETPHASE ioctls, Fred Barnes <frmb2@ukc.ac.uk>, 03/01/2001.
+ * Added GETFLAGS/SETFLAGS ioctls, Fred Barnes, 04/2001
+ */
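+
+/*
+ * Minimal userspace sketch (illustrative only; error handling omitted):
+ *
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/ppdev.h>
+ *
+ *	int fd = open("/dev/parport0", O_RDWR);
+ *	ioctl(fd, PPCLAIM);		// register and claim the port
+ *	unsigned char d = 0x55;
+ *	ioctl(fd, PPWDATA, &d);		// drive the data lines
+ *	ioctl(fd, PPRELEASE);		// hand the port back
+ *	close(fd);
+ */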
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched/signal.h>
+#include <linux/device.h>
+#include <linux/ioctl.h>
+#include <linux/parport.h>
+#include <linux/ctype.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/major.h>
+#include <linux/ppdev.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
+
+#define PP_VERSION "ppdev: user-space parallel port driver"
+#define CHRDEV "ppdev"
+
+struct pp_struct {
+	struct pardevice *pdev;
+	wait_queue_head_t irq_wait;
+	atomic_t irqc;
+	unsigned int flags;
+	int irqresponse;
+	unsigned char irqctl;
+	struct ieee1284_info state;
+	struct ieee1284_info saved_state;
+	long default_inactivity;
+	int index;
+};
+
+/* should we use PARDEVICE_MAX here? */
+static struct device *devices[PARPORT_MAX];
+
+static DEFINE_IDA(ida_index);
+
+/* pp_struct.flags bitfields */
+#define PP_CLAIMED    (1<<0)
+#define PP_EXCL       (1<<1)
+
+/* Other constants */
+#define PP_INTERRUPT_TIMEOUT (10 * HZ) /* 10s */
+#define PP_BUFFER_SIZE 1024
+#define PARDEVICE_MAX 8
+
+static DEFINE_MUTEX(pp_do_mutex);
+
+/* define fixed sized ioctl cmd for y2038 migration */
+#define PPGETTIME32	_IOR(PP_IOCTL, 0x95, s32[2])
+#define PPSETTIME32	_IOW(PP_IOCTL, 0x96, s32[2])
+#define PPGETTIME64	_IOR(PP_IOCTL, 0x95, s64[2])
+#define PPSETTIME64	_IOW(PP_IOCTL, 0x96, s64[2])
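+/*
+ * Layout note: each *TIME ioctl passes { seconds, microseconds } as two
+ * words of the indicated width; e.g. for PPSETTIME64, userspace fills
+ * s64 tv[2] = { 5, 0 } for a five-second timeout.
+ */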
+
+static inline void pp_enable_irq(struct pp_struct *pp)
+{
+	struct parport *port = pp->pdev->port;
+
+	port->ops->enable_irq(port);
+}
+
+static ssize_t pp_read(struct file *file, char __user *buf, size_t count,
+		       loff_t *ppos)
+{
+	unsigned int minor = iminor(file_inode(file));
+	struct pp_struct *pp = file->private_data;
+	char *kbuffer;
+	ssize_t bytes_read = 0;
+	struct parport *pport;
+	int mode;
+
+	if (!(pp->flags & PP_CLAIMED)) {
+		/* Don't have the port claimed */
+		pr_debug(CHRDEV "%x: claim the port first\n", minor);
+		return -EINVAL;
+	}
+
+	/* Trivial case. */
+	if (count == 0)
+		return 0;
+
+	kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
+	if (!kbuffer)
+		return -ENOMEM;
+	pport = pp->pdev->port;
+	mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
+
+	parport_set_timeout(pp->pdev,
+			    (file->f_flags & O_NONBLOCK) ?
+			    PARPORT_INACTIVITY_O_NONBLOCK :
+			    pp->default_inactivity);
+
+	while (bytes_read == 0) {
+		ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE);
+
+		if (mode == IEEE1284_MODE_EPP) {
+			/* various specials for EPP mode */
+			int flags = 0;
+			size_t (*fn)(struct parport *, void *, size_t, int);
+
+			if (pp->flags & PP_W91284PIC)
+				flags |= PARPORT_W91284PIC;
+			if (pp->flags & PP_FASTREAD)
+				flags |= PARPORT_EPP_FAST;
+			if (pport->ieee1284.mode & IEEE1284_ADDR)
+				fn = pport->ops->epp_read_addr;
+			else
+				fn = pport->ops->epp_read_data;
+			bytes_read = (*fn)(pport, kbuffer, need, flags);
+		} else {
+			bytes_read = parport_read(pport, kbuffer, need);
+		}
+
+		if (bytes_read != 0)
+			break;
+
+		if (file->f_flags & O_NONBLOCK) {
+			bytes_read = -EAGAIN;
+			break;
+		}
+
+		if (signal_pending(current)) {
+			bytes_read = -ERESTARTSYS;
+			break;
+		}
+
+		cond_resched();
+	}
+
+	parport_set_timeout(pp->pdev, pp->default_inactivity);
+
+	if (bytes_read > 0 && copy_to_user(buf, kbuffer, bytes_read))
+		bytes_read = -EFAULT;
+
+	kfree(kbuffer);
+	pp_enable_irq(pp);
+	return bytes_read;
+}
+
+static ssize_t pp_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	unsigned int minor = iminor(file_inode(file));
+	struct pp_struct *pp = file->private_data;
+	char *kbuffer;
+	ssize_t bytes_written = 0;
+	ssize_t wrote;
+	int mode;
+	struct parport *pport;
+
+	if (!(pp->flags & PP_CLAIMED)) {
+		/* Don't have the port claimed */
+		pr_debug(CHRDEV "%x: claim the port first\n", minor);
+		return -EINVAL;
+	}
+
+	kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
+	if (!kbuffer)
+		return -ENOMEM;
+
+	pport = pp->pdev->port;
+	mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
+
+	parport_set_timeout(pp->pdev,
+			    (file->f_flags & O_NONBLOCK) ?
+			    PARPORT_INACTIVITY_O_NONBLOCK :
+			    pp->default_inactivity);
+
+	while (bytes_written < count) {
+		ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE);
+
+		if (copy_from_user(kbuffer, buf + bytes_written, n)) {
+			bytes_written = -EFAULT;
+			break;
+		}
+
+		if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) {
+			/* do a fast EPP write */
+			if (pport->ieee1284.mode & IEEE1284_ADDR) {
+				wrote = pport->ops->epp_write_addr(pport,
+					kbuffer, n, PARPORT_EPP_FAST);
+			} else {
+				wrote = pport->ops->epp_write_data(pport,
+					kbuffer, n, PARPORT_EPP_FAST);
+			}
+		} else {
+			wrote = parport_write(pp->pdev->port, kbuffer, n);
+		}
+
+		if (wrote <= 0) {
+			if (!bytes_written)
+				bytes_written = wrote;
+			break;
+		}
+
+		bytes_written += wrote;
+
+		if (file->f_flags & O_NONBLOCK) {
+			if (!bytes_written)
+				bytes_written = -EAGAIN;
+			break;
+		}
+
+		if (signal_pending(current))
+			break;
+
+		cond_resched();
+	}
+
+	parport_set_timeout(pp->pdev, pp->default_inactivity);
+
+	kfree(kbuffer);
+	pp_enable_irq(pp);
+	return bytes_written;
+}
+
+static void pp_irq(void *private)
+{
+	struct pp_struct *pp = private;
+
+	if (pp->irqresponse) {
+		parport_write_control(pp->pdev->port, pp->irqctl);
+		pp->irqresponse = 0;
+	}
+
+	atomic_inc(&pp->irqc);
+	wake_up_interruptible(&pp->irq_wait);
+}
+
+static int register_device(int minor, struct pp_struct *pp)
+{
+	struct parport *port;
+	struct pardevice *pdev = NULL;
+	char *name;
+	struct pardev_cb ppdev_cb;
+	int rc = 0, index;
+
+	name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
+	if (name == NULL)
+		return -ENOMEM;
+
+	port = parport_find_number(minor);
+	if (!port) {
+		pr_warn("%s: no associated port!\n", name);
+		rc = -ENXIO;
+		goto err;
+	}
+
+	index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
+	/* ida_simple_get() can fail; don't hand a negative id to parport */
+	if (index < 0) {
+		pr_warn("%s: failed to get an index!\n", name);
+		parport_put_port(port);
+		rc = index;
+		goto err;
+	}
+
+	memset(&ppdev_cb, 0, sizeof(ppdev_cb));
+	ppdev_cb.irq_func = pp_irq;
+	ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
+	ppdev_cb.private = pp;
+	pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
+	parport_put_port(port);
+
+	if (!pdev) {
+		pr_warn("%s: failed to register device!\n", name);
+		rc = -ENXIO;
+		ida_simple_remove(&ida_index, index);
+		goto err;
+	}
+
+	pp->pdev = pdev;
+	pp->index = index;
+	dev_dbg(&pdev->dev, "registered pardevice\n");
+err:
+	kfree(name);
+	return rc;
+}
+
+static enum ieee1284_phase init_phase(int mode)
+{
+	switch (mode & ~(IEEE1284_DEVICEID
+			 | IEEE1284_ADDR)) {
+	case IEEE1284_MODE_NIBBLE:
+	case IEEE1284_MODE_BYTE:
+		return IEEE1284_PH_REV_IDLE;
+	}
+	return IEEE1284_PH_FWD_IDLE;
+}
+
+static int pp_set_timeout(struct pardevice *pdev, long tv_sec, int tv_usec)
+{
+	long to_jiffies;
+
+	if ((tv_sec < 0) || (tv_usec < 0))
+		return -EINVAL;
+
+	to_jiffies = usecs_to_jiffies(tv_usec);
+	to_jiffies += tv_sec * HZ;
+	if (to_jiffies <= 0)
+		return -EINVAL;
+
+	pdev->timeout = to_jiffies;
+	return 0;
+}
+
+static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	unsigned int minor = iminor(file_inode(file));
+	struct pp_struct *pp = file->private_data;
+	struct parport *port;
+	void __user *argp = (void __user *)arg;
+
+	/* First handle the cases that don't take arguments. */
+	switch (cmd) {
+	case PPCLAIM:
+	    {
+		struct ieee1284_info *info;
+		int ret;
+
+		if (pp->flags & PP_CLAIMED) {
+			dev_dbg(&pp->pdev->dev, "you've already got it!\n");
+			return -EINVAL;
+		}
+
+		/* Deferred device registration. */
+		if (!pp->pdev) {
+			int err = register_device(minor, pp);
+
+			if (err)
+				return err;
+		}
+
+		ret = parport_claim_or_block(pp->pdev);
+		if (ret < 0)
+			return ret;
+
+		pp->flags |= PP_CLAIMED;
+
+		/* For interrupt-reporting to work, we need to be
+		 * informed of each interrupt. */
+		pp_enable_irq(pp);
+
+		/* We may need to fix up the state machine. */
+		info = &pp->pdev->port->ieee1284;
+		pp->saved_state.mode = info->mode;
+		pp->saved_state.phase = info->phase;
+		info->mode = pp->state.mode;
+		info->phase = pp->state.phase;
+		pp->default_inactivity = parport_set_timeout(pp->pdev, 0);
+		parport_set_timeout(pp->pdev, pp->default_inactivity);
+
+		return 0;
+	    }
+	case PPEXCL:
+		if (pp->pdev) {
+			dev_dbg(&pp->pdev->dev,
+				"too late for PPEXCL; already registered\n");
+			if (pp->flags & PP_EXCL)
+				/* But it's not really an error. */
+				return 0;
+			/* There's no chance of making the driver happy. */
+			return -EINVAL;
+		}
+
+		/* Just remember to register the device exclusively
+		 * when we finally do the registration. */
+		pp->flags |= PP_EXCL;
+		return 0;
+	case PPSETMODE:
+	    {
+		int mode;
+
+		if (copy_from_user(&mode, argp, sizeof(mode)))
+			return -EFAULT;
+		/* FIXME: validate mode */
+		pp->state.mode = mode;
+		pp->state.phase = init_phase(mode);
+
+		if (pp->flags & PP_CLAIMED) {
+			pp->pdev->port->ieee1284.mode = mode;
+			pp->pdev->port->ieee1284.phase = pp->state.phase;
+		}
+
+		return 0;
+	    }
+	case PPGETMODE:
+	    {
+		int mode;
+
+		if (pp->flags & PP_CLAIMED)
+			mode = pp->pdev->port->ieee1284.mode;
+		else
+			mode = pp->state.mode;
+
+		if (copy_to_user(argp, &mode, sizeof(mode)))
+			return -EFAULT;
+		return 0;
+	    }
+	case PPSETPHASE:
+	    {
+		int phase;
+
+		if (copy_from_user(&phase, argp, sizeof(phase)))
+			return -EFAULT;
+
+		/* FIXME: validate phase */
+		pp->state.phase = phase;
+
+		if (pp->flags & PP_CLAIMED)
+			pp->pdev->port->ieee1284.phase = phase;
+
+		return 0;
+	    }
+	case PPGETPHASE:
+	    {
+		int phase;
+
+		if (pp->flags & PP_CLAIMED)
+			phase = pp->pdev->port->ieee1284.phase;
+		else
+			phase = pp->state.phase;
+		if (copy_to_user(argp, &phase, sizeof(phase)))
+			return -EFAULT;
+		return 0;
+	    }
+	case PPGETMODES:
+	    {
+		unsigned int modes;
+
+		port = parport_find_number(minor);
+		if (!port)
+			return -ENODEV;
+
+		modes = port->modes;
+		parport_put_port(port);
+		if (copy_to_user(argp, &modes, sizeof(modes)))
+			return -EFAULT;
+		return 0;
+	    }
+	case PPSETFLAGS:
+	    {
+		int uflags;
+
+		if (copy_from_user(&uflags, argp, sizeof(uflags)))
+			return -EFAULT;
+		pp->flags &= ~PP_FLAGMASK;
+		pp->flags |= (uflags & PP_FLAGMASK);
+		return 0;
+	    }
+	case PPGETFLAGS:
+	    {
+		int uflags;
+
+		uflags = pp->flags & PP_FLAGMASK;
+		if (copy_to_user(argp, &uflags, sizeof(uflags)))
+			return -EFAULT;
+		return 0;
+	    }
+	}	/* end switch() */
+
+	/* Everything else requires the port to be claimed, so check
+	 * that now. */
+	if ((pp->flags & PP_CLAIMED) == 0) {
+		pr_debug(CHRDEV "%x: claim the port first\n", minor);
+		return -EINVAL;
+	}
+
+	port = pp->pdev->port;
+	switch (cmd) {
+		struct ieee1284_info *info;
+		unsigned char reg;
+		unsigned char mask;
+		int mode;
+		s32 time32[2];
+		s64 time64[2];
+		struct timespec64 ts;
+		int ret;
+
+	case PPRSTATUS:
+		reg = parport_read_status(port);
+		if (copy_to_user(argp, &reg, sizeof(reg)))
+			return -EFAULT;
+		return 0;
+	case PPRDATA:
+		reg = parport_read_data(port);
+		if (copy_to_user(argp, &reg, sizeof(reg)))
+			return -EFAULT;
+		return 0;
+	case PPRCONTROL:
+		reg = parport_read_control(port);
+		if (copy_to_user(argp, &reg, sizeof(reg)))
+			return -EFAULT;
+		return 0;
+	case PPYIELD:
+		parport_yield_blocking(pp->pdev);
+		return 0;
+
+	case PPRELEASE:
+		/* Save the state machine's state. */
+		info = &pp->pdev->port->ieee1284;
+		pp->state.mode = info->mode;
+		pp->state.phase = info->phase;
+		info->mode = pp->saved_state.mode;
+		info->phase = pp->saved_state.phase;
+		parport_release(pp->pdev);
+		pp->flags &= ~PP_CLAIMED;
+		return 0;
+
+	case PPWCONTROL:
+		if (copy_from_user(&reg, argp, sizeof(reg)))
+			return -EFAULT;
+		parport_write_control(port, reg);
+		return 0;
+
+	case PPWDATA:
+		if (copy_from_user(&reg, argp, sizeof(reg)))
+			return -EFAULT;
+		parport_write_data(port, reg);
+		return 0;
+
+	case PPFCONTROL:
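+		/* userspace passes two consecutive bytes: mask, then value */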
+		if (copy_from_user(&mask, argp,
+				   sizeof(mask)))
+			return -EFAULT;
+		if (copy_from_user(&reg, 1 + (unsigned char __user *) arg,
+				   sizeof(reg)))
+			return -EFAULT;
+		parport_frob_control(port, mask, reg);
+		return 0;
+
+	case PPDATADIR:
+		if (copy_from_user(&mode, argp, sizeof(mode)))
+			return -EFAULT;
+		if (mode)
+			port->ops->data_reverse(port);
+		else
+			port->ops->data_forward(port);
+		return 0;
+
+	case PPNEGOT:
+		if (copy_from_user(&mode, argp, sizeof(mode)))
+			return -EFAULT;
+		switch ((ret = parport_negotiate(port, mode))) {
+		case 0: break;
+		case -1: /* handshake failed, peripheral not IEEE 1284 */
+			ret = -EIO;
+			break;
+		case 1:  /* handshake succeeded, peripheral rejected mode */
+			ret = -ENXIO;
+			break;
+		}
+		pp_enable_irq(pp);
+		return ret;
+
+	case PPWCTLONIRQ:
+		if (copy_from_user(&reg, argp, sizeof(reg)))
+			return -EFAULT;
+
+		/* Remember what to set the control lines to, for next
+		 * time we get an interrupt. */
+		pp->irqctl = reg;
+		pp->irqresponse = 1;
+		return 0;
+
+	case PPCLRIRQ:
+		ret = atomic_read(&pp->irqc);
+		if (copy_to_user(argp, &ret, sizeof(ret)))
+			return -EFAULT;
+		atomic_sub(ret, &pp->irqc);
+		return 0;
+
+	case PPSETTIME32:
+		if (copy_from_user(time32, argp, sizeof(time32)))
+			return -EFAULT;
+
+		return pp_set_timeout(pp->pdev, time32[0], time32[1]);
+
+	case PPSETTIME64:
+		if (copy_from_user(time64, argp, sizeof(time64)))
+			return -EFAULT;
+
+		return pp_set_timeout(pp->pdev, time64[0], time64[1]);
+
+	case PPGETTIME32:
+		jiffies_to_timespec64(pp->pdev->timeout, &ts);
+		time32[0] = ts.tv_sec;
+		time32[1] = ts.tv_nsec / NSEC_PER_USEC;
+		if ((time32[0] < 0) || (time32[1] < 0))
+			return -EINVAL;
+
+		if (copy_to_user(argp, time32, sizeof(time32)))
+			return -EFAULT;
+
+		return 0;
+
+	case PPGETTIME64:
+		jiffies_to_timespec64(pp->pdev->timeout, &ts);
+		time64[0] = ts.tv_sec;
+		time64[1] = ts.tv_nsec / NSEC_PER_USEC;
+		if ((time64[0] < 0) || (time64[1] < 0))
+			return -EINVAL;
+
+		if (copy_to_user(argp, time64, sizeof(time64)))
+			return -EFAULT;
+
+		return 0;
+
+	default:
+		dev_dbg(&pp->pdev->dev, "What? (cmd=0x%x)\n", cmd);
+		return -EINVAL;
+	}
+
+	/* Keep the compiler happy */
+	return 0;
+}
+
+static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	mutex_lock(&pp_do_mutex);
+	ret = pp_do_ioctl(file, cmd, arg);
+	mutex_unlock(&pp_do_mutex);
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long pp_compat_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	return pp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static int pp_open(struct inode *inode, struct file *file)
+{
+	unsigned int minor = iminor(inode);
+	struct pp_struct *pp;
+
+	if (minor >= PARPORT_MAX)
+		return -ENXIO;
+
+	pp = kmalloc(sizeof(struct pp_struct), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	pp->state.mode = IEEE1284_MODE_COMPAT;
+	pp->state.phase = init_phase(pp->state.mode);
+	pp->flags = 0;
+	pp->irqresponse = 0;
+	atomic_set(&pp->irqc, 0);
+	init_waitqueue_head(&pp->irq_wait);
+
+	/* Defer the actual device registration until the first claim.
+	 * That way, we know whether or not the driver wants to have
+	 * exclusive access to the port (PPEXCL).
+	 */
+	pp->pdev = NULL;
+	file->private_data = pp;
+
+	return 0;
+}
+
+static int pp_release(struct inode *inode, struct file *file)
+{
+	unsigned int minor = iminor(inode);
+	struct pp_struct *pp = file->private_data;
+	int compat_negot;
+
+	compat_negot = 0;
+	if (!(pp->flags & PP_CLAIMED) && pp->pdev &&
+	    (pp->state.mode != IEEE1284_MODE_COMPAT)) {
+		struct ieee1284_info *info;
+
+		/* parport released, but not in compatibility mode */
+		parport_claim_or_block(pp->pdev);
+		pp->flags |= PP_CLAIMED;
+		info = &pp->pdev->port->ieee1284;
+		pp->saved_state.mode = info->mode;
+		pp->saved_state.phase = info->phase;
+		info->mode = pp->state.mode;
+		info->phase = pp->state.phase;
+		compat_negot = 1;
+	} else if ((pp->flags & PP_CLAIMED) && pp->pdev &&
+	    (pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT)) {
+		compat_negot = 2;
+	}
+	if (compat_negot) {
+		parport_negotiate(pp->pdev->port, IEEE1284_MODE_COMPAT);
+		dev_dbg(&pp->pdev->dev,
+			"negotiated back to compatibility mode because user-space forgot\n");
+	}
+
+	if (pp->flags & PP_CLAIMED) {
+		struct ieee1284_info *info;
+
+		info = &pp->pdev->port->ieee1284;
+		pp->state.mode = info->mode;
+		pp->state.phase = info->phase;
+		info->mode = pp->saved_state.mode;
+		info->phase = pp->saved_state.phase;
+		parport_release(pp->pdev);
+		if (compat_negot != 1) {
+			pr_debug(CHRDEV "%x: released pardevice "
+				"because user-space forgot\n", minor);
+		}
+	}
+
+	if (pp->pdev) {
+		parport_unregister_device(pp->pdev);
+		ida_simple_remove(&ida_index, pp->index);
+		pp->pdev = NULL;
+		pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
+	}
+
+	kfree(pp);
+
+	return 0;
+}
+
+/* No kernel lock held - fine */
+static __poll_t pp_poll(struct file *file, poll_table *wait)
+{
+	struct pp_struct *pp = file->private_data;
+	__poll_t mask = 0;
+
+	poll_wait(file, &pp->irq_wait, wait);
+	if (atomic_read(&pp->irqc))
+		mask |= EPOLLIN | EPOLLRDNORM;
+
+	return mask;
+}
+
+static struct class *ppdev_class;
+
+static const struct file_operations pp_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.read		= pp_read,
+	.write		= pp_write,
+	.poll		= pp_poll,
+	.unlocked_ioctl	= pp_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = pp_compat_ioctl,
+#endif
+	.open		= pp_open,
+	.release	= pp_release,
+};
+
+static void pp_attach(struct parport *port)
+{
+	struct device *ret;
+
+	if (devices[port->number])
+		return;
+
+	ret = device_create(ppdev_class, port->dev,
+			    MKDEV(PP_MAJOR, port->number), NULL,
+			    "parport%d", port->number);
+	if (IS_ERR(ret)) {
+		pr_err("Failed to create device parport%d\n",
+		       port->number);
+		return;
+	}
+	devices[port->number] = ret;
+}
+
+static void pp_detach(struct parport *port)
+{
+	if (!devices[port->number])
+		return;
+
+	device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
+	devices[port->number] = NULL;
+}
+
+static int pp_probe(struct pardevice *par_dev)
+{
+	struct device_driver *drv = par_dev->dev.driver;
+	int len = strlen(drv->name);
+
+	if (strncmp(par_dev->name, drv->name, len))
+		return -ENODEV;
+
+	return 0;
+}
+
+static struct parport_driver pp_driver = {
+	.name		= CHRDEV,
+	.probe		= pp_probe,
+	.match_port	= pp_attach,
+	.detach		= pp_detach,
+	.devmodel	= true,
+};
+
+static int __init ppdev_init(void)
+{
+	int err = 0;
+
+	if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) {
+		pr_warn(CHRDEV ": unable to get major %d\n", PP_MAJOR);
+		return -EIO;
+	}
+	ppdev_class = class_create(THIS_MODULE, CHRDEV);
+	if (IS_ERR(ppdev_class)) {
+		err = PTR_ERR(ppdev_class);
+		goto out_chrdev;
+	}
+	err = parport_register_driver(&pp_driver);
+	if (err < 0) {
+		pr_warn(CHRDEV ": unable to register with parport\n");
+		goto out_class;
+	}
+
+	pr_info(PP_VERSION "\n");
+	goto out;
+
+out_class:
+	class_destroy(ppdev_class);
+out_chrdev:
+	unregister_chrdev(PP_MAJOR, CHRDEV);
+out:
+	return err;
+}
+
+static void __exit ppdev_cleanup(void)
+{
+	/* Clean up all parport stuff */
+	parport_unregister_driver(&pp_driver);
+	class_destroy(ppdev_class);
+	unregister_chrdev(PP_MAJOR, CHRDEV);
+}
+
+module_init(ppdev_init);
+module_exit(ppdev_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(PP_MAJOR);
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
new file mode 100644
index 0000000..b526dc1
--- /dev/null
+++ b/drivers/char/ps3flash.c
@@ -0,0 +1,458 @@
+/*
+ * PS3 FLASH ROM Storage Driver
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+
+#include <asm/lv1call.h>
+#include <asm/ps3stor.h>
+
+
+#define DEVICE_NAME		"ps3flash"
+
+#define FLASH_BLOCK_SIZE	(256*1024)
+
+
+struct ps3flash_private {
+	struct mutex mutex;	/* Bounce buffer mutex */
+	u64 chunk_sectors;
+	int tag;		/* Start sector of buffer, -1 if invalid */
+	bool dirty;
+};
+
+static struct ps3_storage_device *ps3flash_dev;
+
+static int ps3flash_read_write_sectors(struct ps3_storage_device *dev,
+				       u64 start_sector, int write)
+{
+	struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+	u64 res = ps3stor_read_write_sectors(dev, dev->bounce_lpar,
+					     start_sector, priv->chunk_sectors,
+					     write);
+	if (res) {
+		dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
+			__LINE__, write ? "write" : "read", res);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int ps3flash_writeback(struct ps3_storage_device *dev)
+{
+	struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+	int res;
+
+	if (!priv->dirty || priv->tag < 0)
+		return 0;
+
+	res = ps3flash_read_write_sectors(dev, priv->tag, 1);
+	if (res)
+		return res;
+
+	priv->dirty = false;
+	return 0;
+}
+
+static int ps3flash_fetch(struct ps3_storage_device *dev, u64 start_sector)
+{
+	struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+	int res;
+
+	if (start_sector == priv->tag)
+		return 0;
+
+	res = ps3flash_writeback(dev);
+	if (res)
+		return res;
+
+	priv->tag = -1;
+
+	res = ps3flash_read_write_sectors(dev, start_sector, 0);
+	if (res)
+		return res;
+
+	priv->tag = start_sector;
+	return 0;
+}
+
+static loff_t ps3flash_llseek(struct file *file, loff_t offset, int origin)
+{
+	struct ps3_storage_device *dev = ps3flash_dev;
+	return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
+			dev->regions[dev->region_idx].size*dev->blk_size);
+}
+
+static ssize_t ps3flash_read(char __user *userbuf, void *kernelbuf,
+			     size_t count, loff_t *pos)
+{
+	struct ps3_storage_device *dev = ps3flash_dev;
+	struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+	u64 size, sector, offset;
+	int res;
+	size_t remaining, n;
+	const void *src;
+
+	dev_dbg(&dev->sbd.core,
+		"%s:%u: Reading %zu bytes at position %lld to U0x%p/K0x%p\n",
+		__func__, __LINE__, count, *pos, userbuf, kernelbuf);
+
+	size = dev->regions[dev->region_idx].size*dev->blk_size;
+	if (*pos >= size || !count)
+		return 0;
+
+	if (*pos + count > size) {
+		dev_dbg(&dev->sbd.core,
+			"%s:%u Truncating count from %zu to %llu\n", __func__,
+			__LINE__, count, size - *pos);
+		count = size - *pos;
+	}
+
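+	/* locate the bounce-buffer-sized chunk holding *pos */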
+	sector = *pos / dev->bounce_size * priv->chunk_sectors;
+	offset = *pos % dev->bounce_size;
+
+	remaining = count;
+	do {
+		n = min_t(u64, remaining, dev->bounce_size - offset);
+		src = dev->bounce_buf + offset;
+
+		mutex_lock(&priv->mutex);
+
+		res = ps3flash_fetch(dev, sector);
+		if (res)
+			goto fail;
+
+		dev_dbg(&dev->sbd.core,
+			"%s:%u: copy %lu bytes from 0x%p to U0x%p/K0x%p\n",
+			__func__, __LINE__, n, src, userbuf, kernelbuf);
+		if (userbuf) {
+			if (copy_to_user(userbuf, src, n)) {
+				res = -EFAULT;
+				goto fail;
+			}
+			userbuf += n;
+		}
+		if (kernelbuf) {
+			memcpy(kernelbuf, src, n);
+			kernelbuf += n;
+		}
+
+		mutex_unlock(&priv->mutex);
+
+		*pos += n;
+		remaining -= n;
+		sector += priv->chunk_sectors;
+		offset = 0;
+	} while (remaining > 0);
+
+	return count;
+
+fail:
+	mutex_unlock(&priv->mutex);
+	return res;
+}
+
+static ssize_t ps3flash_write(const char __user *userbuf,
+			      const void *kernelbuf, size_t count, loff_t *pos)
+{
+	struct ps3_storage_device *dev = ps3flash_dev;
+	struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+	u64 size, sector, offset;
+	int res = 0;
+	size_t remaining, n;
+	void *dst;
+
+	dev_dbg(&dev->sbd.core,
+		"%s:%u: Writing %zu bytes at position %lld from U0x%p/K0x%p\n",
+		__func__, __LINE__, count, *pos, userbuf, kernelbuf);
+
+	size = dev->regions[dev->region_idx].size*dev->blk_size;
+	if (*pos >= size || !count)
+		return 0;
+
+	if (*pos + count > size) {
+		dev_dbg(&dev->sbd.core,
+			"%s:%u Truncating count from %zu to %llu\n", __func__,
+			__LINE__, count, size - *pos);
+		count = size - *pos;
+	}
+
+	sector = *pos / dev->bounce_size * priv->chunk_sectors;
+	offset = *pos % dev->bounce_size;
+
+	remaining = count;
+	do {
+		n = min_t(u64, remaining, dev->bounce_size - offset);
+		dst = dev->bounce_buf + offset;
+
+		mutex_lock(&priv->mutex);
+
+		if (n != dev->bounce_size)
+			res = ps3flash_fetch(dev, sector);
+		else if (sector != priv->tag)
+			res = ps3flash_writeback(dev);
+		if (res)
+			goto fail;
+
+		dev_dbg(&dev->sbd.core,
+			"%s:%u: copy %lu bytes from U0x%p/K0x%p to 0x%p\n",
+			__func__, __LINE__, n, userbuf, kernelbuf, dst);
+		if (userbuf) {
+			if (copy_from_user(dst, userbuf, n)) {
+				res = -EFAULT;
+				goto fail;
+			}
+			userbuf += n;
+		}
+		if (kernelbuf) {
+			memcpy(dst, kernelbuf, n);
+			kernelbuf += n;
+		}
+
+		priv->tag = sector;
+		priv->dirty = true;
+
+		mutex_unlock(&priv->mutex);
+
+		*pos += n;
+		remaining -= n;
+		sector += priv->chunk_sectors;
+		offset = 0;
+	} while (remaining > 0);
+
+	return count;
+
+fail:
+	mutex_unlock(&priv->mutex);
+	return res;
+}
+
+static ssize_t ps3flash_user_read(struct file *file, char __user *buf,
+				  size_t count, loff_t *pos)
+{
+	return ps3flash_read(buf, NULL, count, pos);
+}
+
+static ssize_t ps3flash_user_write(struct file *file, const char __user *buf,
+				   size_t count, loff_t *pos)
+{
+	return ps3flash_write(buf, NULL, count, pos);
+}
+
+static ssize_t ps3flash_kernel_read(void *buf, size_t count, loff_t pos)
+{
+	return ps3flash_read(NULL, buf, count, &pos);
+}
+
+static ssize_t ps3flash_kernel_write(const void *buf, size_t count,
+				     loff_t pos)
+{
+	ssize_t res;
+	int wb;
+
+	res = ps3flash_write(NULL, buf, count, &pos);
+	if (res < 0)
+		return res;
+
+	/* Make kernel writes synchronous */
+	wb = ps3flash_writeback(ps3flash_dev);
+	if (wb)
+		return wb;
+
+	return res;
+}
+
+static int ps3flash_flush(struct file *file, fl_owner_t id)
+{
+	return ps3flash_writeback(ps3flash_dev);
+}
+
+static int ps3flash_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+	struct inode *inode = file_inode(file);
+	int err;
+	inode_lock(inode);
+	err = ps3flash_writeback(ps3flash_dev);
+	inode_unlock(inode);
+	return err;
+}
+
+static irqreturn_t ps3flash_interrupt(int irq, void *data)
+{
+	struct ps3_storage_device *dev = data;
+	int res;
+	u64 tag, status;
+
+	res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
+
+	if (tag != dev->tag)
+		dev_err(&dev->sbd.core,
+			"%s:%u: tag mismatch, got %llx, expected %llx\n",
+			__func__, __LINE__, tag, dev->tag);
+
+	if (res) {
+		dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n",
+			__func__, __LINE__, res, status);
+	} else {
+		dev->lv1_status = status;
+		complete(&dev->done);
+	}
+	return IRQ_HANDLED;
+}
+
+static const struct file_operations ps3flash_fops = {
+	.owner	= THIS_MODULE,
+	.llseek	= ps3flash_llseek,
+	.read	= ps3flash_user_read,
+	.write	= ps3flash_user_write,
+	.flush	= ps3flash_flush,
+	.fsync	= ps3flash_fsync,
+};
+
+static const struct ps3_os_area_flash_ops ps3flash_kernel_ops = {
+	.read	= ps3flash_kernel_read,
+	.write	= ps3flash_kernel_write,
+};
+
+static struct miscdevice ps3flash_misc = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= DEVICE_NAME,
+	.fops	= &ps3flash_fops,
+};
+
+static int ps3flash_probe(struct ps3_system_bus_device *_dev)
+{
+	struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+	struct ps3flash_private *priv;
+	int error;
+	unsigned long tmp;
+
+	tmp = dev->regions[dev->region_idx].start*dev->blk_size;
+	if (tmp % FLASH_BLOCK_SIZE) {
+		dev_err(&dev->sbd.core,
+			"%s:%u region start %lu is not aligned\n", __func__,
+			__LINE__, tmp);
+		return -EINVAL;
+	}
+	tmp = dev->regions[dev->region_idx].size*dev->blk_size;
+	if (tmp % FLASH_BLOCK_SIZE) {
+		dev_err(&dev->sbd.core,
+			"%s:%u region size %lu is not aligned\n", __func__,
+			__LINE__, tmp);
+		return -EINVAL;
+	}
+
+	/* use static buffer, kmalloc cannot allocate 256 KiB */
+	if (!ps3flash_bounce_buffer.address)
+		return -ENODEV;
+
+	if (ps3flash_dev) {
+		dev_err(&dev->sbd.core,
+			"Only one FLASH device is supported\n");
+		return -EBUSY;
+	}
+
+	ps3flash_dev = dev;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		error = -ENOMEM;
+		goto fail;
+	}
+
+	ps3_system_bus_set_drvdata(&dev->sbd, priv);
+	mutex_init(&priv->mutex);
+	priv->tag = -1;
+
+	dev->bounce_size = ps3flash_bounce_buffer.size;
+	dev->bounce_buf = ps3flash_bounce_buffer.address;
+	priv->chunk_sectors = dev->bounce_size / dev->blk_size;
+
+	error = ps3stor_setup(dev, ps3flash_interrupt);
+	if (error)
+		goto fail_free_priv;
+
+	ps3flash_misc.parent = &dev->sbd.core;
+	error = misc_register(&ps3flash_misc);
+	if (error) {
+		dev_err(&dev->sbd.core, "%s:%u: misc_register failed %d\n",
+			__func__, __LINE__, error);
+		goto fail_teardown;
+	}
+
+	dev_info(&dev->sbd.core, "%s:%u: registered misc device %d\n",
+		 __func__, __LINE__, ps3flash_misc.minor);
+
+	ps3_os_area_flash_register(&ps3flash_kernel_ops);
+	return 0;
+
+fail_teardown:
+	ps3stor_teardown(dev);
+fail_free_priv:
+	kfree(priv);
+	ps3_system_bus_set_drvdata(&dev->sbd, NULL);
+fail:
+	ps3flash_dev = NULL;
+	return error;
+}
+
+static int ps3flash_remove(struct ps3_system_bus_device *_dev)
+{
+	struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+
+	ps3_os_area_flash_register(NULL);
+	misc_deregister(&ps3flash_misc);
+	ps3stor_teardown(dev);
+	kfree(ps3_system_bus_get_drvdata(&dev->sbd));
+	ps3_system_bus_set_drvdata(&dev->sbd, NULL);
+	ps3flash_dev = NULL;
+	return 0;
+}
+
+
+static struct ps3_system_bus_driver ps3flash = {
+	.match_id	= PS3_MATCH_ID_STOR_FLASH,
+	.core.name	= DEVICE_NAME,
+	.core.owner	= THIS_MODULE,
+	.probe		= ps3flash_probe,
+	.remove		= ps3flash_remove,
+	.shutdown	= ps3flash_remove,
+};
+
+
+static int __init ps3flash_init(void)
+{
+	return ps3_system_bus_driver_register(&ps3flash);
+}
+
+static void __exit ps3flash_exit(void)
+{
+	ps3_system_bus_driver_unregister(&ps3flash);
+}
+
+module_init(ps3flash_init);
+module_exit(ps3flash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PS3 FLASH ROM Storage Driver");
+MODULE_AUTHOR("Sony Corporation");
+MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_FLASH);
diff --git a/drivers/char/random.c b/drivers/char/random.c
new file mode 100644
index 0000000..c75b6cd
--- /dev/null
+++ b/drivers/char/random.c
@@ -0,0 +1,2365 @@
+/*
+ * random.c -- A strong random number generator
+ *
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ *
+ * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
+ *
+ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
+ * rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+/*
+ * (now, with legal B.S. out of the way.....)
+ *
+ * This routine gathers environmental noise from device drivers, etc.,
+ * and returns good random numbers, suitable for cryptographic use.
+ * Besides the obvious cryptographic uses, these numbers are also good
+ * for seeding TCP sequence numbers, and other places where it is
+ * desirable to have numbers which are not only random, but hard to
+ * predict by an attacker.
+ *
+ * Theory of operation
+ * ===================
+ *
+ * Computers are very predictable devices.  Hence it is extremely hard
+ * to produce truly random numbers on a computer --- as opposed to
+ * pseudo-random numbers, which can easily be generated by using an
+ * algorithm.  Unfortunately, it is very easy for attackers to guess
+ * the sequence of pseudo-random number generators, and for some
+ * applications this is not acceptable.  So instead, we must try to
+ * gather "environmental noise" from the computer's environment, which
+ * must be hard for outside attackers to observe, and use that to
+ * generate random numbers.  In a Unix environment, this is best done
+ * from inside the kernel.
+ *
+ * Sources of randomness from the environment include inter-keyboard
+ * timings, inter-interrupt timings from some interrupts, and other
+ * events which are both (a) non-deterministic and (b) hard for an
+ * outside observer to measure.  Randomness from these sources is
+ * added to an "entropy pool", which is mixed using a CRC-like function.
+ * This is not cryptographically strong, but it is adequate assuming
+ * the randomness is not chosen maliciously, and it is fast enough that
+ * the overhead of doing it on every interrupt is very reasonable.
+ * As random bytes are mixed into the entropy pool, the routines keep
+ * an *estimate* of how many bits of randomness have been stored into
+ * the random number generator's internal state.
+ *
+ * When random bytes are desired, they are obtained by taking the SHA
+ * hash of the contents of the "entropy pool".  The SHA hash avoids
+ * exposing the internal state of the entropy pool.  It is believed to
+ * be computationally infeasible to derive any useful information
+ * about the input of SHA from its output.  Even if it is possible to
+ * analyze SHA in some clever way, as long as the amount of data
+ * returned from the generator is less than the inherent entropy in
+ * the pool, the output data is totally unpredictable.  For this
+ * reason, the routine decreases its internal estimate of how many
+ * bits of "true randomness" are contained in the entropy pool as it
+ * outputs random numbers.
+ *
+ * If this estimate goes to zero, the routine can still generate
+ * random numbers; however, an attacker may (at least in theory) be
+ * able to infer the future output of the generator from prior
+ * outputs.  This requires successful cryptanalysis of SHA, which is
+ * not believed to be feasible, but there is a remote possibility.
+ * Nonetheless, these numbers should be useful for the vast majority
+ * of purposes.
+ *
+ * Exported interfaces ---- output
+ * ===============================
+ *
+ * There are three exported interfaces; the first is one designed to
+ * be used from within the kernel:
+ *
+ * 	void get_random_bytes(void *buf, int nbytes);
+ *
+ * This interface will return the requested number of random bytes,
+ * and place it in the requested buffer.
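+ *
+ * For example, to generate a fresh 256-bit key inside the kernel:
+ *
+ *	u8 key[32];
+ *	get_random_bytes(key, sizeof(key));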
+ *
+ * The two other interfaces are two character devices /dev/random and
+ * /dev/urandom.  /dev/random is suitable for use when very high
+ * quality randomness is desired (for example, for key generation or
+ * one-time pads), as it will only return a maximum of the number of
+ * bits of randomness (as estimated by the random number generator)
+ * contained in the entropy pool.
+ *
+ * The /dev/urandom device does not have this limit, and will return
+ * as many bytes as are requested.  As more and more random bytes are
+ * requested without giving time for the entropy pool to recharge,
+ * this will result in random numbers that are merely cryptographically
+ * strong.  For many applications, however, this is acceptable.
+ *
+ * Exported interfaces ---- input
+ * ==============================
+ *
+ * The current exported interfaces for gathering environmental noise
+ * from the devices are:
+ *
+ *	void add_device_randomness(const void *buf, unsigned int size);
+ * 	void add_input_randomness(unsigned int type, unsigned int code,
+ *                                unsigned int value);
+ *	void add_interrupt_randomness(int irq, int irq_flags);
+ * 	void add_disk_randomness(struct gendisk *disk);
+ *
+ * add_device_randomness() is for adding data to the random pool that
+ * is likely to differ between two devices (or possibly even per boot).
+ * This would be things like MAC addresses or serial numbers, or the
+ * read-out of the RTC. This does *not* add any actual entropy to the
+ * pool, but it initializes the pool to different values for devices
+ * that might otherwise be identical and have very little entropy
+ * available to them (particularly common in the embedded world).
+ *
+ * add_input_randomness() uses the input layer interrupt timing, as well as
+ * the event type information from the hardware.
+ *
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool. Using the cycle counters and the irq source
+ * as inputs, it feeds the randomness roughly once a second.
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
+ * entropy pool. Note that high-speed solid state drives with very low
+ * seek times do not make for good sources of entropy, as their seek
+ * times are usually fairly consistent.
+ *
+ * All of these routines try to estimate how many bits of randomness a
+ * particular randomness source contributes.  They do this by keeping
+ * track of the first and second order deltas of the event timings.
+ *
+ * Ensuring unpredictability at system startup
+ * ============================================
+ *
+ * When any operating system starts up, it will go through a sequence
+ * of actions that are fairly predictable by an adversary, especially
+ * if the start-up does not involve interaction with a human operator.
+ * This reduces the actual number of bits of unpredictability in the
+ * entropy pool below the value in entropy_count.  In order to
+ * counteract this effect, it helps to carry information in the
+ * entropy pool across shut-downs and start-ups.  To do this, put the
+ * following lines in an appropriate script which is run during the boot
+ * sequence:
+ *
+ *	echo "Initializing random number generator..."
+ *	random_seed=/var/run/random-seed
+ *	# Carry a random seed from start-up to start-up
+ *	# Load and then save the whole entropy pool
+ *	if [ -f $random_seed ]; then
+ *		cat $random_seed >/dev/urandom
+ *	else
+ *		touch $random_seed
+ *	fi
+ *	chmod 600 $random_seed
+ *	dd if=/dev/urandom of=$random_seed count=1 bs=512
+ *
+ * and the following lines in an appropriate script which is run as
+ * the system is shut down:
+ *
+ *	# Carry a random seed from shut-down to start-up
+ *	# Save the whole entropy pool
+ *	echo "Saving random seed..."
+ *	random_seed=/var/run/random-seed
+ *	touch $random_seed
+ *	chmod 600 $random_seed
+ *	dd if=/dev/urandom of=$random_seed count=1 bs=512
+ *
+ * For example, on most modern systems using the System V init
+ * scripts, such code fragments would be found in
+ * /etc/rc.d/init.d/random.  On older Linux systems, the correct script
+ * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0.
+ *
+ * Effectively, these commands cause the contents of the entropy pool
+ * to be saved at shut-down time and reloaded into the entropy pool at
+ * start-up.  (The 'dd' in the addition to the bootup script is to
+ * make sure that /etc/random-seed is different for every start-up,
+ * even if the system crashes without executing rc.0.)  Even with
+ * complete knowledge of the start-up activities, predicting the state
+ * of the entropy pool requires knowledge of the previous history of
+ * the system.
+ *
+ * Configuring the /dev/random driver under Linux
+ * ==============================================
+ *
+ * The /dev/random driver under Linux uses minor numbers 8 and 9 of
+ * the /dev/mem major number (#1).  So if your system does not have
+ * /dev/random and /dev/urandom created already, they can be created
+ * by using the commands:
+ *
+ * 	mknod /dev/random c 1 8
+ * 	mknod /dev/urandom c 1 9
+ *
+ * Acknowledgements:
+ * =================
+ *
+ * Ideas for constructing this random number generator were derived
+ * from Pretty Good Privacy's random number generator, and from private
+ * discussions with Phil Karn.  Colin Plumb provided a faster random
+ * number generator, which sped up the mixing function of the entropy
+ * pool, taken from PGPfone.  Dale Worley has also contributed many
+ * useful ideas and suggestions to improve this driver.
+ *
+ * Any flaws in the design are solely my responsibility, and should
+ * not be attributed to Phil, Colin, or any of the authors of PGP.
+ *
+ * Further background information on this topic may be obtained from
+ * RFC 1750, "Randomness Recommendations for Security", by Donald
+ * Eastlake, Steve Crocker, and Jeff Schiller.
+ */
+
+#include <linux/utsname.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/nodemask.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+#include <linux/cryptohash.h>
+#include <linux/fips.h>
+#include <linux/ptrace.h>
+#include <linux/workqueue.h>
+#include <linux/irq.h>
+#include <linux/ratelimit.h>
+#include <linux/syscalls.h>
+#include <linux/completion.h>
+#include <linux/uuid.h>
+#include <crypto/chacha20.h>
+
+#include <asm/processor.h>
+#include <linux/uaccess.h>
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/io.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/random.h>
+
+/* #define ADD_INTERRUPT_BENCH */
+
+/*
+ * Configuration information
+ */
+#define INPUT_POOL_SHIFT	12
+#define INPUT_POOL_WORDS	(1 << (INPUT_POOL_SHIFT-5))
+#define OUTPUT_POOL_SHIFT	10
+#define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
+#define SEC_XFER_SIZE		512
+#define EXTRACT_SIZE		10
+
+
+#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
+
+/*
+ * To allow fractional bits to be tracked, the entropy_count field is
+ * denominated in units of 1/8th bits.
+ *
+ * 2*(ENTROPY_SHIFT + log2(poolbits)) must be <= 31, or the multiply in
+ * credit_entropy_bits() needs to be 64 bits wide.
+ */
+#define ENTROPY_SHIFT 3
+#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
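+/* e.g. with ENTROPY_SHIFT == 3, an entropy_count of 24 denotes 3 bits */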
+
+/*
+ * The minimum number of bits of entropy before we wake up a read on
+ * /dev/random.  Should be enough to do a significant reseed.
+ */
+static int random_read_wakeup_bits = 64;
+
+/*
+ * If the entropy count falls under this number of bits, then we
+ * should wake up processes which are selecting or polling on write
+ * access to /dev/random.
+ */
+static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
+
+/*
+ * Originally, we used a primitive polynomial of degree .poolwords
+ * over GF(2).  The taps for various sizes are defined below.  They
+ * were chosen to be evenly spaced except for the last tap, which is 1
+ * to get the twisting happening as fast as possible.
+ *
+ * For the purposes of better mixing, we use the CRC-32 polynomial as
+ * well to make a (modified) twisted Generalized Feedback Shift
+ * Register.  (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR
+ * generators.  ACM Transactions on Modeling and Computer Simulation
+ * 2(3):179-194.  Also see M. Matsumoto & Y. Kurita, 1994.  Twisted
+ * GFSR generators II.  ACM Transactions on Modeling and Computer
+ * Simulation 4:254-266)
+ *
+ * Thanks to Colin Plumb for suggesting this.
+ *
+ * The mixing operation is much less sensitive than the output hash,
+ * where we use SHA-1.  All that we want of mixing operation is that
+ * it be a good non-cryptographic hash; i.e. it not produce collisions
+ * when fed "random" data of the sort we expect to see.  As long as
+ * the pool state differs for different inputs, we have preserved the
+ * input entropy and done a good job.  The fact that an intelligent
+ * attacker can construct inputs that will produce controlled
+ * alterations to the pool's state is not important because we don't
+ * consider such inputs to contribute any randomness.  The only
+ * property we need with respect to them is that the attacker can't
+ * increase his/her knowledge of the pool's state.  Since all
+ * additions are reversible (knowing the final state and the input,
+ * you can reconstruct the initial state), if an attacker has any
+ * uncertainty about the initial state, he/she can only shuffle that
+ * uncertainty about, but never cause any collisions (which would
+ * decrease the uncertainty).
+ *
+ * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
+ * Videau in their paper, "The Linux Pseudorandom Number Generator
+ * Revisited" (see: http://eprint.iacr.org/2012/251.pdf).  In their
+ * paper, they point out that we are not using a true Twisted GFSR,
+ * since Matsumoto & Kurita used a trinomial feedback polynomial (that
+ * is, with only three taps, instead of the six that we are using).
+ * As a result, the resulting polynomial is neither primitive nor
+ * irreducible, and hence does not have a maximal period over
+ * GF(2**32).  They suggest a slight change to the generator
+ * polynomial which improves the resulting TGFSR polynomial to be
+ * irreducible, which we have made here.
+ */
+static struct poolinfo {
+	int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
+#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
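+/* S(x) expands to the five pool-size fields above for a pool of x words */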
+	int tap1, tap2, tap3, tap4, tap5;
+} poolinfo_table[] = {
+	/* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
+	/* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
+	{ S(128),	104,	76,	51,	25,	1 },
+	/* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
+	/* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
+	{ S(32),	26,	19,	14,	7,	1 },
+#if 0
+	/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
+	{ S(2048),	1638,	1231,	819,	411,	1 },
+
+	/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
+	{ S(1024),	817,	615,	412,	204,	1 },
+
+	/* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
+	{ S(1024),	819,	616,	410,	207,	2 },
+
+	/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
+	{ S(512),	411,	308,	208,	104,	1 },
+
+	/* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
+	{ S(512),	409,	307,	206,	102,	2 },
+	/* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
+	{ S(512),	409,	309,	205,	103,	2 },
+
+	/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
+	{ S(256),	205,	155,	101,	52,	1 },
+
+	/* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
+	{ S(128),	103,	78,	51,	27,	2 },
+
+	/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
+	{ S(64),	52,	39,	26,	14,	1 },
+#endif
+};
+
+/*
+ * Static global variables
+ */
+static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+static struct fasync_struct *fasync;
+
+static DEFINE_SPINLOCK(random_ready_list_lock);
+static LIST_HEAD(random_ready_list);
+
+struct crng_state {
+	__u32		state[16];
+	unsigned long	init_time;
+	spinlock_t	lock;
+};
+
+static struct crng_state primary_crng = {
+	.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
+};
+
+/*
+ * crng_init =  0 --> Uninitialized
+ *		1 --> Initialized
+ *		2 --> Initialized from input_pool
+ *
+ * crng_init is protected by primary_crng->lock, and only increases
+ * its value (from 0->1->2).
+ */
+static int crng_init = 0;
+#define crng_ready() (likely(crng_init > 1))
+static int crng_init_cnt = 0;
+static unsigned long crng_global_init_time = 0;
+#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
+static void _extract_crng(struct crng_state *crng,
+			  __u32 out[CHACHA20_BLOCK_WORDS]);
+static void _crng_backtrack_protect(struct crng_state *crng,
+				    __u32 tmp[CHACHA20_BLOCK_WORDS], int used);
+static void process_random_ready_list(void);
+static void _get_random_bytes(void *buf, int nbytes);
+
+static struct ratelimit_state unseeded_warning =
+	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
+static struct ratelimit_state urandom_warning =
+	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
+
+static int ratelimit_disable __read_mostly;
+
+module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
+
+/**********************************************************************
+ *
+ * OS independent entropy store.   Here are the functions which handle
+ * storing entropy in an entropy pool.
+ *
+ **********************************************************************/
+
+struct entropy_store;
+struct entropy_store {
+	/* read-only data: */
+	const struct poolinfo *poolinfo;
+	__u32 *pool;
+	const char *name;
+	struct entropy_store *pull;
+	struct work_struct push_work;
+
+	/* read-write data: */
+	unsigned long last_pulled;
+	spinlock_t lock;
+	unsigned short add_ptr;
+	unsigned short input_rotate;
+	int entropy_count;
+	int entropy_total;
+	unsigned int initialized:1;
+	unsigned int last_data_init:1;
+	__u8 last_data[EXTRACT_SIZE];
+};
+
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+			       size_t nbytes, int min, int rsvd);
+static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
+				size_t nbytes, int fips);
+
+static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
+static void push_to_pool(struct work_struct *work);
+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
+
+static struct entropy_store input_pool = {
+	.poolinfo = &poolinfo_table[0],
+	.name = "input",
+	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
+	.pool = input_pool_data
+};
+
+static struct entropy_store blocking_pool = {
+	.poolinfo = &poolinfo_table[1],
+	.name = "blocking",
+	.pull = &input_pool,
+	.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
+	.pool = blocking_pool_data,
+	.push_work = __WORK_INITIALIZER(blocking_pool.push_work,
+					push_to_pool),
+};
+
+static __u32 const twist_table[8] = {
+	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+
+/*
+ * This function adds bytes into the entropy "pool".  It does not
+ * update the entropy estimate.  The caller should call
+ * credit_entropy_bits if this is appropriate.
+ *
+ * The pool is stirred with a primitive polynomial of the appropriate
+ * degree, and then twisted.  We twist by three bits at a time because
+ * it's cheap to do so and helps slightly in the expected case where
+ * the entropy is concentrated in the low-order bits.
+ */
+static void _mix_pool_bytes(struct entropy_store *r, const void *in,
+			    int nbytes)
+{
+	unsigned long i, tap1, tap2, tap3, tap4, tap5;
+	int input_rotate;
+	int wordmask = r->poolinfo->poolwords - 1;
+	const char *bytes = in;
+	__u32 w;
+
+	tap1 = r->poolinfo->tap1;
+	tap2 = r->poolinfo->tap2;
+	tap3 = r->poolinfo->tap3;
+	tap4 = r->poolinfo->tap4;
+	tap5 = r->poolinfo->tap5;
+
+	input_rotate = r->input_rotate;
+	i = r->add_ptr;
+
+	/* mix one byte at a time to simplify size handling and churn faster */
+	while (nbytes--) {
+		w = rol32(*bytes++, input_rotate);
+		i = (i - 1) & wordmask;
+
+		/* XOR in the various taps */
+		w ^= r->pool[i];
+		w ^= r->pool[(i + tap1) & wordmask];
+		w ^= r->pool[(i + tap2) & wordmask];
+		w ^= r->pool[(i + tap3) & wordmask];
+		w ^= r->pool[(i + tap4) & wordmask];
+		w ^= r->pool[(i + tap5) & wordmask];
+
+		/* Mix the result back in with a twist */
+		r->pool[i] = (w >> 3) ^ twist_table[w & 7];
+
+		/*
+		 * Normally, we add 7 bits of rotation to the pool.
+		 * At the beginning of the pool, add an extra 7 bits
+		 * rotation, so that successive passes spread the
+		 * input bits across the pool evenly.
+		 */
+		input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
+	}
+
+	r->input_rotate = input_rotate;
+	r->add_ptr = i;
+}
+
+static void __mix_pool_bytes(struct entropy_store *r, const void *in,
+			     int nbytes)
+{
+	trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
+	_mix_pool_bytes(r, in, nbytes);
+}
+
+static void mix_pool_bytes(struct entropy_store *r, const void *in,
+			   int nbytes)
+{
+	unsigned long flags;
+
+	trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
+	spin_lock_irqsave(&r->lock, flags);
+	_mix_pool_bytes(r, in, nbytes);
+	spin_unlock_irqrestore(&r->lock, flags);
+}
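+
+/*
+ * Added note: callers that already hold r->lock use __mix_pool_bytes()
+ * (e.g. extract_buf() mixing the hash back in, or the interrupt path),
+ * while mix_pool_bytes() takes the lock itself on behalf of everyone
+ * else.
+ */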
+
+struct fast_pool {
+	__u32		pool[4];
+	unsigned long	last;
+	unsigned short	reg_idx;
+	unsigned char	count;
+};
+
+/*
+ * This is a fast mixing routine used by the interrupt randomness
+ * collector.  It's hardcoded for a 128 bit pool and assumes that any
+ * locks that might be needed are taken by the caller.
+ */
+static void fast_mix(struct fast_pool *f)
+{
+	__u32 a = f->pool[0],	b = f->pool[1];
+	__u32 c = f->pool[2],	d = f->pool[3];
+
+	a += b;			c += d;
+	b = rol32(b, 6);	d = rol32(d, 27);
+	d ^= a;			b ^= c;
+
+	a += b;			c += d;
+	b = rol32(b, 16);	d = rol32(d, 14);
+	d ^= a;			b ^= c;
+
+	a += b;			c += d;
+	b = rol32(b, 6);	d = rol32(d, 27);
+	d ^= a;			b ^= c;
+
+	a += b;			c += d;
+	b = rol32(b, 16);	d = rol32(d, 14);
+	d ^= a;			b ^= c;
+
+	f->pool[0] = a;  f->pool[1] = b;
+	f->pool[2] = c;  f->pool[3] = d;
+	f->count++;
+}
+
+static void process_random_ready_list(void)
+{
+	unsigned long flags;
+	struct random_ready_callback *rdy, *tmp;
+
+	spin_lock_irqsave(&random_ready_list_lock, flags);
+	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
+		struct module *owner = rdy->owner;
+
+		list_del_init(&rdy->list);
+		rdy->func(rdy);
+		module_put(owner);
+	}
+	spin_unlock_irqrestore(&random_ready_list_lock, flags);
+}
+
+/*
+ * Credit (or debit) the entropy store with n bits of entropy.
+ * Use credit_entropy_bits_safe() if the value comes from userspace
+ * or otherwise should be checked for extreme values.
+ */
+static void credit_entropy_bits(struct entropy_store *r, int nbits)
+{
+	int entropy_count, orig;
+	const int pool_size = r->poolinfo->poolfracbits;
+	int nfrac = nbits << ENTROPY_SHIFT;
+
+	if (!nbits)
+		return;
+
+retry:
+	entropy_count = orig = READ_ONCE(r->entropy_count);
+	if (nfrac < 0) {
+		/* Debit */
+		entropy_count += nfrac;
+	} else {
+		/*
+		 * Credit: we have to account for the possibility of
+		 * overwriting already present entropy.	 Even in the
+		 * ideal case of pure Shannon entropy, new contributions
+		 * approach the full value asymptotically:
+		 *
+		 * entropy <- entropy + (pool_size - entropy) *
+		 *	(1 - exp(-add_entropy/pool_size))
+		 *
+		 * For add_entropy <= pool_size/2,
+		 * (1 - exp(-add_entropy/pool_size)) >=
+		 *    (add_entropy/pool_size)*0.7869...
+		 * so we can approximate the exponential with
+		 * 3/4*add_entropy/pool_size and still be on the
+		 * safe side by adding at most pool_size/2 at a time.
+		 *
+		 * The use of pool_size-2 in the while statement is to
+		 * prevent rounding artifacts from making the loop
+		 * arbitrarily long; this limits the loop to log2(pool_size)*2
+		 * turns no matter how large nbits is.
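+		 *
+		 * Illustrative example (added, derived from the formula
+		 * above): with the pool exactly half full,
+		 * entropy_count == pool_size/2, each step credits
+		 *
+		 *	add = (pool_size/2 * anfrac * 3) / (4 * pool_size)
+		 *	    = (3/8) * anfrac
+		 *
+		 * i.e. only 3/8 of the nominal credit lands, and the
+		 * fraction shrinks further as the pool fills up.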
+		 */
+		int pnfrac = nfrac;
+		const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
+		/* The +2 corresponds to the /4 in the denominator */
+
+		do {
+			unsigned int anfrac = min(pnfrac, pool_size/2);
+			unsigned int add =
+				((pool_size - entropy_count)*anfrac*3) >> s;
+
+			entropy_count += add;
+			pnfrac -= anfrac;
+		} while (unlikely(entropy_count < pool_size-2 && pnfrac));
+	}
+
+	if (unlikely(entropy_count < 0)) {
+		pr_warn("random: negative entropy/overflow: pool %s count %d\n",
+			r->name, entropy_count);
+		WARN_ON(1);
+		entropy_count = 0;
+	} else if (entropy_count > pool_size)
+		entropy_count = pool_size;
+	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+		goto retry;
+
+	r->entropy_total += nbits;
+	if (!r->initialized && r->entropy_total > 128) {
+		r->initialized = 1;
+		r->entropy_total = 0;
+	}
+
+	trace_credit_entropy_bits(r->name, nbits,
+				  entropy_count >> ENTROPY_SHIFT,
+				  r->entropy_total, _RET_IP_);
+
+	if (r == &input_pool) {
+		int entropy_bits = entropy_count >> ENTROPY_SHIFT;
+
+		if (crng_init < 2 && entropy_bits >= 128) {
+			crng_reseed(&primary_crng, r);
+			entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
+		}
+
+		/* should we wake readers? */
+		if (entropy_bits >= random_read_wakeup_bits &&
+		    wq_has_sleeper(&random_read_wait)) {
+			wake_up_interruptible(&random_read_wait);
+			kill_fasync(&fasync, SIGIO, POLL_IN);
+		}
+		/* If the input pool is getting full, send some
+		 * entropy to the blocking pool until it is 75% full.
+		 */
+		if (entropy_bits > random_write_wakeup_bits &&
+		    r->initialized &&
+		    r->entropy_total >= 2*random_read_wakeup_bits) {
+			struct entropy_store *other = &blocking_pool;
+
+			if (other->entropy_count <=
+			    3 * other->poolinfo->poolfracbits / 4) {
+				schedule_work(&other->push_work);
+				r->entropy_total = 0;
+			}
+		}
+	}
+}
+
+static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+{
+	const int nbits_max = r->poolinfo->poolwords * 32;
+
+	if (nbits < 0)
+		return -EINVAL;
+
+	/* Cap the value to avoid overflows */
+	nbits = min(nbits,  nbits_max);
+
+	credit_entropy_bits(r, nbits);
+	return 0;
+}
+
+/*********************************************************************
+ *
+ * CRNG using CHACHA20
+ *
+ *********************************************************************/
+
+#define CRNG_RESEED_INTERVAL (300*HZ)
+
+static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+
+#ifdef CONFIG_NUMA
+/*
+ * Hack to deal with crazy userspace programs when they are all trying
+ * to access /dev/urandom in parallel.  The programs are almost
+ * certainly doing something terribly wrong, but we'll work around
+ * their brain damage.
+ */
+static struct crng_state **crng_node_pool __read_mostly;
+#endif
+
+static void invalidate_batched_entropy(void);
+
+static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static int __init parse_trust_cpu(char *arg)
+{
+	return kstrtobool(arg, &trust_cpu);
+}
+early_param("random.trust_cpu", parse_trust_cpu);
+
+static void crng_initialize(struct crng_state *crng)
+{
+	int		i;
+	int		arch_init = 1;
+	unsigned long	rv;
+
+	memcpy(&crng->state[0], "expand 32-byte k", 16);
+	if (crng == &primary_crng)
+		_extract_entropy(&input_pool, &crng->state[4],
+				 sizeof(__u32) * 12, 0);
+	else
+		_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+	for (i = 4; i < 16; i++) {
+		if (!arch_get_random_seed_long(&rv) &&
+		    !arch_get_random_long(&rv)) {
+			rv = random_get_entropy();
+			arch_init = 0;
+		}
+		crng->state[i] ^= rv;
+	}
+	if (trust_cpu && arch_init) {
+		crng_init = 2;
+		pr_notice("random: crng done (trusting CPU's manufacturer)\n");
+	}
+	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+}
+
+#ifdef CONFIG_NUMA
+static void do_numa_crng_init(struct work_struct *work)
+{
+	int i;
+	struct crng_state *crng;
+	struct crng_state **pool;
+
+	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+	for_each_online_node(i) {
+		crng = kmalloc_node(sizeof(struct crng_state),
+				    GFP_KERNEL | __GFP_NOFAIL, i);
+		spin_lock_init(&crng->lock);
+		crng_initialize(crng);
+		pool[i] = crng;
+	}
+	mb();
+	if (cmpxchg(&crng_node_pool, NULL, pool)) {
+		for_each_node(i)
+			kfree(pool[i]);
+		kfree(pool);
+	}
+}
+
+static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
+
+static void numa_crng_init(void)
+{
+	schedule_work(&numa_crng_init_work);
+}
+#else
+static void numa_crng_init(void) {}
+#endif
+
+/*
+ * crng_fast_load() can be called by code in the interrupt service
+ * path.  So we can't afford to dilly-dally.
+ */
+static int crng_fast_load(const char *cp, size_t len)
+{
+	unsigned long flags;
+	unsigned char *p;
+
+	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
+		return 0;
+	if (crng_init != 0) {
+		spin_unlock_irqrestore(&primary_crng.lock, flags);
+		return 0;
+	}
+	p = (unsigned char *) &primary_crng.state[4];
+	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
+		p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
+		cp++; crng_init_cnt++; len--;
+	}
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
+	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+		invalidate_batched_entropy();
+		crng_init = 1;
+		wake_up_interruptible(&crng_init_wait);
+		pr_notice("random: fast init done\n");
+	}
+	return 1;
+}
+
+/*
+ * crng_slow_load() is called by add_device_randomness, which has two
+ * attributes.  (1) We can't trust that the buffer passed to it is
+ * unpredictable (so it might not have any entropy at all), and (2) it
+ * doesn't have the performance constraints of crng_fast_load().
+ *
+ * So we do something more comprehensive which is guaranteed to touch
+ * all of the primary_crng's state, and which uses an LFSR with a
+ * period of 255 as part of the mixing algorithm.  Finally, we do
+ * *not* advance crng_init_cnt since the buffer we get may be something
+ * like a fixed DMI table (for example), which might very well be
+ * unique to the machine, but is otherwise unvarying.
+ */
+static int crng_slow_load(const char *cp, size_t len)
+{
+	unsigned long		flags;
+	static unsigned char	lfsr = 1;
+	unsigned char		tmp;
+	unsigned		i, max = CHACHA20_KEY_SIZE;
+	const char *		src_buf = cp;
+	char *			dest_buf = (char *) &primary_crng.state[4];
+
+	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
+		return 0;
+	if (crng_init != 0) {
+		spin_unlock_irqrestore(&primary_crng.lock, flags);
+		return 0;
+	}
+	if (len > max)
+		max = len;
+
+	for (i = 0; i < max ; i++) {
+		tmp = lfsr;
+		lfsr >>= 1;
+		if (tmp & 1)
+			lfsr ^= 0xE1;
+		tmp = dest_buf[i % CHACHA20_KEY_SIZE];
+		dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
+		lfsr += (tmp << 3) | (tmp >> 5);
+	}
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
+	return 1;
+}
+
+static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
+{
+	unsigned long	flags;
+	int		i, num;
+	union {
+		__u32	block[CHACHA20_BLOCK_WORDS];
+		__u32	key[8];
+	} buf;
+
+	if (r) {
+		num = extract_entropy(r, &buf, 32, 16, 0);
+		if (num == 0)
+			return;
+	} else {
+		_extract_crng(&primary_crng, buf.block);
+		_crng_backtrack_protect(&primary_crng, buf.block,
+					CHACHA20_KEY_SIZE);
+	}
+	spin_lock_irqsave(&crng->lock, flags);
+	for (i = 0; i < 8; i++) {
+		unsigned long	rv;
+		if (!arch_get_random_seed_long(&rv) &&
+		    !arch_get_random_long(&rv))
+			rv = random_get_entropy();
+		crng->state[i+4] ^= buf.key[i] ^ rv;
+	}
+	memzero_explicit(&buf, sizeof(buf));
+	crng->init_time = jiffies;
+	spin_unlock_irqrestore(&crng->lock, flags);
+	if (crng == &primary_crng && crng_init < 2) {
+		invalidate_batched_entropy();
+		numa_crng_init();
+		crng_init = 2;
+		process_random_ready_list();
+		wake_up_interruptible(&crng_init_wait);
+		pr_notice("random: crng init done\n");
+		if (unseeded_warning.missed) {
+			pr_notice("random: %d get_random_xx warning(s) missed "
+				  "due to ratelimiting\n",
+				  unseeded_warning.missed);
+			unseeded_warning.missed = 0;
+		}
+		if (urandom_warning.missed) {
+			pr_notice("random: %d urandom warning(s) missed "
+				  "due to ratelimiting\n",
+				  urandom_warning.missed);
+			urandom_warning.missed = 0;
+		}
+	}
+}
+
+static void _extract_crng(struct crng_state *crng,
+			  __u32 out[CHACHA20_BLOCK_WORDS])
+{
+	unsigned long v, flags;
+
+	if (crng_ready() &&
+	    (time_after(crng_global_init_time, crng->init_time) ||
+	     time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
+		crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
+	spin_lock_irqsave(&crng->lock, flags);
+	if (arch_get_random_long(&v))
+		crng->state[14] ^= v;
+	chacha20_block(&crng->state[0], out);
+	if (crng->state[12] == 0)
+		crng->state[13]++;
+	spin_unlock_irqrestore(&crng->lock, flags);
+}
+
+static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
+{
+	struct crng_state *crng = NULL;
+
+#ifdef CONFIG_NUMA
+	if (crng_node_pool)
+		crng = crng_node_pool[numa_node_id()];
+	if (crng == NULL)
+#endif
+		crng = &primary_crng;
+	_extract_crng(crng, out);
+}
+
+/*
+ * Use the leftover bytes from the CRNG block output (if there is
+ * enough) to mutate the CRNG key to provide backtracking protection.
+ */
+static void _crng_backtrack_protect(struct crng_state *crng,
+				    __u32 tmp[CHACHA20_BLOCK_WORDS], int used)
+{
+	unsigned long	flags;
+	__u32		*s, *d;
+	int		i;
+
+	used = round_up(used, sizeof(__u32));
+	if (used + CHACHA20_KEY_SIZE > CHACHA20_BLOCK_SIZE) {
+		extract_crng(tmp);
+		used = 0;
+	}
+	spin_lock_irqsave(&crng->lock, flags);
+	s = &tmp[used / sizeof(__u32)];
+	d = &crng->state[4];
+	for (i = 0; i < 8; i++)
+		*d++ ^= *s++;
+	spin_unlock_irqrestore(&crng->lock, flags);
+}
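+
+/*
+ * Illustrative example (added note): CHACHA20_BLOCK_SIZE is 64 bytes
+ * and CHACHA20_KEY_SIZE is 32, so a caller that consumed at most 32
+ * bytes of the block still has a full key's worth of unused output to
+ * fold into state[4..11]; if more was used, a fresh block is generated
+ * above rather than reusing bytes that were already handed out.
+ */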
+
+static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
+{
+	struct crng_state *crng = NULL;
+
+#ifdef CONFIG_NUMA
+	if (crng_node_pool)
+		crng = crng_node_pool[numa_node_id()];
+	if (crng == NULL)
+#endif
+		crng = &primary_crng;
+	_crng_backtrack_protect(crng, tmp, used);
+}
+
+static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
+{
+	ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
+	__u32 tmp[CHACHA20_BLOCK_WORDS];
+	int large_request = (nbytes > 256);
+
+	while (nbytes) {
+		if (large_request && need_resched()) {
+			if (signal_pending(current)) {
+				if (ret == 0)
+					ret = -ERESTARTSYS;
+				break;
+			}
+			schedule();
+		}
+
+		extract_crng(tmp);
+		i = min_t(int, nbytes, CHACHA20_BLOCK_SIZE);
+		if (copy_to_user(buf, tmp, i)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		nbytes -= i;
+		buf += i;
+		ret += i;
+	}
+	crng_backtrack_protect(tmp, i);
+
+	/* Wipe data just written to memory */
+	memzero_explicit(tmp, sizeof(tmp));
+
+	return ret;
+}
+
+
+/*********************************************************************
+ *
+ * Entropy input management
+ *
+ *********************************************************************/
+
+/* There is one of these per entropy source */
+struct timer_rand_state {
+	cycles_t last_time;
+	long last_delta, last_delta2;
+};
+
+#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }
+
+/*
+ * Add device- or boot-specific data to the input pool to help
+ * initialize it.
+ *
+ * None of this adds any entropy; it is meant to avoid the problem of
+ * the entropy pool having similar initial state across largely
+ * identical devices.
+ */
+void add_device_randomness(const void *buf, unsigned int size)
+{
+	unsigned long time = random_get_entropy() ^ jiffies;
+	unsigned long flags;
+
+	if (!crng_ready() && size)
+		crng_slow_load(buf, size);
+
+	trace_add_device_randomness(size, _RET_IP_);
+	spin_lock_irqsave(&input_pool.lock, flags);
+	_mix_pool_bytes(&input_pool, buf, size);
+	_mix_pool_bytes(&input_pool, &time, sizeof(time));
+	spin_unlock_irqrestore(&input_pool.lock, flags);
+}
+EXPORT_SYMBOL(add_device_randomness);
+
+static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
+
+/*
+ * This function adds entropy to the entropy "pool" by using timing
+ * delays.  It uses the timer_rand_state structure to make an estimate
+ * of how many bits of entropy this call has added to the pool.
+ *
+ * The number "num" is also added to the pool - it should somehow describe
+ * the type of event which just happened.  This is currently 0-255 for
+ * keyboard scan codes, and 256 upwards for interrupts.
+ *
+ */
+static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+{
+	struct entropy_store	*r;
+	struct {
+		long jiffies;
+		unsigned cycles;
+		unsigned num;
+	} sample;
+	long delta, delta2, delta3;
+
+	sample.jiffies = jiffies;
+	sample.cycles = random_get_entropy();
+	sample.num = num;
+	r = &input_pool;
+	mix_pool_bytes(r, &sample, sizeof(sample));
+
+	/*
+	 * Calculate number of bits of randomness we probably added.
+	 * We take into account the first, second and third-order deltas
+	 * in order to make our estimate.
+	 */
+	delta = sample.jiffies - state->last_time;
+	state->last_time = sample.jiffies;
+
+	delta2 = delta - state->last_delta;
+	state->last_delta = delta;
+
+	delta3 = delta2 - state->last_delta2;
+	state->last_delta2 = delta2;
+
+	if (delta < 0)
+		delta = -delta;
+	if (delta2 < 0)
+		delta2 = -delta2;
+	if (delta3 < 0)
+		delta3 = -delta3;
+	if (delta > delta2)
+		delta = delta2;
+	if (delta > delta3)
+		delta = delta3;
+
+	/*
+	 * delta is now the minimum absolute delta.
+	 * Round down by 1 bit on general principles,
+	 * and limit the entropy estimate to 11 bits
+	 * (the cap applied by min_t() below).
+	 */
+	credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
+}
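+
+/*
+ * Illustrative example (added note): if the smallest of the three
+ * absolute deltas above is 100 jiffies, the credit works out to
+ * min(fls(100 >> 1), 11) = min(fls(50), 11) = min(6, 11) = 6 bits.
+ */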
+
+void add_input_randomness(unsigned int type, unsigned int code,
+				 unsigned int value)
+{
+	static unsigned char last_value;
+
+	/* ignore autorepeat and the like */
+	if (value == last_value)
+		return;
+
+	last_value = value;
+	add_timer_randomness(&input_timer_state,
+			     (type << 4) ^ code ^ (code >> 4) ^ value);
+	trace_add_input_randomness(ENTROPY_BITS(&input_pool));
+}
+EXPORT_SYMBOL_GPL(add_input_randomness);
+
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
+
+#ifdef ADD_INTERRUPT_BENCH
+static unsigned long avg_cycles, avg_deviation;
+
+#define AVG_SHIFT 8     /* Exponential average factor k=1/256 */
+#define FIXED_1_2 (1 << (AVG_SHIFT-1))
+
+static void add_interrupt_bench(cycles_t start)
+{
+	long delta = random_get_entropy() - start;
+
+	/* Use a weighted moving average */
+	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
+	avg_cycles += delta;
+	/* And average deviation */
+	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
+	avg_deviation += delta;
+}
+#else
+#define add_interrupt_bench(x)
+#endif
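+
+/*
+ * Added note: with AVG_SHIFT == 8 the accumulators above are fixed
+ * point values scaled by 256, updated as an exponential moving average
+ * with weight k = 1/256:
+ *
+ *	avg += x - avg/256	(the downshift rounded via FIXED_1_2)
+ *
+ * so avg_cycles/256 approximates the mean cycle cost per interrupt and
+ * avg_deviation/256 the mean absolute deviation.
+ */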
+
+static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
+{
+	__u32 *ptr = (__u32 *) regs;
+	unsigned int idx;
+
+	if (regs == NULL)
+		return 0;
+	idx = READ_ONCE(f->reg_idx);
+	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+		idx = 0;
+	ptr += idx++;
+	WRITE_ONCE(f->reg_idx, idx);
+	return *ptr;
+}
+
+void add_interrupt_randomness(int irq, int irq_flags)
+{
+	struct entropy_store	*r;
+	struct fast_pool	*fast_pool = this_cpu_ptr(&irq_randomness);
+	struct pt_regs		*regs = get_irq_regs();
+	unsigned long		now = jiffies;
+	cycles_t		cycles = random_get_entropy();
+	__u32			c_high, j_high;
+	__u64			ip;
+	unsigned long		seed;
+	int			credit = 0;
+
+	if (cycles == 0)
+		cycles = get_reg(fast_pool, regs);
+	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
+	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
+	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
+	fast_pool->pool[1] ^= now ^ c_high;
+	ip = regs ? instruction_pointer(regs) : _RET_IP_;
+	fast_pool->pool[2] ^= ip;
+	fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
+		get_reg(fast_pool, regs);
+
+	fast_mix(fast_pool);
+	add_interrupt_bench(cycles);
+
+	if (unlikely(crng_init == 0)) {
+		if ((fast_pool->count >= 64) &&
+		    crng_fast_load((char *) fast_pool->pool,
+				   sizeof(fast_pool->pool))) {
+			fast_pool->count = 0;
+			fast_pool->last = now;
+		}
+		return;
+	}
+
+	if ((fast_pool->count < 64) &&
+	    !time_after(now, fast_pool->last + HZ))
+		return;
+
+	r = &input_pool;
+	if (!spin_trylock(&r->lock))
+		return;
+
+	fast_pool->last = now;
+	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
+
+	/*
+	 * If we have an architectural seed generator, produce a seed and
+	 * add it to the pool.  For the sake of paranoia don't let the
+	 * architectural seed generator dominate the input from the
+	 * interrupt noise.
+	 */
+	if (arch_get_random_seed_long(&seed)) {
+		__mix_pool_bytes(r, &seed, sizeof(seed));
+		credit = 1;
+	}
+	spin_unlock(&r->lock);
+
+	fast_pool->count = 0;
+
+	/* award one bit for the contents of the fast pool */
+	credit_entropy_bits(r, credit + 1);
+}
+EXPORT_SYMBOL_GPL(add_interrupt_randomness);
+
+#ifdef CONFIG_BLOCK
+void add_disk_randomness(struct gendisk *disk)
+{
+	if (!disk || !disk->random)
+		return;
+	/* first major is 1, so we get >= 0x200 here */
+	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
+	trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
+}
+EXPORT_SYMBOL_GPL(add_disk_randomness);
+#endif
+
+/*********************************************************************
+ *
+ * Entropy extraction routines
+ *
+ *********************************************************************/
+
+/*
+ * This utility function is responsible for transferring entropy
+ * from the primary pool to the secondary extraction pool. We make
+ * sure we pull enough for a 'catastrophic reseed'.
+ */
+static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
+static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+{
+	if (!r->pull ||
+	    r->entropy_count >= (nbytes << (ENTROPY_SHIFT + 3)) ||
+	    r->entropy_count > r->poolinfo->poolfracbits)
+		return;
+
+	_xfer_secondary_pool(r, nbytes);
+}
+
+static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+{
+	__u32	tmp[OUTPUT_POOL_WORDS];
+	int bytes = nbytes;
+
+	/* pull at least as much as a wakeup */
+	bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
+	/* but never more than the buffer size */
+	bytes = min_t(int, bytes, sizeof(tmp));
+
+	trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
+				  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
+	bytes = extract_entropy(r->pull, tmp, bytes,
+				random_read_wakeup_bits / 8, 0);
+	mix_pool_bytes(r, tmp, bytes);
+	credit_entropy_bits(r, bytes*8);
+}
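+
+/*
+ * Illustrative example (added note, assuming the defaults defined
+ * earlier in this file: random_read_wakeup_bits == 64 and
+ * OUTPUT_POOL_WORDS == 32): a 4-byte request still pulls
+ * max(4, 64/8) = 8 bytes from the input pool, while a large request is
+ * capped at sizeof(tmp) == 128 bytes.
+ */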
+
+/*
+ * Used as a workqueue function so that when the input pool is getting
+ * full, we can "spill over" some entropy to the output pools.  That
+ * way the output pools can store some of the excess entropy instead
+ * of letting it go to waste.
+ */
+static void push_to_pool(struct work_struct *work)
+{
+	struct entropy_store *r = container_of(work, struct entropy_store,
+					      push_work);
+	BUG_ON(!r);
+	_xfer_secondary_pool(r, random_read_wakeup_bits/8);
+	trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
+			   r->pull->entropy_count >> ENTROPY_SHIFT);
+}
+
+/*
+ * This function decides how many bytes to actually take from the
+ * given pool, and also debits the entropy count accordingly.
+ */
+static size_t account(struct entropy_store *r, size_t nbytes, int min,
+		      int reserved)
+{
+	int entropy_count, orig, have_bytes;
+	size_t ibytes, nfrac;
+
+	BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
+
+	/* Can we pull enough? */
+retry:
+	entropy_count = orig = READ_ONCE(r->entropy_count);
+	ibytes = nbytes;
+	/* never pull more than available */
+	have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
+
+	if ((have_bytes -= reserved) < 0)
+		have_bytes = 0;
+	ibytes = min_t(size_t, ibytes, have_bytes);
+	if (ibytes < min)
+		ibytes = 0;
+
+	if (unlikely(entropy_count < 0)) {
+		pr_warn("random: negative entropy count: pool %s count %d\n",
+			r->name, entropy_count);
+		WARN_ON(1);
+		entropy_count = 0;
+	}
+	nfrac = ibytes << (ENTROPY_SHIFT + 3);
+	if ((size_t) entropy_count > nfrac)
+		entropy_count -= nfrac;
+	else
+		entropy_count = 0;
+
+	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+		goto retry;
+
+	trace_debit_entropy(r->name, 8 * ibytes);
+	if (ibytes &&
+	    (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
+		wake_up_interruptible(&random_write_wait);
+		kill_fasync(&fasync, SIGIO, POLL_OUT);
+	}
+
+	return ibytes;
+}
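+
+/*
+ * Illustrative example (added note): entropy_count is kept in
+ * fractional bits (1/8th bits, ENTROPY_SHIFT == 3), so a count of 4096
+ * fracbits is 512 bits and have_bytes = 4096 >> 6 = 64.  With
+ * reserved == 16, at most 48 bytes may be taken, and taking 32 bytes
+ * debits 32 << 6 = 2048 fracbits.
+ */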
+
+/*
+ * This function does the actual extraction for extract_entropy and
+ * extract_entropy_user.
+ *
+ * Note: we assume that .poolwords is a multiple of 16 words.
+ */
+static void extract_buf(struct entropy_store *r, __u8 *out)
+{
+	int i;
+	union {
+		__u32 w[5];
+		unsigned long l[LONGS(20)];
+	} hash;
+	__u32 workspace[SHA_WORKSPACE_WORDS];
+	unsigned long flags;
+
+	/*
+	 * If we have an architectural hardware random number
+	 * generator, use it for SHA's initial vector
+	 */
+	sha_init(hash.w);
+	for (i = 0; i < LONGS(20); i++) {
+		unsigned long v;
+		if (!arch_get_random_long(&v))
+			break;
+		hash.l[i] = v;
+	}
+
+	/* Generate a hash across the pool, 16 words (512 bits) at a time */
+	spin_lock_irqsave(&r->lock, flags);
+	for (i = 0; i < r->poolinfo->poolwords; i += 16)
+		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+
+	/*
+	 * We mix the hash back into the pool to prevent backtracking
+	 * attacks (where the attacker knows the state of the pool
+	 * plus the current outputs, and attempts to find previous
+	 * outputs), unless the hash function can be inverted. By
+	 * mixing at least a SHA1 worth of hash data back, we make
+	 * brute-forcing the feedback as hard as brute-forcing the
+	 * hash.
+	 */
+	__mix_pool_bytes(r, hash.w, sizeof(hash.w));
+	spin_unlock_irqrestore(&r->lock, flags);
+
+	memzero_explicit(workspace, sizeof(workspace));
+
+	/*
+	 * In case the hash function has some recognizable output
+	 * pattern, we fold it in half. Thus, we always feed back
+	 * twice as much data as we output.
+	 */
+	hash.w[0] ^= hash.w[3];
+	hash.w[1] ^= hash.w[4];
+	hash.w[2] ^= rol32(hash.w[2], 16);
+
+	memcpy(out, &hash, EXTRACT_SIZE);
+	memzero_explicit(&hash, sizeof(hash));
+}
+
+static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
+				size_t nbytes, int fips)
+{
+	ssize_t ret = 0, i;
+	__u8 tmp[EXTRACT_SIZE];
+	unsigned long flags;
+
+	while (nbytes) {
+		extract_buf(r, tmp);
+
+		if (fips) {
+			spin_lock_irqsave(&r->lock, flags);
+			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
+				panic("Hardware RNG duplicated output!\n");
+			memcpy(r->last_data, tmp, EXTRACT_SIZE);
+			spin_unlock_irqrestore(&r->lock, flags);
+		}
+		i = min_t(int, nbytes, EXTRACT_SIZE);
+		memcpy(buf, tmp, i);
+		nbytes -= i;
+		buf += i;
+		ret += i;
+	}
+
+	/* Wipe data just returned from memory */
+	memzero_explicit(tmp, sizeof(tmp));
+
+	return ret;
+}
+
+/*
+ * This function extracts randomness from the "entropy pool", and
+ * returns it in a buffer.
+ *
+ * The min parameter specifies the minimum amount we can pull before
+ * failing to avoid races that defeat catastrophic reseeding, while the
+ * reserved parameter indicates how much entropy we must leave in the
+ * pool after each pull to avoid starving other readers.
+ */
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+				 size_t nbytes, int min, int reserved)
+{
+	__u8 tmp[EXTRACT_SIZE];
+	unsigned long flags;
+
+	/* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
+	if (fips_enabled) {
+		spin_lock_irqsave(&r->lock, flags);
+		if (!r->last_data_init) {
+			r->last_data_init = 1;
+			spin_unlock_irqrestore(&r->lock, flags);
+			trace_extract_entropy(r->name, EXTRACT_SIZE,
+					      ENTROPY_BITS(r), _RET_IP_);
+			xfer_secondary_pool(r, EXTRACT_SIZE);
+			extract_buf(r, tmp);
+			spin_lock_irqsave(&r->lock, flags);
+			memcpy(r->last_data, tmp, EXTRACT_SIZE);
+		}
+		spin_unlock_irqrestore(&r->lock, flags);
+	}
+
+	trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
+	xfer_secondary_pool(r, nbytes);
+	nbytes = account(r, nbytes, min, reserved);
+
+	return _extract_entropy(r, buf, nbytes, fips_enabled);
+}
+
+/*
+ * This function extracts randomness from the "entropy pool", and
+ * returns it in a userspace buffer.
+ */
+static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+				    size_t nbytes)
+{
+	ssize_t ret = 0, i;
+	__u8 tmp[EXTRACT_SIZE];
+	int large_request = (nbytes > 256);
+
+	trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
+	xfer_secondary_pool(r, nbytes);
+	nbytes = account(r, nbytes, 0, 0);
+
+	while (nbytes) {
+		if (large_request && need_resched()) {
+			if (signal_pending(current)) {
+				if (ret == 0)
+					ret = -ERESTARTSYS;
+				break;
+			}
+			schedule();
+		}
+
+		extract_buf(r, tmp);
+		i = min_t(int, nbytes, EXTRACT_SIZE);
+		if (copy_to_user(buf, tmp, i)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		nbytes -= i;
+		buf += i;
+		ret += i;
+	}
+
+	/* Wipe data just returned from memory */
+	memzero_explicit(tmp, sizeof(tmp));
+
+	return ret;
+}
+
+#define warn_unseeded_randomness(previous) \
+	_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
+
+static void _warn_unseeded_randomness(const char *func_name, void *caller,
+				      void **previous)
+{
+#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+	const bool print_once = false;
+#else
+	static bool print_once __read_mostly;
+#endif
+
+	if (print_once ||
+	    crng_ready() ||
+	    (previous && (caller == READ_ONCE(*previous))))
+		return;
+	WRITE_ONCE(*previous, caller);
+#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+	print_once = true;
+#endif
+	if (__ratelimit(&unseeded_warning))
+		pr_notice("random: %s called from %pS with crng_init=%d\n",
+			  func_name, caller, crng_init);
+}
+
+/*
+ * This function is the exported kernel interface.  It returns some
+ * number of good random numbers, suitable for key generation, seeding
+ * TCP sequence numbers, etc.  It does not rely on the hardware random
+ * number generator.  For random bytes direct from the hardware RNG
+ * (when available), use get_random_bytes_arch().  To guarantee that
+ * the randomness returned here is cryptographically secure, call
+ * wait_for_random_bytes() first and make sure it has returned 0.
+ */
+static void _get_random_bytes(void *buf, int nbytes)
+{
+	__u32 tmp[CHACHA20_BLOCK_WORDS];
+
+	trace_get_random_bytes(nbytes, _RET_IP_);
+
+	while (nbytes >= CHACHA20_BLOCK_SIZE) {
+		extract_crng(buf);
+		buf += CHACHA20_BLOCK_SIZE;
+		nbytes -= CHACHA20_BLOCK_SIZE;
+	}
+
+	if (nbytes > 0) {
+		extract_crng(tmp);
+		memcpy(buf, tmp, nbytes);
+		crng_backtrack_protect(tmp, nbytes);
+	} else
+		crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE);
+	memzero_explicit(tmp, sizeof(tmp));
+}
+
+void get_random_bytes(void *buf, int nbytes)
+{
+	static void *previous;
+
+	warn_unseeded_randomness(&previous);
+	_get_random_bytes(buf, nbytes);
+}
+EXPORT_SYMBOL(get_random_bytes);
+
+/*
+ * Wait for the urandom pool to be seeded and thus guaranteed to supply
+ * cryptographically secure random numbers. This applies to: the /dev/urandom
+ * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
+ * family of functions. Using any of these functions without first calling
+ * this function forfeits the guarantee of security.
+ *
+ * Returns: 0 if the urandom pool has been seeded.
+ *          -ERESTARTSYS if the function was interrupted by a signal.
+ */
+int wait_for_random_bytes(void)
+{
+	if (likely(crng_ready()))
+		return 0;
+	return wait_event_interruptible(crng_init_wait, crng_ready());
+}
+EXPORT_SYMBOL(wait_for_random_bytes);
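+
+/*
+ * Illustrative usage sketch (added note): drawing key material only
+ * once the pool is known to be seeded:
+ *
+ *	u8 key[16];
+ *	int err = wait_for_random_bytes();
+ *
+ *	if (err)
+ *		return err;
+ *	get_random_bytes(key, sizeof(key));
+ *
+ * where a non-zero err means the wait was interrupted by a signal.
+ */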
+
+/*
+ * Returns whether or not the urandom pool has been seeded and thus guaranteed
+ * to supply cryptographically secure random numbers. This applies to: the
+ * /dev/urandom device, the get_random_bytes function, and the
+ * get_random_{u32,u64,int,long} family of functions.
+ *
+ * Returns: true if the urandom pool has been seeded.
+ *          false if the urandom pool has not been seeded.
+ */
+bool rng_is_initialized(void)
+{
+	return crng_ready();
+}
+EXPORT_SYMBOL(rng_is_initialized);
+
+/*
+ * Add a callback function that will be invoked when the nonblocking
+ * pool is initialised.
+ *
+ * returns: 0 if callback is successfully added
+ *	    -EALREADY if pool is already initialised (callback not called)
+ *	    -ENOENT if module for callback is not alive
+ */
+int add_random_ready_callback(struct random_ready_callback *rdy)
+{
+	struct module *owner;
+	unsigned long flags;
+	int err = -EALREADY;
+
+	if (crng_ready())
+		return err;
+
+	owner = rdy->owner;
+	if (!try_module_get(owner))
+		return -ENOENT;
+
+	spin_lock_irqsave(&random_ready_list_lock, flags);
+	if (crng_ready())
+		goto out;
+
+	owner = NULL;
+
+	list_add(&rdy->list, &random_ready_list);
+	err = 0;
+
+out:
+	spin_unlock_irqrestore(&random_ready_list_lock, flags);
+
+	module_put(owner);
+
+	return err;
+}
+EXPORT_SYMBOL(add_random_ready_callback);
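+
+/*
+ * Illustrative usage sketch (added note; my_seed_ready and my_rdy are
+ * hypothetical names): a module arranging to run code as soon as the
+ * crng is fully initialised:
+ *
+ *	static void my_seed_ready(struct random_ready_callback *rdy)
+ *	{
+ *		... get_random_bytes() is fully seeded from here on ...
+ *	}
+ *	static struct random_ready_callback my_rdy = {
+ *		.owner = THIS_MODULE,
+ *		.func  = my_seed_ready,
+ *	};
+ *
+ * add_random_ready_callback(&my_rdy) then returns 0 on success, or
+ * -EALREADY if the crng is already up, in which case the caller can
+ * simply call the function directly.
+ */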
+
+/*
+ * Delete a previously registered readiness callback function.
+ */
+void del_random_ready_callback(struct random_ready_callback *rdy)
+{
+	unsigned long flags;
+	struct module *owner = NULL;
+
+	spin_lock_irqsave(&random_ready_list_lock, flags);
+	if (!list_empty(&rdy->list)) {
+		list_del_init(&rdy->list);
+		owner = rdy->owner;
+	}
+	spin_unlock_irqrestore(&random_ready_list_lock, flags);
+
+	module_put(owner);
+}
+EXPORT_SYMBOL(del_random_ready_callback);
+
+/*
+ * This function will use the architecture-specific hardware random
+ * number generator if it is available.  The arch-specific hw RNG will
+ * almost certainly be faster than what we can do in software, but it
+ * is impossible to verify that it is implemented securely (as
+ * opposed to, say, the AES encryption of a sequence number using a
+ * key known by the NSA).  So it's useful if we need the speed, but
+ * only if we're willing to trust the hardware manufacturer not to
+ * have put in a back door.
+ *
+ * Return number of bytes filled in.
+ */
+int __must_check get_random_bytes_arch(void *buf, int nbytes)
+{
+	int left = nbytes;
+	char *p = buf;
+
+	trace_get_random_bytes_arch(left, _RET_IP_);
+	while (left) {
+		unsigned long v;
+		int chunk = min_t(int, left, sizeof(unsigned long));
+
+		if (!arch_get_random_long(&v))
+			break;
+
+		memcpy(p, &v, chunk);
+		p += chunk;
+		left -= chunk;
+	}
+
+	return nbytes - left;
+}
+EXPORT_SYMBOL(get_random_bytes_arch);
+
+/*
+ * init_std_data - initialize pool with system data
+ *
+ * @r: pool to initialize
+ *
+ * This function clears the pool's entropy count and mixes some system
+ * data into the pool to prepare it for use. The pool is not cleared
+ * as that can only decrease the entropy in the pool.
+ */
+static void init_std_data(struct entropy_store *r)
+{
+	int i;
+	ktime_t now = ktime_get_real();
+	unsigned long rv;
+
+	r->last_pulled = jiffies;
+	mix_pool_bytes(r, &now, sizeof(now));
+	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
+		if (!arch_get_random_seed_long(&rv) &&
+		    !arch_get_random_long(&rv))
+			rv = random_get_entropy();
+		mix_pool_bytes(r, &rv, sizeof(rv));
+	}
+	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+}
+
+/*
+ * Note that setup_arch() may call add_device_randomness()
+ * long before we get here. This allows seeding of the pools
+ * with some platform dependent data very early in the boot
+ * process. But it limits our options here. We must use
+ * statically allocated structures that already have all
+ * initializations complete at compile time. We should also
+ * take care not to overwrite the precious per platform data
+ * we were given.
+ */
+static int rand_initialize(void)
+{
+	init_std_data(&input_pool);
+	init_std_data(&blocking_pool);
+	crng_initialize(&primary_crng);
+	crng_global_init_time = jiffies;
+	if (ratelimit_disable) {
+		urandom_warning.interval = 0;
+		unseeded_warning.interval = 0;
+	}
+	return 0;
+}
+early_initcall(rand_initialize);
+
+#ifdef CONFIG_BLOCK
+void rand_initialize_disk(struct gendisk *disk)
+{
+	struct timer_rand_state *state;
+
+	/*
+	 * If kzalloc returns null, we just won't use that entropy
+	 * source.
+	 */
+	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+	if (state) {
+		state->last_time = INITIAL_JIFFIES;
+		disk->random = state;
+	}
+}
+#endif
+
+static ssize_t
+_random_read(int nonblock, char __user *buf, size_t nbytes)
+{
+	ssize_t n;
+
+	if (nbytes == 0)
+		return 0;
+
+	nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
+	while (1) {
+		n = extract_entropy_user(&blocking_pool, buf, nbytes);
+		if (n < 0)
+			return n;
+		trace_random_read(n*8, (nbytes-n)*8,
+				  ENTROPY_BITS(&blocking_pool),
+				  ENTROPY_BITS(&input_pool));
+		if (n > 0)
+			return n;
+
+		/* Pool is (near) empty.  Maybe wait and retry. */
+		if (nonblock)
+			return -EAGAIN;
+
+		wait_event_interruptible(random_read_wait,
+			ENTROPY_BITS(&input_pool) >=
+			random_read_wakeup_bits);
+		if (signal_pending(current))
+			return -ERESTARTSYS;
+	}
+}
+
+static ssize_t
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+{
+	return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes);
+}
+
+static ssize_t
+urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+{
+	unsigned long flags;
+	static int maxwarn = 10;
+	int ret;
+
+	if (!crng_ready() && maxwarn > 0) {
+		maxwarn--;
+		if (__ratelimit(&urandom_warning))
+			printk(KERN_NOTICE "random: %s: uninitialized "
+			       "urandom read (%zd bytes read)\n",
+			       current->comm, nbytes);
+		spin_lock_irqsave(&primary_crng.lock, flags);
+		crng_init_cnt = 0;
+		spin_unlock_irqrestore(&primary_crng.lock, flags);
+	}
+	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
+	ret = extract_crng_user(buf, nbytes);
+	trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
+	return ret;
+}
+
+static __poll_t
+random_poll(struct file *file, poll_table * wait)
+{
+	__poll_t mask;
+
+	poll_wait(file, &random_read_wait, wait);
+	poll_wait(file, &random_write_wait, wait);
+	mask = 0;
+	if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
+		mask |= EPOLLIN | EPOLLRDNORM;
+	if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
+		mask |= EPOLLOUT | EPOLLWRNORM;
+	return mask;
+}
+
+static int
+write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+{
+	size_t bytes;
+	__u32 t, buf[16];
+	const char __user *p = buffer;
+
+	while (count > 0) {
+		int b, i = 0;
+
+		bytes = min(count, sizeof(buf));
+		if (copy_from_user(&buf, p, bytes))
+			return -EFAULT;
+
+		for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
+			if (!arch_get_random_int(&t))
+				break;
+			buf[i] ^= t;
+		}
+
+		count -= bytes;
+		p += bytes;
+
+		mix_pool_bytes(r, buf, bytes);
+		cond_resched();
+	}
+
+	return 0;
+}
+
+static ssize_t random_write(struct file *file, const char __user *buffer,
+			    size_t count, loff_t *ppos)
+{
+	ssize_t ret;
+
+	ret = write_pool(&input_pool, buffer, count);
+	if (ret)
+		return ret;
+
+	return (ssize_t)count;
+}
+
+static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+	int size, ent_count;
+	int __user *p = (int __user *)arg;
+	int retval;
+
+	switch (cmd) {
+	case RNDGETENTCNT:
+		/* inherently racy, no point locking */
+		ent_count = ENTROPY_BITS(&input_pool);
+		if (put_user(ent_count, p))
+			return -EFAULT;
+		return 0;
+	case RNDADDTOENTCNT:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (get_user(ent_count, p))
+			return -EFAULT;
+		return credit_entropy_bits_safe(&input_pool, ent_count);
+	case RNDADDENTROPY:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (get_user(ent_count, p++))
+			return -EFAULT;
+		if (ent_count < 0)
+			return -EINVAL;
+		if (get_user(size, p++))
+			return -EFAULT;
+		retval = write_pool(&input_pool, (const char __user *)p,
+				    size);
+		if (retval < 0)
+			return retval;
+		return credit_entropy_bits_safe(&input_pool, ent_count);
+	case RNDZAPENTCNT:
+	case RNDCLEARPOOL:
+		/*
+		 * Clear the entropy pool counters. We no longer clear
+		 * the entropy pool, as that's silly.
+		 */
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		input_pool.entropy_count = 0;
+		blocking_pool.entropy_count = 0;
+		return 0;
+	case RNDRESEEDCRNG:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (crng_init < 2)
+			return -ENODATA;
+		crng_reseed(&primary_crng, NULL);
+		crng_global_init_time = jiffies - 1;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int random_fasync(int fd, struct file *filp, int on)
+{
+	return fasync_helper(fd, filp, on, &fasync);
+}
+
+const struct file_operations random_fops = {
+	.read  = random_read,
+	.write = random_write,
+	.poll  = random_poll,
+	.unlocked_ioctl = random_ioctl,
+	.fasync = random_fasync,
+	.llseek = noop_llseek,
+};
+
+const struct file_operations urandom_fops = {
+	.read  = urandom_read,
+	.write = random_write,
+	.unlocked_ioctl = random_ioctl,
+	.fasync = random_fasync,
+	.llseek = noop_llseek,
+};
+
+SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
+		unsigned int, flags)
+{
+	int ret;
+
+	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
+		return -EINVAL;
+
+	if (count > INT_MAX)
+		count = INT_MAX;
+
+	if (flags & GRND_RANDOM)
+		return _random_read(flags & GRND_NONBLOCK, buf, count);
+
+	if (!crng_ready()) {
+		if (flags & GRND_NONBLOCK)
+			return -EAGAIN;
+		ret = wait_for_random_bytes();
+		if (unlikely(ret))
+			return ret;
+	}
+	return urandom_read(NULL, buf, count, NULL);
+}
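+
+/*
+ * Illustrative usage sketch (added note, userspace side): reading 16
+ * bytes, blocking until the crng is seeded:
+ *
+ *	unsigned char buf[16];
+ *	ssize_t n = syscall(SYS_getrandom, buf, sizeof(buf), 0);
+ *
+ * With GRND_NONBLOCK the call fails with EAGAIN instead of blocking;
+ * with GRND_RANDOM it draws from the blocking pool via _random_read().
+ */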
+
+/********************************************************************
+ *
+ * Sysctl interface
+ *
+ ********************************************************************/
+
+#ifdef CONFIG_SYSCTL
+
+#include <linux/sysctl.h>
+
+static int min_read_thresh = 8, min_write_thresh;
+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
+static int max_write_thresh = INPUT_POOL_WORDS * 32;
+static int random_min_urandom_seed = 60;
+static char sysctl_bootid[16];
+
+/*
+ * This function is used to return both the bootid UUID and a random
+ * UUID.  The difference is in whether table->data is NULL; if it is,
+ * then a new UUID is generated and returned to the user.
+ *
+ * If the user accesses this via the proc interface, the UUID will be
+ * returned as an ASCII string in the standard UUID format; if via the
+ * sysctl system call, as 16 bytes of binary data.
+ */
+static int proc_do_uuid(struct ctl_table *table, int write,
+			void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	struct ctl_table fake_table;
+	unsigned char buf[64], tmp_uuid[16], *uuid;
+
+	uuid = table->data;
+	if (!uuid) {
+		uuid = tmp_uuid;
+		generate_random_uuid(uuid);
+	} else {
+		static DEFINE_SPINLOCK(bootid_spinlock);
+
+		spin_lock(&bootid_spinlock);
+		if (!uuid[8])
+			generate_random_uuid(uuid);
+		spin_unlock(&bootid_spinlock);
+	}
+
+	sprintf(buf, "%pU", uuid);
+
+	fake_table.data = buf;
+	fake_table.maxlen = sizeof(buf);
+
+	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
+}
+
+/*
+ * Return entropy available scaled to integral bits
+ */
+static int proc_do_entropy(struct ctl_table *table, int write,
+			   void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	struct ctl_table fake_table;
+	int entropy_count;
+
+	entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
+
+	fake_table.data = &entropy_count;
+	fake_table.maxlen = sizeof(entropy_count);
+
+	return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
+}
+
+static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
+extern struct ctl_table random_table[];
+struct ctl_table random_table[] = {
+	{
+		.procname	= "poolsize",
+		.data		= &sysctl_poolsize,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "entropy_avail",
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= proc_do_entropy,
+		.data		= &input_pool.entropy_count,
+	},
+	{
+		.procname	= "read_wakeup_threshold",
+		.data		= &random_read_wakeup_bits,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &min_read_thresh,
+		.extra2		= &max_read_thresh,
+	},
+	{
+		.procname	= "write_wakeup_threshold",
+		.data		= &random_write_wakeup_bits,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &min_write_thresh,
+		.extra2		= &max_write_thresh,
+	},
+	{
+		.procname	= "urandom_min_reseed_secs",
+		.data		= &random_min_urandom_seed,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "boot_id",
+		.data		= &sysctl_bootid,
+		.maxlen		= 16,
+		.mode		= 0444,
+		.proc_handler	= proc_do_uuid,
+	},
+	{
+		.procname	= "uuid",
+		.maxlen		= 16,
+		.mode		= 0444,
+		.proc_handler	= proc_do_uuid,
+	},
+#ifdef ADD_INTERRUPT_BENCH
+	{
+		.procname	= "add_interrupt_avg_cycles",
+		.data		= &avg_cycles,
+		.maxlen		= sizeof(avg_cycles),
+		.mode		= 0444,
+		.proc_handler	= proc_doulongvec_minmax,
+	},
+	{
+		.procname	= "add_interrupt_avg_deviation",
+		.data		= &avg_deviation,
+		.maxlen		= sizeof(avg_deviation),
+		.mode		= 0444,
+		.proc_handler	= proc_doulongvec_minmax,
+	},
+#endif
+	{ }
+};
+#endif 	/* CONFIG_SYSCTL */
+
+struct batched_entropy {
+	union {
+		u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)];
+		u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
+	};
+	unsigned int position;
+};
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
+
+/*
+ * Get a random word for internal kernel use only. The quality of the random
+ * number is either as good as RDRAND or as good as /dev/urandom, with the
+ * goal of being quite fast and not depleting entropy.  To guarantee
+ * that the randomness returned here is cryptographically secure, call
+ * wait_for_random_bytes() first and make sure it has returned 0.
+ */
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+u64 get_random_u64(void)
+{
+	u64 ret;
+	bool use_lock;
+	unsigned long flags = 0;
+	struct batched_entropy *batch;
+	static void *previous;
+
+#if BITS_PER_LONG == 64
+	if (arch_get_random_long((unsigned long *)&ret))
+		return ret;
+#else
+	if (arch_get_random_long((unsigned long *)&ret) &&
+	    arch_get_random_long((unsigned long *)&ret + 1))
+		return ret;
+#endif
+
+	warn_unseeded_randomness(&previous);
+
+	use_lock = READ_ONCE(crng_init) < 2;
+	batch = &get_cpu_var(batched_entropy_u64);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+		extract_crng((__u32 *)batch->entropy_u64);
+		batch->position = 0;
+	}
+	ret = batch->entropy_u64[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+	put_cpu_var(batched_entropy_u64);
+	return ret;
+}
+EXPORT_SYMBOL(get_random_u64);
+
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+u32 get_random_u32(void)
+{
+	u32 ret;
+	bool use_lock;
+	unsigned long flags = 0;
+	struct batched_entropy *batch;
+	static void *previous;
+
+	if (arch_get_random_int(&ret))
+		return ret;
+
+	warn_unseeded_randomness(&previous);
+
+	use_lock = READ_ONCE(crng_init) < 2;
+	batch = &get_cpu_var(batched_entropy_u32);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+		extract_crng(batch->entropy_u32);
+		batch->position = 0;
+	}
+	ret = batch->entropy_u32[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+	put_cpu_var(batched_entropy_u32);
+	return ret;
+}
+EXPORT_SYMBOL(get_random_u32);
+
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+	int cpu;
+	unsigned long flags;
+
+	write_lock_irqsave(&batched_entropy_reset_lock, flags);
+	for_each_possible_cpu (cpu) {
+		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+	}
+	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
+
+/**
+ * randomize_page - Generate a random, page aligned address
+ * @start:	The smallest acceptable address the caller will take.
+ * @range:	The size of the area, starting at @start, within which the
+ *		random address must fall.
+ *
+ * If @start + @range would overflow, @range is capped.
+ *
+ * NOTE: Historical use of randomize_range, which this replaces, presumed that
+ * @start was already page aligned.  We now align it regardless.
+ *
+ * Return: A page aligned address within [start, start + range).  On error,
+ * @start is returned.
+ */
+unsigned long
+randomize_page(unsigned long start, unsigned long range)
+{
+	if (!PAGE_ALIGNED(start)) {
+		range -= PAGE_ALIGN(start) - start;
+		start = PAGE_ALIGN(start);
+	}
+
+	if (start > ULONG_MAX - range)
+		range = ULONG_MAX - start;
+
+	range >>= PAGE_SHIFT;
+
+	if (range == 0)
+		return start;
+
+	return start + (get_random_long() % range << PAGE_SHIFT);
+}
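+
+/*
+ * Illustrative usage sketch (added note; min_addr and addr are
+ * hypothetical names): picking a page-aligned base somewhere within a
+ * 1 GiB window above a minimum address:
+ *
+ *	addr = randomize_page(min_addr, SZ_1G);
+ */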
+
+/* Interface for in-kernel drivers of true hardware RNGs.
+ * Those devices may produce endless random bits and will be throttled
+ * when our pool is full.
+ */
+void add_hwgenerator_randomness(const char *buffer, size_t count,
+				size_t entropy)
+{
+	struct entropy_store *poolp = &input_pool;
+
+	if (unlikely(crng_init == 0)) {
+		crng_fast_load(buffer, count);
+		return;
+	}
+
+	/*
+	 * Suspend writing if we're above the trickle threshold.
+	 * We'll be woken up again once below random_write_wakeup_bits,
+	 * or when the calling thread is about to terminate.
+	 */
+	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
+			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
+	mix_pool_bytes(poolp, buffer, count);
+	credit_entropy_bits(poolp, entropy);
+}
+EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
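+
+/*
+ * Added note: the in-kernel consumer of this interface is the
+ * hw_random core (drivers/char/hw_random/), whose rng kthread feeds
+ * device output here and is throttled by the wait above while the
+ * input pool is already full.
+ */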
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
new file mode 100644
index 0000000..fd6eec8
--- /dev/null
+++ b/drivers/char/raw.c
@@ -0,0 +1,369 @@
+/*
+ * linux/drivers/char/raw.c
+ *
+ * Front-end raw character devices.  These can be bound to any block
+ * devices to provide genuine Unix raw character device semantics.
+ *
+ * We reserve minor number 0 for a control interface.  ioctl()s on this
+ * device are used to bind the other minor numbers to block devices.
+ */
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/backing-dev.h>
+#include <linux/module.h>
+#include <linux/raw.h>
+#include <linux/capability.h>
+#include <linux/uio.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/gfp.h>
+#include <linux/compat.h>
+#include <linux/vmalloc.h>
+
+#include <linux/uaccess.h>
+
+struct raw_device_data {
+	struct block_device *binding;
+	int inuse;
+};
+
+static struct class *raw_class;
+static struct raw_device_data *raw_devices;
+static DEFINE_MUTEX(raw_mutex);
+static const struct file_operations raw_ctl_fops; /* forward declaration */
+
+static int max_raw_minors = MAX_RAW_MINORS;
+
+module_param(max_raw_minors, int, 0);
+MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");
+
+/*
+ * Open/close code for raw IO.
+ *
+ * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
+ * point at the blockdev's address_space and set the file handle to use
+ * O_DIRECT.
+ *
+ * Set the device's soft blocksize to the minimum possible.  This gives the
+ * finest possible alignment and has no adverse impact on performance.
+ */
+static int raw_open(struct inode *inode, struct file *filp)
+{
+	const int minor = iminor(inode);
+	struct block_device *bdev;
+	int err;
+
+	if (minor == 0) {	/* It is the control device */
+		filp->f_op = &raw_ctl_fops;
+		return 0;
+	}
+
+	mutex_lock(&raw_mutex);
+
+	/*
+	 * All we need to do on open is check that the device is bound.
+	 */
+	bdev = raw_devices[minor].binding;
+	err = -ENODEV;
+	if (!bdev)
+		goto out;
+	bdgrab(bdev);
+	err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open);
+	if (err)
+		goto out;
+	err = set_blocksize(bdev, bdev_logical_block_size(bdev));
+	if (err)
+		goto out1;
+	filp->f_flags |= O_DIRECT;
+	filp->f_mapping = bdev->bd_inode->i_mapping;
+	if (++raw_devices[minor].inuse == 1)
+		file_inode(filp)->i_mapping =
+			bdev->bd_inode->i_mapping;
+	filp->private_data = bdev;
+	mutex_unlock(&raw_mutex);
+	return 0;
+
+out1:
+	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
+out:
+	mutex_unlock(&raw_mutex);
+	return err;
+}
+
+/*
+ * When the final fd which refers to this character-special node is closed, we
+ * make its ->mapping point back at its own i_data.
+ */
+static int raw_release(struct inode *inode, struct file *filp)
+{
+	const int minor = iminor(inode);
+	struct block_device *bdev;
+
+	mutex_lock(&raw_mutex);
+	bdev = raw_devices[minor].binding;
+	if (--raw_devices[minor].inuse == 0)
+		/* Here  inode->i_mapping == bdev->bd_inode->i_mapping  */
+		inode->i_mapping = &inode->i_data;
+	mutex_unlock(&raw_mutex);
+
+	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
+	return 0;
+}
+
+/*
+ * Forward ioctls to the underlying block device.
+ */
+static long
+raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
+{
+	struct block_device *bdev = filp->private_data;
+	return blkdev_ioctl(bdev, 0, command, arg);
+}
+
+static int bind_set(int number, u64 major, u64 minor)
+{
+	dev_t dev = MKDEV(major, minor);
+	struct raw_device_data *rawdev;
+	int err = 0;
+
+	if (number <= 0 || number >= max_raw_minors)
+		return -EINVAL;
+
+	if (MAJOR(dev) != major || MINOR(dev) != minor)
+		return -EINVAL;
+
+	rawdev = &raw_devices[number];
+
+	/*
+	 * This is like making block devices, so demand the
+	 * same capability
+	 */
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	/*
+	 * For now, we don't need to check that the underlying
+	 * block device is present or not: we can do that when
+	 * the raw device is opened.  Just check that the
+	 * major/minor numbers make sense.
+	 */
+
+	if (MAJOR(dev) == 0 && dev != 0)
+		return -EINVAL;
+
+	mutex_lock(&raw_mutex);
+	if (rawdev->inuse) {
+		mutex_unlock(&raw_mutex);
+		return -EBUSY;
+	}
+	if (rawdev->binding) {
+		bdput(rawdev->binding);
+		module_put(THIS_MODULE);
+	}
+	if (!dev) {
+		/* unbind */
+		rawdev->binding = NULL;
+		device_destroy(raw_class, MKDEV(RAW_MAJOR, number));
+	} else {
+		rawdev->binding = bdget(dev);
+		if (rawdev->binding == NULL) {
+			err = -ENOMEM;
+		} else {
+			dev_t raw = MKDEV(RAW_MAJOR, number);
+			__module_get(THIS_MODULE);
+			device_destroy(raw_class, raw);
+			device_create(raw_class, NULL, raw, NULL,
+				      "raw%d", number);
+		}
+	}
+	mutex_unlock(&raw_mutex);
+	return err;
+}
+
+static int bind_get(int number, dev_t *dev)
+{
+	struct raw_device_data *rawdev;
+	struct block_device *bdev;
+
+	if (number <= 0 || number >= max_raw_minors)
+		return -EINVAL;
+
+	rawdev = &raw_devices[number];
+
+	mutex_lock(&raw_mutex);
+	bdev = rawdev->binding;
+	*dev = bdev ? bdev->bd_dev : 0;
+	mutex_unlock(&raw_mutex);
+	return 0;
+}
+
+/*
+ * Deal with ioctls against the raw-device control interface, to bind
+ * and unbind other raw devices.
+ */
+static long raw_ctl_ioctl(struct file *filp, unsigned int command,
+			  unsigned long arg)
+{
+	struct raw_config_request rq;
+	dev_t dev;
+	int err;
+
+	switch (command) {
+	case RAW_SETBIND:
+		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
+			return -EFAULT;
+
+		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);
+
+	case RAW_GETBIND:
+		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
+			return -EFAULT;
+
+		err = bind_get(rq.raw_minor, &dev);
+		if (err)
+			return err;
+
+		rq.block_major = MAJOR(dev);
+		rq.block_minor = MINOR(dev);
+
+		if (copy_to_user((void __user *)arg, &rq, sizeof(rq)))
+			return -EFAULT;
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
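+
+/*
+ * Illustrative user-space sketch (not part of this driver): binding
+ * raw1 to a block device through the control node.  RAW_SETBIND and
+ * struct raw_config_request come from <linux/raw.h>; the 8:1 device
+ * numbers (/dev/sda1) are example values only.  While the binding is
+ * in place, unbuffered I/O can be done through /dev/raw/raw1.
+ *
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/raw.h>
+ *
+ *	int main(void)
+ *	{
+ *		struct raw_config_request rq = {
+ *			.raw_minor   = 1,
+ *			.block_major = 8,
+ *			.block_minor = 1,
+ *		};
+ *		int fd = open("/dev/raw/rawctl", O_RDWR);
+ *
+ *		if (fd < 0 || ioctl(fd, RAW_SETBIND, &rq) < 0)
+ *			return 1;
+ *		return 0;
+ *	}
+ */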
+
+#ifdef CONFIG_COMPAT
+struct raw32_config_request {
+	compat_int_t	raw_minor;
+	compat_u64	block_major;
+	compat_u64	block_minor;
+};
+
+static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	struct raw32_config_request __user *user_req = compat_ptr(arg);
+	struct raw32_config_request rq;
+	dev_t dev;
+	int err = 0;
+
+	switch (cmd) {
+	case RAW_SETBIND:
+		if (copy_from_user(&rq, user_req, sizeof(rq)))
+			return -EFAULT;
+
+		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);
+
+	case RAW_GETBIND:
+		if (copy_from_user(&rq, user_req, sizeof(rq)))
+			return -EFAULT;
+
+		err = bind_get(rq.raw_minor, &dev);
+		if (err)
+			return err;
+
+		rq.block_major = MAJOR(dev);
+		rq.block_minor = MINOR(dev);
+
+		if (copy_to_user(user_req, &rq, sizeof(rq)))
+			return -EFAULT;
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+#endif
+
+static const struct file_operations raw_fops = {
+	.read_iter	= blkdev_read_iter,
+	.write_iter	= blkdev_write_iter,
+	.fsync		= blkdev_fsync,
+	.open		= raw_open,
+	.release	= raw_release,
+	.unlocked_ioctl = raw_ioctl,
+	.llseek		= default_llseek,
+	.owner		= THIS_MODULE,
+};
+
+static const struct file_operations raw_ctl_fops = {
+	.unlocked_ioctl = raw_ctl_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= raw_ctl_compat_ioctl,
+#endif
+	.open		= raw_open,
+	.owner		= THIS_MODULE,
+	.llseek		= noop_llseek,
+};
+
+static struct cdev raw_cdev;
+
+static char *raw_devnode(struct device *dev, umode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
+}
+
+static int __init raw_init(void)
+{
+	dev_t dev = MKDEV(RAW_MAJOR, 0);
+	int ret;
+
+	if (max_raw_minors < 1 || max_raw_minors > 65536) {
+		printk(KERN_WARNING "raw: invalid max_raw_minors (must be"
+			" between 1 and 65536), using %d\n", MAX_RAW_MINORS);
+		max_raw_minors = MAX_RAW_MINORS;
+	}
+
+	raw_devices = vzalloc(array_size(max_raw_minors,
+					 sizeof(struct raw_device_data)));
+	if (!raw_devices) {
+		printk(KERN_ERR "Not enough memory for raw device structures\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ret = register_chrdev_region(dev, max_raw_minors, "raw");
+	if (ret)
+		goto error;
+
+	cdev_init(&raw_cdev, &raw_fops);
+	ret = cdev_add(&raw_cdev, dev, max_raw_minors);
+	if (ret)
+		goto error_region;
+	raw_class = class_create(THIS_MODULE, "raw");
+	if (IS_ERR(raw_class)) {
+		printk(KERN_ERR "Error creating raw class.\n");
+		cdev_del(&raw_cdev);
+		ret = PTR_ERR(raw_class);
+		goto error_region;
+	}
+	raw_class->devnode = raw_devnode;
+	device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");
+
+	return 0;
+
+error_region:
+	unregister_chrdev_region(dev, max_raw_minors);
+error:
+	vfree(raw_devices);
+	return ret;
+}
+
+static void __exit raw_exit(void)
+{
+	device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
+	class_destroy(raw_class);
+	cdev_del(&raw_cdev);
+	unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors);
+}
+
+module_init(raw_init);
+module_exit(raw_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
new file mode 100644
index 0000000..4948c8b
--- /dev/null
+++ b/drivers/char/rtc.c
@@ -0,0 +1,1316 @@
+/*
+ *	Real Time Clock interface for Linux
+ *
+ *	Copyright (C) 1996 Paul Gortmaker
+ *
+ *	This driver allows use of the real time clock (built into
+ *	nearly all computers) from user space. It exports the /dev/rtc
+ *	interface supporting various ioctl()s and also the
+ *	/proc/driver/rtc pseudo-file for status information.
+ *
+ *	The ioctls can be used to set the interrupt behaviour and
+ *	generation rate from the RTC via IRQ 8. Then the /dev/rtc
+ *	interface can be used to make use of these timer interrupts,
+ *	be they interval or alarm based.
+ *
+ *	The /dev/rtc interface will block on reads until an interrupt
+ *	has been received. If an RTC interrupt has already happened,
+ *	it will output an unsigned long and then block. The output value
+ *	contains the interrupt status in the low byte and the number of
+ *	interrupts since the last read in the remaining high bytes. The
+ *	/dev/rtc interface can also be used with the select(2) call.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	Based on other minimal char device drivers, like Alan's
+ *	watchdog, Ted's random, etc. etc.
+ *
+ *	1.07	Paul Gortmaker.
+ *	1.08	Miquel van Smoorenburg: disallow certain things on the
+ *		DEC Alpha as the CMOS clock is also used for other things.
+ *	1.09	Nikita Schmidt: epoch support and some Alpha cleanup.
+ *	1.09a	Pete Zaitcev: Sun SPARC
+ *	1.09b	Jeff Garzik: Modularize, init cleanup
+ *	1.09c	Jeff Garzik: SMP cleanup
+ *	1.10	Paul Barton-Davis: add support for async I/O
+ *	1.10a	Andrea Arcangeli: Alpha updates
+ *	1.10b	Andrew Morton: SMP lock fix
+ *	1.10c	Cesar Barros: SMP locking fixes and cleanup
+ *	1.10d	Paul Gortmaker: delete paranoia check in rtc_exit
+ *	1.10e	Maciej W. Rozycki: Handle DECstation's year weirdness.
+ *	1.11	Takashi Iwai: Kernel access functions
+ *			      rtc_register/rtc_unregister/rtc_control
+ *      1.11a   Daniele Bellucci: Audit create_proc_read_entry in rtc_init
+ *	1.12	Venkatesh Pallipadi: Hooks for emulating rtc on HPET base-timer
+ *		CONFIG_HPET_EMULATE_RTC
+ *	1.12a	Maciej W. Rozycki: Handle memory-mapped chips properly.
+ *	1.12ac	Alan Cox: Allow read access to the day of week register
+ *	1.12b	David John: Remove calls to the BKL.
+ */
+
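+/*
+ * Illustrative user-space sketch of the read interface described above
+ * (not part of this driver): with update interrupts enabled, read()
+ * blocks until the next RTC interrupt and returns an unsigned long
+ * carrying the status in the low byte and the interrupt count since
+ * the last read in the remaining bytes.
+ *
+ *	#include <fcntl.h>
+ *	#include <stdio.h>
+ *	#include <unistd.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/rtc.h>
+ *
+ *	int main(void)
+ *	{
+ *		unsigned long data;
+ *		int fd = open("/dev/rtc", O_RDONLY);
+ *
+ *		if (fd < 0 || ioctl(fd, RTC_UIE_ON, 0) < 0)
+ *			return 1;
+ *		read(fd, &data, sizeof(data));
+ *		printf("status 0x%02lx, %lu interrupt(s)\n",
+ *		       data & 0xff, data >> 8);
+ *		return 0;
+ *	}
+ */
+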
+#define RTC_VERSION		"1.12b"
+
+/*
+ *	Note that *all* calls to CMOS_READ and CMOS_WRITE are done with
+ *	interrupts disabled. Due to the index-port/data-port (0x70/0x71)
+ *	design of the RTC, we don't want two different things trying to
+ *	get to it at once. (e.g. the periodic 11 min sync from
+ *      kernel/time/ntp.c vs. this driver.)
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/mc146818rtc.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/sched/signal.h>
+#include <linux/sysctl.h>
+#include <linux/wait.h>
+#include <linux/bcd.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/ratelimit.h>
+
+#include <asm/current.h>
+
+#ifdef CONFIG_X86
+#include <asm/hpet.h>
+#endif
+
+#ifdef CONFIG_SPARC32
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <asm/io.h>
+
+static unsigned long rtc_port;
+static int rtc_irq;
+#endif
+
+#ifdef	CONFIG_HPET_EMULATE_RTC
+#undef	RTC_IRQ
+#endif
+
+#ifdef RTC_IRQ
+static int rtc_has_irq = 1;
+#endif
+
+#ifndef CONFIG_HPET_EMULATE_RTC
+#define is_hpet_enabled()			0
+#define hpet_set_alarm_time(hrs, min, sec)	0
+#define hpet_set_periodic_freq(arg)		0
+#define hpet_mask_rtc_irq_bit(arg)		0
+#define hpet_set_rtc_irq_bit(arg)		0
+#define hpet_rtc_timer_init()			do { } while (0)
+#define hpet_rtc_dropped_irq()			0
+#define hpet_register_irq_handler(h)		({ 0; })
+#define hpet_unregister_irq_handler(h)		({ 0; })
+#ifdef RTC_IRQ
+static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
+{
+	return 0;
+}
+#endif
+#endif
+
+/*
+ *	We sponge a minor off of the misc major. No need to slurp
+ *	up another valuable major dev number for this. If you add
+ *	an ioctl, make sure you don't conflict with SPARC's RTC
+ *	ioctls.
+ */
+
+static struct fasync_struct *rtc_async_queue;
+
+static DECLARE_WAIT_QUEUE_HEAD(rtc_wait);
+
+#ifdef RTC_IRQ
+static void rtc_dropped_irq(struct timer_list *unused);
+
+static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq);
+#endif
+
+static ssize_t rtc_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos);
+
+static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+static void rtc_get_rtc_time(struct rtc_time *rtc_tm);
+
+#ifdef RTC_IRQ
+static __poll_t rtc_poll(struct file *file, poll_table *wait);
+#endif
+
+static void get_rtc_alm_time(struct rtc_time *alm_tm);
+#ifdef RTC_IRQ
+static void set_rtc_irq_bit_locked(unsigned char bit);
+static void mask_rtc_irq_bit_locked(unsigned char bit);
+
+static inline void set_rtc_irq_bit(unsigned char bit)
+{
+	spin_lock_irq(&rtc_lock);
+	set_rtc_irq_bit_locked(bit);
+	spin_unlock_irq(&rtc_lock);
+}
+
+static void mask_rtc_irq_bit(unsigned char bit)
+{
+	spin_lock_irq(&rtc_lock);
+	mask_rtc_irq_bit_locked(bit);
+	spin_unlock_irq(&rtc_lock);
+}
+#endif
+
+#ifdef CONFIG_PROC_FS
+static int rtc_proc_show(struct seq_file *seq, void *v);
+#endif
+
+/*
+ *	Bits in rtc_status. (6 bits of room for future expansion)
+ */
+
+#define RTC_IS_OPEN		0x01	/* means /dev/rtc is in use	*/
+#define RTC_TIMER_ON		0x02	/* missed irq timer active	*/
+
+/*
+ * rtc_status is never changed by rtc_interrupt, and ioctl/open/close are
+ * protected by the spin lock rtc_lock. However, ioctl can still clear the
+ * timer bit in rtc_status and call del_timer after the interrupt has read
+ * rtc_status but before mod_timer is called, which would then re-enable the
+ * timer (though you would need awful timing before you'd trip on it).
+ */
+static unsigned long rtc_status;	/* bitmapped status byte.	*/
+static unsigned long rtc_freq;		/* Current periodic IRQ rate	*/
+static unsigned long rtc_irq_data;	/* our output to the world	*/
+static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
+
+/*
+ *	If this driver ever becomes modularised, it will be really nice
+ *	to make the epoch retain its value across module reload...
+ */
+
+static unsigned long epoch = 1900;	/* year corresponding to 0x00	*/
+
+static const unsigned char days_in_mo[] =
+{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+
+/*
+ * Returns true if a clock update is in progress
+ */
+static inline unsigned char rtc_is_updating(void)
+{
+	unsigned long flags;
+	unsigned char uip;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
+	spin_unlock_irqrestore(&rtc_lock, flags);
+	return uip;
+}
+
+#ifdef RTC_IRQ
+/*
+ *	A very tiny interrupt handler. It runs with interrupts disabled,
+ *	but there is possibility of conflicting with the set_rtc_mmss()
+ *	call (the rtc irq and the timer irq can easily run at the same
+ *	time in two different CPUs). So we need to serialize
+ *	accesses to the chip with the rtc_lock spinlock that each
+ *	architecture should implement in the timer code.
+ *	(See ./arch/XXXX/kernel/time.c for the set_rtc_mmss() function.)
+ */
+
+static irqreturn_t rtc_interrupt(int irq, void *dev_id)
+{
+	/*
+	 *	Can be an alarm interrupt, update complete interrupt,
+	 *	or a periodic interrupt. We store the status in the
+	 *	low byte and the number of interrupts received since
+	 *	the last read in the remainder of rtc_irq_data.
+	 */
+
+	spin_lock(&rtc_lock);
+	rtc_irq_data += 0x100;
+	rtc_irq_data &= ~0xff;
+	if (is_hpet_enabled()) {
+		/*
+		 * In this case it is HPET RTC interrupt handler
+		 * calling us, with the interrupt information
+		 * passed as arg1, instead of irq.
+		 */
+		rtc_irq_data |= (unsigned long)irq & 0xF0;
+	} else {
+		rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0);
+	}
+
+	if (rtc_status & RTC_TIMER_ON)
+		mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100);
+
+	spin_unlock(&rtc_lock);
+
+	wake_up_interruptible(&rtc_wait);
+
+	kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
+
+	return IRQ_HANDLED;
+}
+#endif
+
+/*
+ * sysctl-tuning infrastructure.
+ */
+static struct ctl_table rtc_table[] = {
+	{
+		.procname	= "max-user-freq",
+		.data		= &rtc_max_user_freq,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{ }
+};
+
+static struct ctl_table rtc_root[] = {
+	{
+		.procname	= "rtc",
+		.mode		= 0555,
+		.child		= rtc_table,
+	},
+	{ }
+};
+
+static struct ctl_table dev_root[] = {
+	{
+		.procname	= "dev",
+		.mode		= 0555,
+		.child		= rtc_root,
+	},
+	{ }
+};
+
+static struct ctl_table_header *sysctl_header;
+
+static int __init init_sysctl(void)
+{
+	sysctl_header = register_sysctl_table(dev_root);
+	return 0;
+}
+
+static void __exit cleanup_sysctl(void)
+{
+	unregister_sysctl_table(sysctl_header);
+}
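+
+/*
+ * The tree above appears as /proc/sys/dev/rtc/max-user-freq; writing a
+ * new value there (e.g. 1024, as root) adjusts the limit that
+ * rtc_do_ioctl() below enforces on unprivileged RTC_IRQP_SET and
+ * RTC_PIE_ON callers.
+ */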
+
+/*
+ *	Now all the various file operations that we export.
+ */
+
+static ssize_t rtc_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+#ifndef RTC_IRQ
+	return -EIO;
+#else
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long data;
+	ssize_t retval;
+
+	if (rtc_has_irq == 0)
+		return -EIO;
+
+	/*
+	 * Historically this function used to assume that sizeof(unsigned long)
+	 * is the same in userspace and kernelspace.  This led to problems
+	 * for configurations with multiple ABIs such as the MIPS o32 and 64
+	 * ABIs supported on the same kernel.  So now we support read of both
+	 * 4 and 8 bytes and assume that's the sizeof(unsigned long) in the
+	 * userspace ABI.
+	 */
+	if (count != sizeof(unsigned int) && count !=  sizeof(unsigned long))
+		return -EINVAL;
+
+	add_wait_queue(&rtc_wait, &wait);
+
+	do {
+		/* First make it right. Then make it fast. Putting this whole
+		 * block within the parentheses of a while would be too
+		 * confusing. And no, xchg() is not the answer. */
+
+		__set_current_state(TASK_INTERRUPTIBLE);
+
+		spin_lock_irq(&rtc_lock);
+		data = rtc_irq_data;
+		rtc_irq_data = 0;
+		spin_unlock_irq(&rtc_lock);
+
+		if (data != 0)
+			break;
+
+		if (file->f_flags & O_NONBLOCK) {
+			retval = -EAGAIN;
+			goto out;
+		}
+		if (signal_pending(current)) {
+			retval = -ERESTARTSYS;
+			goto out;
+		}
+		schedule();
+	} while (1);
+
+	if (count == sizeof(unsigned int)) {
+		retval = put_user(data,
+				  (unsigned int __user *)buf) ?: sizeof(int);
+	} else {
+		retval = put_user(data,
+				  (unsigned long __user *)buf) ?: sizeof(long);
+	}
+	if (!retval)
+		retval = count;
+ out:
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&rtc_wait, &wait);
+
+	return retval;
+#endif
+}
+
+static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
+{
+	struct rtc_time wtime;
+
+#ifdef RTC_IRQ
+	if (rtc_has_irq == 0) {
+		switch (cmd) {
+		case RTC_AIE_OFF:
+		case RTC_AIE_ON:
+		case RTC_PIE_OFF:
+		case RTC_PIE_ON:
+		case RTC_UIE_OFF:
+		case RTC_UIE_ON:
+		case RTC_IRQP_READ:
+		case RTC_IRQP_SET:
+			return -EINVAL;
+		}
+	}
+#endif
+
+	switch (cmd) {
+#ifdef RTC_IRQ
+	case RTC_AIE_OFF:	/* Mask alarm int. enab. bit	*/
+	{
+		mask_rtc_irq_bit(RTC_AIE);
+		return 0;
+	}
+	case RTC_AIE_ON:	/* Allow alarm interrupts.	*/
+	{
+		set_rtc_irq_bit(RTC_AIE);
+		return 0;
+	}
+	case RTC_PIE_OFF:	/* Mask periodic int. enab. bit	*/
+	{
+		/* can be called from isr via rtc_control() */
+		unsigned long flags;
+
+		spin_lock_irqsave(&rtc_lock, flags);
+		mask_rtc_irq_bit_locked(RTC_PIE);
+		if (rtc_status & RTC_TIMER_ON) {
+			rtc_status &= ~RTC_TIMER_ON;
+			del_timer(&rtc_irq_timer);
+		}
+		spin_unlock_irqrestore(&rtc_lock, flags);
+
+		return 0;
+	}
+	case RTC_PIE_ON:	/* Allow periodic ints		*/
+	{
+		/* can be called from isr via rtc_control() */
+		unsigned long flags;
+
+		/*
+		 * We don't really want Joe User enabling more
+		 * than 64Hz of interrupts on a multi-user machine.
+		 */
+		if (!kernel && (rtc_freq > rtc_max_user_freq) &&
+						(!capable(CAP_SYS_RESOURCE)))
+			return -EACCES;
+
+		spin_lock_irqsave(&rtc_lock, flags);
+		if (!(rtc_status & RTC_TIMER_ON)) {
+			mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq +
+					2*HZ/100);
+			rtc_status |= RTC_TIMER_ON;
+		}
+		set_rtc_irq_bit_locked(RTC_PIE);
+		spin_unlock_irqrestore(&rtc_lock, flags);
+
+		return 0;
+	}
+	case RTC_UIE_OFF:	/* Mask ints from RTC updates.	*/
+	{
+		mask_rtc_irq_bit(RTC_UIE);
+		return 0;
+	}
+	case RTC_UIE_ON:	/* Allow ints for RTC updates.	*/
+	{
+		set_rtc_irq_bit(RTC_UIE);
+		return 0;
+	}
+#endif
+	case RTC_ALM_READ:	/* Read the present alarm time */
+	{
+		/*
+		 * This returns a struct rtc_time. Reading >= 0xc0
+		 * means "don't care" or "match all". Only the tm_hour,
+		 * tm_min, and tm_sec values are filled in.
+		 */
+		memset(&wtime, 0, sizeof(struct rtc_time));
+		get_rtc_alm_time(&wtime);
+		break;
+	}
+	case RTC_ALM_SET:	/* Store a time into the alarm */
+	{
+		/*
+		 * This expects a struct rtc_time. Writing 0xff means
+		 * "don't care" or "match all". Only the tm_hour,
+		 * tm_min and tm_sec are used.
+		 */
+		unsigned char hrs, min, sec;
+		struct rtc_time alm_tm;
+
+		if (copy_from_user(&alm_tm, (struct rtc_time __user *)arg,
+				   sizeof(struct rtc_time)))
+			return -EFAULT;
+
+		hrs = alm_tm.tm_hour;
+		min = alm_tm.tm_min;
+		sec = alm_tm.tm_sec;
+
+		spin_lock_irq(&rtc_lock);
+		if (hpet_set_alarm_time(hrs, min, sec)) {
+			/*
+			 * Fallthru and set alarm time in CMOS too,
+			 * so that we will get proper value in RTC_ALM_READ
+			 */
+		}
+		if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) ||
+							RTC_ALWAYS_BCD) {
+			if (sec < 60)
+				sec = bin2bcd(sec);
+			else
+				sec = 0xff;
+
+			if (min < 60)
+				min = bin2bcd(min);
+			else
+				min = 0xff;
+
+			if (hrs < 24)
+				hrs = bin2bcd(hrs);
+			else
+				hrs = 0xff;
+		}
+		CMOS_WRITE(hrs, RTC_HOURS_ALARM);
+		CMOS_WRITE(min, RTC_MINUTES_ALARM);
+		CMOS_WRITE(sec, RTC_SECONDS_ALARM);
+		spin_unlock_irq(&rtc_lock);
+
+		return 0;
+	}
+	case RTC_RD_TIME:	/* Read the time/date from RTC	*/
+	{
+		memset(&wtime, 0, sizeof(struct rtc_time));
+		rtc_get_rtc_time(&wtime);
+		break;
+	}
+	case RTC_SET_TIME:	/* Set the RTC */
+	{
+		struct rtc_time rtc_tm;
+		unsigned char mon, day, hrs, min, sec, leap_yr;
+		unsigned char save_control, save_freq_select;
+		unsigned int yrs;
+#ifdef CONFIG_MACH_DECSTATION
+		unsigned int real_yrs;
+#endif
+
+		if (!capable(CAP_SYS_TIME))
+			return -EACCES;
+
+		if (copy_from_user(&rtc_tm, (struct rtc_time __user *)arg,
+				   sizeof(struct rtc_time)))
+			return -EFAULT;
+
+		yrs = rtc_tm.tm_year + 1900;
+		mon = rtc_tm.tm_mon + 1;   /* tm_mon starts at zero */
+		day = rtc_tm.tm_mday;
+		hrs = rtc_tm.tm_hour;
+		min = rtc_tm.tm_min;
+		sec = rtc_tm.tm_sec;
+
+		if (yrs < 1970)
+			return -EINVAL;
+
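+		/*
+		 * Gregorian rule: a leap year is divisible by 4 but not
+		 * by 100, unless also divisible by 400 (2000 was a leap
+		 * year, 1900 was not).
+		 */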
+		leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
+
+		if ((mon > 12) || (day == 0))
+			return -EINVAL;
+
+		if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
+			return -EINVAL;
+
+		if ((hrs >= 24) || (min >= 60) || (sec >= 60))
+			return -EINVAL;
+
+		yrs -= epoch;
+		if (yrs > 255)		/* They are unsigned */
+			return -EINVAL;
+
+		spin_lock_irq(&rtc_lock);
+#ifdef CONFIG_MACH_DECSTATION
+		real_yrs = yrs;
+		yrs = 72;
+
+		/*
+		 * We want to keep the year set to 73 until March
+		 * for non-leap years, so that Feb, 29th is handled
+		 * correctly.
+		 */
+		if (!leap_yr && mon < 3) {
+			real_yrs--;
+			yrs = 73;
+		}
+#endif
+		/* These limits and adjustments are independent of
+		 * whether the chip is in binary mode or not.
+		 */
+		if (yrs > 169) {
+			spin_unlock_irq(&rtc_lock);
+			return -EINVAL;
+		}
+		if (yrs >= 100)
+			yrs -= 100;
+
+		if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
+		    || RTC_ALWAYS_BCD) {
+			sec = bin2bcd(sec);
+			min = bin2bcd(min);
+			hrs = bin2bcd(hrs);
+			day = bin2bcd(day);
+			mon = bin2bcd(mon);
+			yrs = bin2bcd(yrs);
+		}
+
+		save_control = CMOS_READ(RTC_CONTROL);
+		CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+		save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+		CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+#ifdef CONFIG_MACH_DECSTATION
+		CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
+#endif
+		CMOS_WRITE(yrs, RTC_YEAR);
+		CMOS_WRITE(mon, RTC_MONTH);
+		CMOS_WRITE(day, RTC_DAY_OF_MONTH);
+		CMOS_WRITE(hrs, RTC_HOURS);
+		CMOS_WRITE(min, RTC_MINUTES);
+		CMOS_WRITE(sec, RTC_SECONDS);
+
+		CMOS_WRITE(save_control, RTC_CONTROL);
+		CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+
+		spin_unlock_irq(&rtc_lock);
+		return 0;
+	}
+#ifdef RTC_IRQ
+	case RTC_IRQP_READ:	/* Read the periodic IRQ rate.	*/
+	{
+		return put_user(rtc_freq, (unsigned long __user *)arg);
+	}
+	case RTC_IRQP_SET:	/* Set periodic IRQ rate.	*/
+	{
+		int tmp = 0;
+		unsigned char val;
+		/* can be called from isr via rtc_control() */
+		unsigned long flags;
+
+		/*
+		 * The max we can do is 8192Hz.
+		 */
+		if ((arg < 2) || (arg > 8192))
+			return -EINVAL;
+		/*
+		 * We don't really want Joe User generating more
+		 * than 64Hz of interrupts on a multi-user machine.
+		 */
+		if (!kernel && (arg > rtc_max_user_freq) &&
+					!capable(CAP_SYS_RESOURCE))
+			return -EACCES;
+
+		while (arg > (1<<tmp))
+			tmp++;
+
+		/*
+		 * Check that the input was really a power of 2.
+		 */
+		if (arg != (1<<tmp))
+			return -EINVAL;
+
+		rtc_freq = arg;
+
+		spin_lock_irqsave(&rtc_lock, flags);
+		if (hpet_set_periodic_freq(arg)) {
+			spin_unlock_irqrestore(&rtc_lock, flags);
+			return 0;
+		}
+
+		val = CMOS_READ(RTC_FREQ_SELECT) & 0xf0;
+		val |= (16 - tmp);
+		CMOS_WRITE(val, RTC_FREQ_SELECT);
+		spin_unlock_irqrestore(&rtc_lock, flags);
+		return 0;
+	}
+#endif
+	case RTC_EPOCH_READ:	/* Read the epoch.	*/
+	{
+		return put_user(epoch, (unsigned long __user *)arg);
+	}
+	case RTC_EPOCH_SET:	/* Set the epoch.	*/
+	{
+		/*
+		 * There were no RTC clocks before 1900.
+		 */
+		if (arg < 1900)
+			return -EINVAL;
+
+		if (!capable(CAP_SYS_TIME))
+			return -EACCES;
+
+		epoch = arg;
+		return 0;
+	}
+	default:
+		return -ENOTTY;
+	}
+	return copy_to_user((void __user *)arg,
+			    &wtime, sizeof wtime) ? -EFAULT : 0;
+}
+
+static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	return rtc_do_ioctl(cmd, arg, 0);
+}
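+
+/*
+ * Illustrative user-space sketch of the periodic-interrupt path (not
+ * part of this driver): request a 64Hz rate with RTC_IRQP_SET, enable
+ * periodic interrupts with RTC_PIE_ON, and block in read() for each
+ * tick; the 64 reads below span one second.  The alarm and update
+ * interrupts (RTC_AIE_ON/RTC_UIE_ON) follow the same pattern.
+ *
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/rtc.h>
+ *
+ *	int main(void)
+ *	{
+ *		unsigned long data;
+ *		int i, fd = open("/dev/rtc", O_RDONLY);
+ *
+ *		if (fd < 0 || ioctl(fd, RTC_IRQP_SET, 64) < 0)
+ *			return 1;
+ *		ioctl(fd, RTC_PIE_ON, 0);
+ *		for (i = 0; i < 64; i++)
+ *			read(fd, &data, sizeof(data));
+ *		ioctl(fd, RTC_PIE_OFF, 0);
+ *		return 0;
+ *	}
+ */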
+
+/*
+ *	We enforce only one user at a time here with the open/close.
+ *	Also clear the previous interrupt data on an open, and clean
+ *	up things on a close.
+ */
+static int rtc_open(struct inode *inode, struct file *file)
+{
+	spin_lock_irq(&rtc_lock);
+
+	if (rtc_status & RTC_IS_OPEN)
+		goto out_busy;
+
+	rtc_status |= RTC_IS_OPEN;
+
+	rtc_irq_data = 0;
+	spin_unlock_irq(&rtc_lock);
+	return 0;
+
+out_busy:
+	spin_unlock_irq(&rtc_lock);
+	return -EBUSY;
+}
+
+static int rtc_fasync(int fd, struct file *filp, int on)
+{
+	return fasync_helper(fd, filp, on, &rtc_async_queue);
+}
+
+static int rtc_release(struct inode *inode, struct file *file)
+{
+#ifdef RTC_IRQ
+	unsigned char tmp;
+
+	if (rtc_has_irq == 0)
+		goto no_irq;
+
+	/*
+	 * Turn off all interrupts once the device is no longer
+	 * in use, and clear the data.
+	 */
+
+	spin_lock_irq(&rtc_lock);
+	if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) {
+		tmp = CMOS_READ(RTC_CONTROL);
+		tmp &=  ~RTC_PIE;
+		tmp &=  ~RTC_AIE;
+		tmp &=  ~RTC_UIE;
+		CMOS_WRITE(tmp, RTC_CONTROL);
+		CMOS_READ(RTC_INTR_FLAGS);
+	}
+	if (rtc_status & RTC_TIMER_ON) {
+		rtc_status &= ~RTC_TIMER_ON;
+		del_timer(&rtc_irq_timer);
+	}
+	spin_unlock_irq(&rtc_lock);
+
+no_irq:
+#endif
+
+	spin_lock_irq(&rtc_lock);
+	rtc_irq_data = 0;
+	rtc_status &= ~RTC_IS_OPEN;
+	spin_unlock_irq(&rtc_lock);
+
+	return 0;
+}
+
+#ifdef RTC_IRQ
+static __poll_t rtc_poll(struct file *file, poll_table *wait)
+{
+	unsigned long l;
+
+	if (rtc_has_irq == 0)
+		return 0;
+
+	poll_wait(file, &rtc_wait, wait);
+
+	spin_lock_irq(&rtc_lock);
+	l = rtc_irq_data;
+	spin_unlock_irq(&rtc_lock);
+
+	if (l != 0)
+		return EPOLLIN | EPOLLRDNORM;
+	return 0;
+}
+#endif
+
+/*
+ *	The various file operations we support.
+ */
+
+static const struct file_operations rtc_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.read		= rtc_read,
+#ifdef RTC_IRQ
+	.poll		= rtc_poll,
+#endif
+	.unlocked_ioctl	= rtc_ioctl,
+	.open		= rtc_open,
+	.release	= rtc_release,
+	.fasync		= rtc_fasync,
+};
+
+static struct miscdevice rtc_dev = {
+	.minor		= RTC_MINOR,
+	.name		= "rtc",
+	.fops		= &rtc_fops,
+};
+
+static resource_size_t rtc_size;
+
+static struct resource * __init rtc_request_region(resource_size_t size)
+{
+	struct resource *r;
+
+	if (RTC_IOMAPPED)
+		r = request_region(RTC_PORT(0), size, "rtc");
+	else
+		r = request_mem_region(RTC_PORT(0), size, "rtc");
+
+	if (r)
+		rtc_size = size;
+
+	return r;
+}
+
+static void rtc_release_region(void)
+{
+	if (RTC_IOMAPPED)
+		release_region(RTC_PORT(0), rtc_size);
+	else
+		release_mem_region(RTC_PORT(0), rtc_size);
+}
+
+static int __init rtc_init(void)
+{
+#ifdef CONFIG_PROC_FS
+	struct proc_dir_entry *ent;
+#endif
+#if defined(__alpha__) || defined(__mips__)
+	unsigned int year, ctrl;
+	char *guess = NULL;
+#endif
+#ifdef CONFIG_SPARC32
+	struct device_node *ebus_dp;
+	struct platform_device *op;
+#else
+	void *r;
+#ifdef RTC_IRQ
+	irq_handler_t rtc_int_handler_ptr;
+#endif
+#endif
+
+#ifdef CONFIG_SPARC32
+	for_each_node_by_name(ebus_dp, "ebus") {
+		struct device_node *dp;
+		for (dp = ebus_dp; dp; dp = dp->sibling) {
+			if (!strcmp(dp->name, "rtc")) {
+				op = of_find_device_by_node(dp);
+				if (op) {
+					rtc_port = op->resource[0].start;
+					rtc_irq = op->irqs[0];
+					goto found;
+				}
+			}
+		}
+	}
+	rtc_has_irq = 0;
+	printk(KERN_ERR "rtc_init: no PC rtc found\n");
+	return -EIO;
+
+found:
+	if (!rtc_irq) {
+		rtc_has_irq = 0;
+		goto no_irq;
+	}
+
+	/*
+	 * XXX Interrupt pin #7 in Espresso is shared between RTC and
+	 * PCI Slot 2 INTA# (and some INTx# in Slot 1).
+	 */
+	if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc",
+			(void *)&rtc_port)) {
+		rtc_has_irq = 0;
+		printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq);
+		return -EIO;
+	}
+no_irq:
+#else
+	r = rtc_request_region(RTC_IO_EXTENT);
+
+	/*
+	 * If we've already requested a smaller range (for example, because
+	 * PNPBIOS or ACPI told us how the device is configured), the request
+	 * above might fail because it's too big.
+	 *
+	 * If so, request just the range we actually use.
+	 */
+	if (!r)
+		r = rtc_request_region(RTC_IO_EXTENT_USED);
+	if (!r) {
+#ifdef RTC_IRQ
+		rtc_has_irq = 0;
+#endif
+		printk(KERN_ERR "rtc: I/O resource %lx is not free.\n",
+		       (long)(RTC_PORT(0)));
+		return -EIO;
+	}
+
+#ifdef RTC_IRQ
+	if (is_hpet_enabled()) {
+		int err;
+
+		rtc_int_handler_ptr = hpet_rtc_interrupt;
+		err = hpet_register_irq_handler(rtc_interrupt);
+		if (err != 0) {
+			printk(KERN_WARNING "hpet_register_irq_handler failed "
+					"in rtc_init().");
+			return err;
+		}
+	} else {
+		rtc_int_handler_ptr = rtc_interrupt;
+	}
+
+	if (request_irq(RTC_IRQ, rtc_int_handler_ptr, 0, "rtc", NULL)) {
+		/* Yeah right, seeing as irq 8 doesn't even hit the bus. */
+		rtc_has_irq = 0;
+		printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
+		rtc_release_region();
+
+		return -EIO;
+	}
+	hpet_rtc_timer_init();
+
+#endif
+
+#endif /* CONFIG_SPARC32 vs. others */
+
+	if (misc_register(&rtc_dev)) {
+#ifdef RTC_IRQ
+		free_irq(RTC_IRQ, NULL);
+		hpet_unregister_irq_handler(rtc_interrupt);
+		rtc_has_irq = 0;
+#endif
+		rtc_release_region();
+		return -ENODEV;
+	}
+
+#ifdef CONFIG_PROC_FS
+	ent = proc_create_single("driver/rtc", 0, NULL, rtc_proc_show);
+	if (!ent)
+		printk(KERN_WARNING "rtc: Failed to register with procfs.\n");
+#endif
+
+#if defined(__alpha__) || defined(__mips__)
+	rtc_freq = HZ;
+
+	/* Each operating system on an Alpha uses its own epoch.
+	   Let's try to guess which one we are using now. */
+
+	if (rtc_is_updating() != 0)
+		msleep(20);
+
+	spin_lock_irq(&rtc_lock);
+	year = CMOS_READ(RTC_YEAR);
+	ctrl = CMOS_READ(RTC_CONTROL);
+	spin_unlock_irq(&rtc_lock);
+
+	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+		year = bcd2bin(year);       /* This should never happen... */
+
+	if (year < 20) {
+		epoch = 2000;
+		guess = "SRM (post-2000)";
+	} else if (year >= 20 && year < 48) {
+		epoch = 1980;
+		guess = "ARC console";
+	} else if (year >= 48 && year < 72) {
+		epoch = 1952;
+		guess = "Digital UNIX";
+#if defined(__mips__)
+	} else if (year >= 72 && year < 74) {
+		epoch = 2000;
+		guess = "Digital DECstation";
+#else
+	} else if (year >= 70) {
+		epoch = 1900;
+		guess = "Standard PC (1900)";
+#endif
+	}
+	if (guess)
+		printk(KERN_INFO "rtc: %s epoch (%lu) detected\n",
+			guess, epoch);
+#endif
+#ifdef RTC_IRQ
+	if (rtc_has_irq == 0)
+		goto no_irq2;
+
+	spin_lock_irq(&rtc_lock);
+	rtc_freq = 1024;
+	if (!hpet_set_periodic_freq(rtc_freq)) {
+		/*
+		 * Initialize periodic frequency to CMOS reset default,
+		 * which is 1024Hz
+		 */
+		CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06),
+			   RTC_FREQ_SELECT);
+	}
+	spin_unlock_irq(&rtc_lock);
+no_irq2:
+#endif
+
+	(void) init_sysctl();
+
+	printk(KERN_INFO "Real Time Clock Driver v" RTC_VERSION "\n");
+
+	return 0;
+}
+
+static void __exit rtc_exit(void)
+{
+	cleanup_sysctl();
+	remove_proc_entry("driver/rtc", NULL);
+	misc_deregister(&rtc_dev);
+
+#ifdef CONFIG_SPARC32
+	if (rtc_has_irq)
+		free_irq(rtc_irq, &rtc_port);
+#else
+	rtc_release_region();
+#ifdef RTC_IRQ
+	if (rtc_has_irq) {
+		free_irq(RTC_IRQ, NULL);
+		hpet_unregister_irq_handler(hpet_rtc_interrupt);
+	}
+#endif
+#endif /* CONFIG_SPARC32 */
+}
+
+module_init(rtc_init);
+module_exit(rtc_exit);
+
+#ifdef RTC_IRQ
+/*
+ *	At IRQ rates >= 4096Hz, an interrupt may get lost altogether.
+ *	(usually during an IDE disk interrupt, with IRQ unmasking off)
+ *	Since the interrupt handler doesn't get called, the IRQ status
+ *	byte doesn't get read, and the RTC stops generating interrupts.
+ *	A timer is set, and will call this function if/when that happens.
+ *	To get it out of this stalled state, we just read the status.
+ *	At least a jiffy of interrupts (rtc_freq/HZ) will have been lost.
+ *	(You *really* shouldn't be trying to use a non-realtime system
+ *	for something that requires a steady > 1kHz signal anyway.)
+ */
+
+static void rtc_dropped_irq(struct timer_list *unused)
+{
+	unsigned long freq;
+
+	spin_lock_irq(&rtc_lock);
+
+	if (hpet_rtc_dropped_irq()) {
+		spin_unlock_irq(&rtc_lock);
+		return;
+	}
+
+	/* Just in case someone disabled the timer from behind our back... */
+	if (rtc_status & RTC_TIMER_ON)
+		mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100);
+
+	rtc_irq_data += ((rtc_freq/HZ)<<8);
+	rtc_irq_data &= ~0xff;
+	rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0);	/* restart */
+
+	freq = rtc_freq;
+
+	spin_unlock_irq(&rtc_lock);
+
+	printk_ratelimited(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
+			   freq);
+
+	/* Now we have new data */
+	wake_up_interruptible(&rtc_wait);
+
+	kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
+}
+#endif
+
+#ifdef CONFIG_PROC_FS
+/*
+ *	Info exported via "/proc/driver/rtc".
+ */
+
+static int rtc_proc_show(struct seq_file *seq, void *v)
+{
+#define YN(bit) ((ctrl & bit) ? "yes" : "no")
+#define NY(bit) ((ctrl & bit) ? "no" : "yes")
+	struct rtc_time tm;
+	unsigned char batt, ctrl;
+	unsigned long freq;
+
+	spin_lock_irq(&rtc_lock);
+	batt = CMOS_READ(RTC_VALID) & RTC_VRT;
+	ctrl = CMOS_READ(RTC_CONTROL);
+	freq = rtc_freq;
+	spin_unlock_irq(&rtc_lock);
+
+
+	rtc_get_rtc_time(&tm);
+
+	/*
+	 * There is no way to tell if the luser has the RTC set for local
+	 * time or for Coordinated Universal Time (UTC). Probably local though.
+	 */
+	seq_printf(seq,
+		   "rtc_time\t: %02d:%02d:%02d\n"
+		   "rtc_date\t: %04d-%02d-%02d\n"
+		   "rtc_epoch\t: %04lu\n",
+		   tm.tm_hour, tm.tm_min, tm.tm_sec,
+		   tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, epoch);
+
+	get_rtc_alm_time(&tm);
+
+	/*
+	 * We implicitly assume 24hr mode here. Alarm values >= 0xc0 will
+	 * match any value for that particular field. Values that are
+	 * greater than a valid time, but less than 0xc0 shouldn't appear.
+	 */
+	seq_puts(seq, "alarm\t\t: ");
+	if (tm.tm_hour <= 24)
+		seq_printf(seq, "%02d:", tm.tm_hour);
+	else
+		seq_puts(seq, "**:");
+
+	if (tm.tm_min <= 59)
+		seq_printf(seq, "%02d:", tm.tm_min);
+	else
+		seq_puts(seq, "**:");
+
+	if (tm.tm_sec <= 59)
+		seq_printf(seq, "%02d\n", tm.tm_sec);
+	else
+		seq_puts(seq, "**\n");
+
+	seq_printf(seq,
+		   "DST_enable\t: %s\n"
+		   "BCD\t\t: %s\n"
+		   "24hr\t\t: %s\n"
+		   "square_wave\t: %s\n"
+		   "alarm_IRQ\t: %s\n"
+		   "update_IRQ\t: %s\n"
+		   "periodic_IRQ\t: %s\n"
+		   "periodic_freq\t: %ld\n"
+		   "batt_status\t: %s\n",
+		   YN(RTC_DST_EN),
+		   NY(RTC_DM_BINARY),
+		   YN(RTC_24H),
+		   YN(RTC_SQWE),
+		   YN(RTC_AIE),
+		   YN(RTC_UIE),
+		   YN(RTC_PIE),
+		   freq,
+		   batt ? "okay" : "dead");
+
+	return  0;
+#undef YN
+#undef NY
+}
+#endif
+
+static void rtc_get_rtc_time(struct rtc_time *rtc_tm)
+{
+	unsigned long uip_watchdog = jiffies, flags;
+	unsigned char ctrl;
+#ifdef CONFIG_MACH_DECSTATION
+	unsigned int real_year;
+#endif
+
+	/*
+	 * read RTC once any update in progress is done. The update
+	 * can take just over 2ms. We wait 20ms. There is no need to
+	 * poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
+	 * If you need to know *exactly* when a second has started, enable
+	 * periodic update complete interrupts, (via ioctl) and then
+	 * immediately read /dev/rtc which will block until you get the IRQ.
+	 * Once the read clears, read the RTC time (again via ioctl). Easy.
+	 */
+
+	while (rtc_is_updating() != 0 &&
+	       time_before(jiffies, uip_watchdog + 2*HZ/100))
+		cpu_relax();
+
+	/*
+	 * Only the values that we read from the RTC are set. We leave
+	 * tm_wday, tm_yday and tm_isdst untouched. Note that while the
+	 * RTC has RTC_DAY_OF_WEEK, we should usually ignore it, as it is
+	 * only updated by the RTC when initially set to a non-zero value.
+	 */
+	spin_lock_irqsave(&rtc_lock, flags);
+	rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS);
+	rtc_tm->tm_min = CMOS_READ(RTC_MINUTES);
+	rtc_tm->tm_hour = CMOS_READ(RTC_HOURS);
+	rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
+	rtc_tm->tm_mon = CMOS_READ(RTC_MONTH);
+	rtc_tm->tm_year = CMOS_READ(RTC_YEAR);
+	/* Only set from 2.6.16 onwards */
+	rtc_tm->tm_wday = CMOS_READ(RTC_DAY_OF_WEEK);
+
+#ifdef CONFIG_MACH_DECSTATION
+	real_year = CMOS_READ(RTC_DEC_YEAR);
+#endif
+	ctrl = CMOS_READ(RTC_CONTROL);
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
+	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+		rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
+		rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
+		rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
+		rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
+		rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
+		rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
+		rtc_tm->tm_wday = bcd2bin(rtc_tm->tm_wday);
+	}
+
+#ifdef CONFIG_MACH_DECSTATION
+	rtc_tm->tm_year += real_year - 72;
+#endif
+
+	/*
+	 * Account for differences between how the RTC uses the values
+	 * and how they are defined in a struct rtc_time;
+	 */
+	rtc_tm->tm_year += epoch - 1900;
+	if (rtc_tm->tm_year <= 69)
+		rtc_tm->tm_year += 100;
+
+	rtc_tm->tm_mon--;
+}
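+
+/*
+ * Illustrative user-space sketch of the "exact second" recipe from the
+ * comment in rtc_get_rtc_time() above (not part of this driver): enable
+ * update-complete interrupts, block in read() until the next second
+ * begins, then fetch the freshly-updated time.
+ *
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/rtc.h>
+ *
+ *	int main(void)
+ *	{
+ *		struct rtc_time tm;
+ *		unsigned long data;
+ *		int fd = open("/dev/rtc", O_RDONLY);
+ *
+ *		if (fd < 0 || ioctl(fd, RTC_UIE_ON, 0) < 0)
+ *			return 1;
+ *		read(fd, &data, sizeof(data));
+ *		ioctl(fd, RTC_RD_TIME, &tm);
+ *		ioctl(fd, RTC_UIE_OFF, 0);
+ *		return 0;
+ *	}
+ */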
+
+static void get_rtc_alm_time(struct rtc_time *alm_tm)
+{
+	unsigned char ctrl;
+
+	/*
+	 * Only the values that we read from the RTC are set. That
+	 * means only tm_hour, tm_min, and tm_sec.
+	 */
+	spin_lock_irq(&rtc_lock);
+	alm_tm->tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
+	alm_tm->tm_min = CMOS_READ(RTC_MINUTES_ALARM);
+	alm_tm->tm_hour = CMOS_READ(RTC_HOURS_ALARM);
+	ctrl = CMOS_READ(RTC_CONTROL);
+	spin_unlock_irq(&rtc_lock);
+
+	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+		alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
+		alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
+		alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
+	}
+}
+
+#ifdef RTC_IRQ
+/*
+ * Used to disable/enable interrupts for any one of UIE, AIE, PIE.
+ * Rumour has it that if you frob the interrupt enable/disable
+ * bits in RTC_CONTROL, you should read RTC_INTR_FLAGS, to
+ * ensure you actually start getting interrupts. Probably for
+ * compatibility with older/broken chipset RTC implementations.
+ * We also clear out any old irq data after an ioctl() that
+ * meddles with the interrupt enable/disable bits.
+ */
+
+static void mask_rtc_irq_bit_locked(unsigned char bit)
+{
+	unsigned char val;
+
+	if (hpet_mask_rtc_irq_bit(bit))
+		return;
+	val = CMOS_READ(RTC_CONTROL);
+	val &=  ~bit;
+	CMOS_WRITE(val, RTC_CONTROL);
+	CMOS_READ(RTC_INTR_FLAGS);
+
+	rtc_irq_data = 0;
+}
+
+static void set_rtc_irq_bit_locked(unsigned char bit)
+{
+	unsigned char val;
+
+	if (hpet_set_rtc_irq_bit(bit))
+		return;
+	val = CMOS_READ(RTC_CONTROL);
+	val |= bit;
+	CMOS_WRITE(val, RTC_CONTROL);
+	CMOS_READ(RTC_INTR_FLAGS);
+
+	rtc_irq_data = 0;
+}
+#endif
+
+MODULE_AUTHOR("Paul Gortmaker");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(RTC_MINOR);
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
new file mode 100644
index 0000000..903761b
--- /dev/null
+++ b/drivers/char/scx200_gpio.c
@@ -0,0 +1,132 @@
+/* linux/drivers/char/scx200_gpio.c
+
+   National Semiconductor SCx200 GPIO driver.  Allows a user space
+   process to play with the GPIO pins.
+
+   Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> */
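+
+/* Illustrative user-space sketch (not part of this driver): the module
+   only registers a char region, so a node must be created by hand, e.g.
+   "mknod /dev/scx200_gpio7 c <major> 7" where the minor selects the pin.
+   Writing the command character '1' or '0' then drives the pin high or
+   low (the full command set is handled in nsc_gpio.c).
+
+	#include <fcntl.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		int fd = open("/dev/scx200_gpio7", O_WRONLY);
+
+		if (fd < 0)
+			return 1;
+		write(fd, "1", 1);
+		close(fd);
+		return 0;
+	}
+*/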
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+
+#include <linux/scx200_gpio.h>
+#include <linux/nsc_gpio.h>
+
+#define DRVNAME "scx200_gpio"
+
+static struct platform_device *pdev;
+
+MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
+MODULE_DESCRIPTION("NatSemi/AMD SCx200 GPIO Pin Driver");
+MODULE_LICENSE("GPL");
+
+static int major = 0;		/* default to dynamic major */
+module_param(major, int, 0);
+MODULE_PARM_DESC(major, "Major device number");
+
+#define MAX_PINS 32		/* 64 later, when known ok */
+
+struct nsc_gpio_ops scx200_gpio_ops = {
+	.owner		= THIS_MODULE,
+	.gpio_config	= scx200_gpio_configure,
+	.gpio_dump	= nsc_gpio_dump,
+	.gpio_get	= scx200_gpio_get,
+	.gpio_set	= scx200_gpio_set,
+	.gpio_change	= scx200_gpio_change,
+	.gpio_current	= scx200_gpio_current
+};
+EXPORT_SYMBOL_GPL(scx200_gpio_ops);
+
+static int scx200_gpio_open(struct inode *inode, struct file *file)
+{
+	unsigned m = iminor(inode);
+	file->private_data = &scx200_gpio_ops;
+
+	if (m >= MAX_PINS)
+		return -EINVAL;
+	return nonseekable_open(inode, file);
+}
+
+static int scx200_gpio_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static const struct file_operations scx200_gpio_fileops = {
+	.owner   = THIS_MODULE,
+	.write   = nsc_gpio_write,
+	.read    = nsc_gpio_read,
+	.open    = scx200_gpio_open,
+	.release = scx200_gpio_release,
+	.llseek  = no_llseek,
+};
+
+static struct cdev scx200_gpio_cdev;  /* use 1 cdev for all pins */
+
+static int __init scx200_gpio_init(void)
+{
+	int rc;
+	dev_t devid;
+
+	if (!scx200_gpio_present()) {
+		printk(KERN_ERR DRVNAME ": no SCx200 gpio present\n");
+		return -ENODEV;
+	}
+
+	/* support dev_dbg() with pdev->dev */
+	pdev = platform_device_alloc(DRVNAME, 0);
+	if (!pdev)
+		return -ENOMEM;
+
+	rc = platform_device_add(pdev);
+	if (rc)
+		goto undo_malloc;
+
+	/* nsc_gpio uses dev_dbg(), so needs this */
+	scx200_gpio_ops.dev = &pdev->dev;
+
+	if (major) {
+		devid = MKDEV(major, 0);
+		rc = register_chrdev_region(devid, MAX_PINS, "scx200_gpio");
+	} else {
+		rc = alloc_chrdev_region(&devid, 0, MAX_PINS, "scx200_gpio");
+		major = MAJOR(devid);
+	}
+	if (rc < 0) {
+		dev_err(&pdev->dev, "SCx200 chrdev_region err: %d\n", rc);
+		goto undo_platform_device_add;
+	}
+
+	cdev_init(&scx200_gpio_cdev, &scx200_gpio_fileops);
+	cdev_add(&scx200_gpio_cdev, devid, MAX_PINS);
+
+	return 0; /* succeed */
+
+undo_platform_device_add:
+	platform_device_del(pdev);
+undo_malloc:
+	platform_device_put(pdev);
+
+	return rc;
+}
+
+static void __exit scx200_gpio_cleanup(void)
+{
+	cdev_del(&scx200_gpio_cdev);
+	/* cdev_put(&scx200_gpio_cdev); */
+
+	unregister_chrdev_region(MKDEV(major, 0), MAX_PINS);
+	platform_device_unregister(pdev);
+}
+
+module_init(scx200_gpio_init);
+module_exit(scx200_gpio_cleanup);
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
new file mode 100644
index 0000000..5918ea7
--- /dev/null
+++ b/drivers/char/snsc.c
@@ -0,0 +1,469 @@
+/*
+ * SN Platform system controller communication support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * System controller communication driver
+ *
+ * This driver allows a user process to communicate with the system
+ * controller (a.k.a. "IRouter") network in an SGI SN system.
+ */
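+
+/*
+ * Illustrative user-space sketch (not part of this driver): exchanging
+ * IRouter data with one node's controller.  Device names are built
+ * per-node in scdrv_init() below from the module id plus "^slot#slab";
+ * "/dev/001c07^0#0" is an example only.  Reads and writes move at most
+ * CHUNKSIZE (127) bytes at a time.
+ *
+ *	#include <fcntl.h>
+ *	#include <poll.h>
+ *	#include <unistd.h>
+ *
+ *	int main(void)
+ *	{
+ *		char buf[127];
+ *		struct pollfd p = { .events = POLLIN };
+ *
+ *		p.fd = open("/dev/001c07^0#0", O_RDONLY);
+ *		if (p.fd < 0)
+ *			return 1;
+ *		poll(&p, 1, -1);
+ *		return read(p.fd, buf, sizeof(buf)) < 0;
+ *	}
+ */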
+
+#include <linux/interrupt.h>
+#include <linux/sched/signal.h>
+#include <linux/device.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <asm/sn/io.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/module.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/nodepda.h>
+#include "snsc.h"
+
+#define SYSCTL_BASENAME	"snsc"
+
+#define SCDRV_BUFSZ	2048
+#define SCDRV_TIMEOUT	1000
+
+static DEFINE_MUTEX(scdrv_mutex);
+static irqreturn_t
+scdrv_interrupt(int irq, void *subch_data)
+{
+	struct subch_data_s *sd = subch_data;
+	unsigned long flags;
+	int status;
+
+	spin_lock_irqsave(&sd->sd_rlock, flags);
+	spin_lock(&sd->sd_wlock);
+	status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
+
+	if (status > 0) {
+		if (status & SAL_IROUTER_INTR_RECV) {
+			wake_up(&sd->sd_rq);
+		}
+		if (status & SAL_IROUTER_INTR_XMIT) {
+			ia64_sn_irtr_intr_disable
+			    (sd->sd_nasid, sd->sd_subch,
+			     SAL_IROUTER_INTR_XMIT);
+			wake_up(&sd->sd_wq);
+		}
+	}
+	spin_unlock(&sd->sd_wlock);
+	spin_unlock_irqrestore(&sd->sd_rlock, flags);
+	return IRQ_HANDLED;
+}
+
+/*
+ * scdrv_open
+ *
+ * Reserve a subchannel for system controller communication.
+ */
+
+static int
+scdrv_open(struct inode *inode, struct file *file)
+{
+	struct sysctl_data_s *scd;
+	struct subch_data_s *sd;
+	int rv;
+
+	/* look up device info for this device file */
+	scd = container_of(inode->i_cdev, struct sysctl_data_s, scd_cdev);
+
+	/* allocate memory for subchannel data */
+	sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
+	if (sd == NULL) {
+		printk("%s: couldn't allocate subchannel data\n",
+		       __func__);
+		return -ENOMEM;
+	}
+
+	/* initialize subch_data_s fields */
+	sd->sd_nasid = scd->scd_nasid;
+	sd->sd_subch = ia64_sn_irtr_open(scd->scd_nasid);
+
+	if (sd->sd_subch < 0) {
+		kfree(sd);
+		printk("%s: couldn't allocate subchannel\n", __func__);
+		return -EBUSY;
+	}
+
+	spin_lock_init(&sd->sd_rlock);
+	spin_lock_init(&sd->sd_wlock);
+	init_waitqueue_head(&sd->sd_rq);
+	init_waitqueue_head(&sd->sd_wq);
+	sema_init(&sd->sd_rbs, 1);
+	sema_init(&sd->sd_wbs, 1);
+
+	file->private_data = sd;
+
+	/* hook this subchannel up to the system controller interrupt */
+	mutex_lock(&scdrv_mutex);
+	rv = request_irq(SGI_UART_VECTOR, scdrv_interrupt,
+			 IRQF_SHARED, SYSCTL_BASENAME, sd);
+	if (rv) {
+		ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch);
+		kfree(sd);
+		printk("%s: irq request failed (%d)\n", __func__, rv);
+		mutex_unlock(&scdrv_mutex);
+		return -EBUSY;
+	}
+	mutex_unlock(&scdrv_mutex);
+	return 0;
+}
+
+/*
+ * scdrv_release
+ *
+ * Release a previously-reserved subchannel.
+ */
+
+static int
+scdrv_release(struct inode *inode, struct file *file)
+{
+	struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
+	int rv;
+
+	/* free the interrupt */
+	free_irq(SGI_UART_VECTOR, sd);
+
+	/* ask SAL to close the subchannel */
+	rv = ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch);
+
+	kfree(sd);
+	return rv;
+}
+
+/*
+ * scdrv_read
+ *
+ * Called to read bytes from the open IRouter pipe.
+ *
+ */
+
+static inline int
+read_status_check(struct subch_data_s *sd, int *len)
+{
+	return ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch, sd->sd_rb, len);
+}
+
+static ssize_t
+scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
+{
+	int status;
+	int len;
+	unsigned long flags;
+	struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
+
+	/* try to get control of the read buffer */
+	if (down_trylock(&sd->sd_rbs)) {
+		/* somebody else has it now;
+		 * if we're non-blocking, then exit...
+		 */
+		if (file->f_flags & O_NONBLOCK) {
+			return -EAGAIN;
+		}
+		/* ...or if we want to block, then do so here */
+		if (down_interruptible(&sd->sd_rbs)) {
+			/* something went wrong with wait */
+			return -ERESTARTSYS;
+		}
+	}
+
+	/* anything to read? */
+	len = CHUNKSIZE;
+	spin_lock_irqsave(&sd->sd_rlock, flags);
+	status = read_status_check(sd, &len);
+
+	/* if not, and we're blocking I/O, loop */
+	while (status < 0) {
+		DECLARE_WAITQUEUE(wait, current);
+
+		if (file->f_flags & O_NONBLOCK) {
+			spin_unlock_irqrestore(&sd->sd_rlock, flags);
+			up(&sd->sd_rbs);
+			return -EAGAIN;
+		}
+
+		len = CHUNKSIZE;
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue(&sd->sd_rq, &wait);
+		spin_unlock_irqrestore(&sd->sd_rlock, flags);
+
+		schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
+
+		remove_wait_queue(&sd->sd_rq, &wait);
+		if (signal_pending(current)) {
+			/* wait was interrupted */
+			up(&sd->sd_rbs);
+			return -ERESTARTSYS;
+		}
+
+		spin_lock_irqsave(&sd->sd_rlock, flags);
+		status = read_status_check(sd, &len);
+	}
+	spin_unlock_irqrestore(&sd->sd_rlock, flags);
+
+	if (len > 0) {
+		/* we read something in the last read_status_check(); copy
+		 * it out to user space
+		 */
+		if (count < len) {
+			pr_debug("%s: only accepting %d of %d bytes\n",
+				 __func__, (int) count, len);
+		}
+		len = min((int) count, len);
+		if (copy_to_user(buf, sd->sd_rb, len))
+			len = -EFAULT;
+	}
+
+	/* release the read buffer and wake anyone who might be
+	 * waiting for it
+	 */
+	up(&sd->sd_rbs);
+
+	/* return the number of characters read in */
+	return len;
+}
+
+/*
+ * scdrv_write
+ *
+ * Writes a chunk of an IRouter packet (or other system controller data)
+ * to the system controller.
+ *
+ */
+static inline int
+write_status_check(struct subch_data_s *sd, int count)
+{
+	return ia64_sn_irtr_send(sd->sd_nasid, sd->sd_subch, sd->sd_wb, count);
+}
+
+static ssize_t
+scdrv_write(struct file *file, const char __user *buf,
+	    size_t count, loff_t *f_pos)
+{
+	unsigned long flags;
+	int status;
+	struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
+
+	/* try to get control of the write buffer */
+	if (down_trylock(&sd->sd_wbs)) {
+		/* somebody else has it now;
+		 * if we're non-blocking, then exit...
+		 */
+		if (file->f_flags & O_NONBLOCK) {
+			return -EAGAIN;
+		}
+		/* ...or if we want to block, then do so here */
+		if (down_interruptible(&sd->sd_wbs)) {
+			/* something went wrong with wait */
+			return -ERESTARTSYS;
+		}
+	}
+
+	count = min((int) count, CHUNKSIZE);
+	if (copy_from_user(sd->sd_wb, buf, count)) {
+		up(&sd->sd_wbs);
+		return -EFAULT;
+	}
+
+	/* try to send the buffer */
+	spin_lock_irqsave(&sd->sd_wlock, flags);
+	status = write_status_check(sd, count);
+
+	/* if we failed, and we want to block, then loop */
+	while (status <= 0) {
+		DECLARE_WAITQUEUE(wait, current);
+
+		if (file->f_flags & O_NONBLOCK) {
+			spin_unlock_irqrestore(&sd->sd_wlock, flags);
+			up(&sd->sd_wbs);
+			return -EAGAIN;
+		}
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue(&sd->sd_wq, &wait);
+		spin_unlock_irqrestore(&sd->sd_wlock, flags);
+
+		schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
+
+		remove_wait_queue(&sd->sd_wq, &wait);
+		if (signal_pending(current)) {
+			/* wait was interrupted */
+			up(&sd->sd_wbs);
+			return -ERESTARTSYS;
+		}
+
+		spin_lock_irqsave(&sd->sd_wlock, flags);
+		status = write_status_check(sd, count);
+	}
+	spin_unlock_irqrestore(&sd->sd_wlock, flags);
+
+	/* release the write buffer and wake anyone who's waiting for it */
+	up(&sd->sd_wbs);
+
+	/* return the number of characters accepted (should be the complete
+	 * "chunk" as requested)
+	 */
+	if ((status >= 0) && (status < count)) {
+		pr_debug("Didn't accept the full chunk; %d of %d\n",
+			 status, (int) count);
+	}
+	return status;
+}
+
+static __poll_t
+scdrv_poll(struct file *file, struct poll_table_struct *wait)
+{
+	__poll_t mask = 0;
+	int status = 0;
+	struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
+	unsigned long flags;
+
+	poll_wait(file, &sd->sd_rq, wait);
+	poll_wait(file, &sd->sd_wq, wait);
+
+	spin_lock_irqsave(&sd->sd_rlock, flags);
+	spin_lock(&sd->sd_wlock);
+	status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
+	spin_unlock(&sd->sd_wlock);
+	spin_unlock_irqrestore(&sd->sd_rlock, flags);
+
+	if (status > 0) {
+		if (status & SAL_IROUTER_INTR_RECV) {
+			mask |= EPOLLIN | EPOLLRDNORM;
+		}
+		if (status & SAL_IROUTER_INTR_XMIT) {
+			mask |= EPOLLOUT | EPOLLWRNORM;
+		}
+	}
+
+	return mask;
+}
+
+static const struct file_operations scdrv_fops = {
+	.owner =	THIS_MODULE,
+	.read =		scdrv_read,
+	.write =	scdrv_write,
+	.poll =		scdrv_poll,
+	.open =		scdrv_open,
+	.release =	scdrv_release,
+	.llseek =	noop_llseek,
+};
+
+static struct class *snsc_class;
+
+/*
+ * scdrv_init
+ *
+ * Called at boot time to initialize the system controller communication
+ * facility.
+ */
+int __init
+scdrv_init(void)
+{
+	geoid_t geoid;
+	cnodeid_t cnode;
+	char devname[32];
+	char *devnamep;
+	struct sysctl_data_s *scd;
+	void *salbuf;
+	dev_t first_dev, dev;
+	nasid_t event_nasid;
+
+	if (!ia64_platform_is("sn2"))
+		return -ENODEV;
+
+	event_nasid = ia64_sn_get_console_nasid();
+
+	snsc_class = class_create(THIS_MODULE, SYSCTL_BASENAME);
+	if (IS_ERR(snsc_class)) {
+		printk("%s: failed to allocate class\n", __func__);
+		return PTR_ERR(snsc_class);
+	}
+
+	if (alloc_chrdev_region(&first_dev, 0, num_cnodes,
+				SYSCTL_BASENAME) < 0) {
+		printk("%s: failed to register SN system controller device\n",
+		       __func__);
+		return -ENODEV;
+	}
+
+	for (cnode = 0; cnode < num_cnodes; cnode++) {
+			geoid = cnodeid_get_geoid(cnode);
+			devnamep = devname;
+			format_module_id(devnamep, geo_module(geoid),
+					 MODULE_FORMAT_BRIEF);
+			devnamep = devname + strlen(devname);
+			sprintf(devnamep, "^%d#%d", geo_slot(geoid),
+				geo_slab(geoid));
+
+			/* allocate sysctl device data */
+			scd = kzalloc(sizeof (struct sysctl_data_s),
+				      GFP_KERNEL);
+			if (!scd) {
+				printk("%s: failed to allocate device info"
+				       "for %s/%s\n", __func__,
+				       SYSCTL_BASENAME, devname);
+				continue;
+			}
+
+			/* initialize sysctl device data fields */
+			scd->scd_nasid = cnodeid_to_nasid(cnode);
+			if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) {
+				printk("%s: failed to allocate driver buffer"
+				       "(%s%s)\n", __func__,
+				       SYSCTL_BASENAME, devname);
+				kfree(scd);
+				continue;
+			}
+
+			if (ia64_sn_irtr_init(scd->scd_nasid, salbuf,
+					      SCDRV_BUFSZ) < 0) {
+				printk
+				    ("%s: failed to initialize SAL for"
+				     " system controller communication"
+				     " (%s/%s): outdated PROM?\n",
+				     __func__, SYSCTL_BASENAME, devname);
+				kfree(scd);
+				kfree(salbuf);
+				continue;
+			}
+
+			dev = first_dev + cnode;
+			cdev_init(&scd->scd_cdev, &scdrv_fops);
+			if (cdev_add(&scd->scd_cdev, dev, 1)) {
+				printk("%s: failed to register system"
+				       " controller device (%s%s)\n",
+				       __func__, SYSCTL_BASENAME, devname);
+				kfree(scd);
+				kfree(salbuf);
+				continue;
+			}
+
+			device_create(snsc_class, NULL, dev, NULL,
+				      "%s", devname);
+
+			ia64_sn_irtr_intr_enable(scd->scd_nasid,
+						 0 /*ignored */ ,
+						 SAL_IROUTER_INTR_RECV);
+
+			/* on the console nasid, prepare to receive
+			 * system controller environmental events
+			 */
+			if (scd->scd_nasid == event_nasid) {
+				scdrv_event_init(scd);
+			}
+	}
+	return 0;
+}
+device_initcall(scdrv_init);
diff --git a/drivers/char/snsc.h b/drivers/char/snsc.h
new file mode 100644
index 0000000..e8c52c8
--- /dev/null
+++ b/drivers/char/snsc.h
@@ -0,0 +1,92 @@
+/*
+ * SN Platform system controller communication support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * This file contains macros and data types for communication with the
+ * system controllers in SGI SN systems.
+ */
+
+#ifndef _SN_SYSCTL_H_
+#define _SN_SYSCTL_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/semaphore.h>
+#include <asm/sn/types.h>
+
+#define CHUNKSIZE 127
+
+/* This structure is used to track an open subchannel. */
+struct subch_data_s {
+	nasid_t sd_nasid;	/* node on which the subchannel was opened */
+	int sd_subch;		/* subchannel number */
+	spinlock_t sd_rlock;	/* monitor lock for rsv */
+	spinlock_t sd_wlock;	/* monitor lock for wsv */
+	wait_queue_head_t sd_rq;	/* wait queue for readers */
+	wait_queue_head_t sd_wq;	/* wait queue for writers */
+	struct semaphore sd_rbs;	/* semaphore for read buffer */
+	struct semaphore sd_wbs;	/* semaphore for write buffer */
+
+	char sd_rb[CHUNKSIZE];	/* read buffer */
+	char sd_wb[CHUNKSIZE];	/* write buffer */
+};
+
+struct sysctl_data_s {
+	struct cdev scd_cdev;	/* Character device info */
+	nasid_t scd_nasid;	/* Node on which subchannels are opened. */
+};
+
+
+/* argument types */
+#define IR_ARG_INT              0x00    /* 4-byte integer (big-endian)  */
+#define IR_ARG_ASCII            0x01    /* null-terminated ASCII string */
+#define IR_ARG_UNKNOWN          0x80    /* unknown data type.  The low
+                                         * 7 bits will contain the data
+                                         * length.                      */
+#define IR_ARG_UNKNOWN_LENGTH_MASK	0x7f
+
+
+/* system controller event codes */
+#define EV_CLASS_MASK		0xf000ul
+#define EV_SEVERITY_MASK	0x0f00ul
+#define EV_COMPONENT_MASK	0x00fful
+
+#define EV_CLASS_POWER		0x1000ul
+#define EV_CLASS_FAN		0x2000ul
+#define EV_CLASS_TEMP		0x3000ul
+#define EV_CLASS_ENV		0x4000ul
+#define EV_CLASS_TEST_FAULT	0x5000ul
+#define EV_CLASS_TEST_WARNING	0x6000ul
+#define EV_CLASS_PWRD_NOTIFY	0x8000ul
+
+/* ENV class codes */
+#define ENV_PWRDN_PEND		0x4101ul
+
+#define EV_SEVERITY_POWER_STABLE	0x0000ul
+#define EV_SEVERITY_POWER_LOW_WARNING	0x0100ul
+#define EV_SEVERITY_POWER_HIGH_WARNING	0x0200ul
+#define EV_SEVERITY_POWER_HIGH_FAULT	0x0300ul
+#define EV_SEVERITY_POWER_LOW_FAULT	0x0400ul
+
+#define EV_SEVERITY_FAN_STABLE		0x0000ul
+#define EV_SEVERITY_FAN_WARNING		0x0100ul
+#define EV_SEVERITY_FAN_FAULT		0x0200ul
+
+#define EV_SEVERITY_TEMP_STABLE		0x0000ul
+#define EV_SEVERITY_TEMP_ADVISORY	0x0100ul
+#define EV_SEVERITY_TEMP_CRITICAL	0x0200ul
+#define EV_SEVERITY_TEMP_FAULT		0x0300ul
+
+void scdrv_event_init(struct sysctl_data_s *);
+
+#endif /* _SN_SYSCTL_H_ */
diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c
new file mode 100644
index 0000000..e452673
--- /dev/null
+++ b/drivers/char/snsc_event.c
@@ -0,0 +1,303 @@
+/*
+ * SN Platform system controller communication support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * System controller event handler
+ *
+ * These routines deal with environmental events arriving from the
+ * system controllers.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/unaligned.h>
+#include "snsc.h"
+
+static struct subch_data_s *event_sd;
+
+void scdrv_event(unsigned long);
+DECLARE_TASKLET(sn_sysctl_event, scdrv_event, 0);
+
+/*
+ * scdrv_event_interrupt
+ *
+ * Pull incoming environmental events off the physical link to the
+ * system controller and put them in a temporary holding area in SAL.
+ * Schedule scdrv_event() to move them along to their ultimate
+ * destination.
+ */
+static irqreturn_t
+scdrv_event_interrupt(int irq, void *subch_data)
+{
+	struct subch_data_s *sd = subch_data;
+	unsigned long flags;
+	int status;
+
+	spin_lock_irqsave(&sd->sd_rlock, flags);
+	status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
+
+	if ((status > 0) && (status & SAL_IROUTER_INTR_RECV)) {
+		tasklet_schedule(&sn_sysctl_event);
+	}
+	spin_unlock_irqrestore(&sd->sd_rlock, flags);
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * scdrv_parse_event
+ *
+ * Break an event (as read from SAL) into useful pieces so we can decide
+ * what to do with it.
+ */
+static int
+scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
+{
+	char *desc_end;
+
+	/* record event source address */
+	*src = get_unaligned_be32(event);
+	event += 4; 			/* move on to event code */
+
+	/* record the system controller's event code */
+	*code = get_unaligned_be32(event);
+	event += 4;			/* move on to event arguments */
+
+	/* how many arguments are in the packet? */
+	if (*event++ != 2) {
+		/* if not 2, give up */
+		return -1;
+	}
+
+	/* parse out the ESP code */
+	if (*event++ != IR_ARG_INT) {
+		/* not an integer argument, so give up */
+		return -1;
+	}
+	*esp_code = get_unaligned_be32(event);
+	event += 4;
+
+	/* parse out the event description */
+	if (*event++ != IR_ARG_ASCII) {
+		/* not an ASCII string, so give up */
+		return -1;
+	}
+	event[CHUNKSIZE-1] = '\0';	/* ensure this string ends! */
+	event += 2; 			/* skip leading CR/LF */
+	desc_end = desc + sprintf(desc, "%s", event);
+
+	/* strip trailing CR/LF (if any) */
+	for (desc_end--;
+	     (desc_end != desc) && ((*desc_end == 0xd) || (*desc_end == 0xa));
+	     desc_end--) {
+		*desc_end = '\0';
+	}
+
+	return 0;
+}
+
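+#if 0
+/*
+ * Illustrative self-test, not part of the driver: builds a minimal
+ * two-argument event packet by hand, following the layout that
+ * scdrv_parse_event() expects, and feeds it back through the parser.
+ * All values are arbitrary.  The buffer is oversized because the
+ * parser NUL-terminates CHUNKSIZE-1 bytes past the start of the
+ * description argument.
+ */
+static void scdrv_parse_event_selftest(void)
+{
+	char event[CHUNKSIZE + 16] = { 0 };
+	char desc[CHUNKSIZE];
+	int src, code, esp_code;
+
+	put_unaligned_be32(0x1234, event);	/* event source */
+	put_unaligned_be32(EV_CLASS_TEMP | EV_SEVERITY_TEMP_CRITICAL,
+			   event + 4);		/* event code */
+	event[8] = 2;				/* argument count */
+	event[9] = IR_ARG_INT;
+	put_unaligned_be32(42, event + 10);	/* ESP code */
+	event[14] = IR_ARG_ASCII;
+	strcpy(event + 15, "\r\ntemperature critical\r\n");
+
+	if (scdrv_parse_event(event, &src, &code, &esp_code, desc) == 0)
+		printk(KERN_DEBUG "parsed src=0x%x code=0x%x esp=%d '%s'\n",
+		       src, code, esp_code, desc);
+}
+#endif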
+
+/*
+ * scdrv_event_severity
+ *
+ * Figure out how urgent a message we should write to the console/syslog
+ * via printk.
+ */
+static char *
+scdrv_event_severity(int code)
+{
+	int ev_class = (code & EV_CLASS_MASK);
+	int ev_severity = (code & EV_SEVERITY_MASK);
+	char *pk_severity = KERN_NOTICE;
+
+	switch (ev_class) {
+	case EV_CLASS_POWER:
+		switch (ev_severity) {
+		case EV_SEVERITY_POWER_LOW_WARNING:
+		case EV_SEVERITY_POWER_HIGH_WARNING:
+			pk_severity = KERN_WARNING;
+			break;
+		case EV_SEVERITY_POWER_HIGH_FAULT:
+		case EV_SEVERITY_POWER_LOW_FAULT:
+			pk_severity = KERN_ALERT;
+			break;
+		}
+		break;
+	case EV_CLASS_FAN:
+		switch (ev_severity) {
+		case EV_SEVERITY_FAN_WARNING:
+			pk_severity = KERN_WARNING;
+			break;
+		case EV_SEVERITY_FAN_FAULT:
+			pk_severity = KERN_CRIT;
+			break;
+		}
+		break;
+	case EV_CLASS_TEMP:
+		switch (ev_severity) {
+		case EV_SEVERITY_TEMP_ADVISORY:
+			pk_severity = KERN_WARNING;
+			break;
+		case EV_SEVERITY_TEMP_CRITICAL:
+			pk_severity = KERN_CRIT;
+			break;
+		case EV_SEVERITY_TEMP_FAULT:
+			pk_severity = KERN_ALERT;
+			break;
+		}
+		break;
+	case EV_CLASS_ENV:
+		pk_severity = KERN_ALERT;
+		break;
+	case EV_CLASS_TEST_FAULT:
+		pk_severity = KERN_ALERT;
+		break;
+	case EV_CLASS_TEST_WARNING:
+		pk_severity = KERN_WARNING;
+		break;
+	case EV_CLASS_PWRD_NOTIFY:
+		pk_severity = KERN_ALERT;
+		break;
+	}
+
+	return pk_severity;
+}
+
+
+/*
+ * scdrv_dispatch_event
+ *
+ * Do the right thing with an incoming event.  That's often nothing
+ * more than printing it to the system log.  For power-down notifications
+ * we start a graceful shutdown.
+ */
+static void
+scdrv_dispatch_event(char *event, int len)
+{
+	static int snsc_shutting_down = 0;
+	int code, esp_code, src, class;
+	char desc[CHUNKSIZE];
+	char *severity;
+
+	if (scdrv_parse_event(event, &src, &code, &esp_code, desc) < 0) {
+		/* ignore uninterpretable event */
+		return;
+	}
+
+	/* how urgent is the message? */
+	severity = scdrv_event_severity(code);
+
+	class = (code & EV_CLASS_MASK);
+
+	if (class == EV_CLASS_PWRD_NOTIFY || code == ENV_PWRDN_PEND) {
+		if (snsc_shutting_down)
+			return;
+
+		snsc_shutting_down = 1;
+
+		/* give a message for each type of event */
+		if (class == EV_CLASS_PWRD_NOTIFY)
+			printk(KERN_NOTICE "Power off indication received."
+			       " Sending SIGPWR to init...\n");
+		else if (code == ENV_PWRDN_PEND)
+			printk(KERN_CRIT "WARNING: Shutting down the system"
+			       " due to a critical environmental condition."
+			       " Sending SIGPWR to init...\n");
+
+		/* give a SIGPWR signal to init proc */
+		kill_cad_pid(SIGPWR, 0);
+	} else {
+		/* print to system log */
+		printk("%s|$(0x%x)%s\n", severity, esp_code, desc);
+	}
+}
+
+
+/*
+ * scdrv_event
+ *
+ * Called as a tasklet when an event arrives from the L1.  Read the event
+ * from where it's temporarily stored in SAL and call scdrv_dispatch_event()
+ * to send it on its way.  Keep trying to read events until SAL indicates
+ * that there are no more immediately available.
+ */
+void
+scdrv_event(unsigned long dummy)
+{
+	int status;
+	int len;
+	unsigned long flags;
+	struct subch_data_s *sd = event_sd;
+
+	/* anything to read? */
+	len = CHUNKSIZE;
+	spin_lock_irqsave(&sd->sd_rlock, flags);
+	status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
+				   sd->sd_rb, &len);
+
+	while (!(status < 0)) {
+		spin_unlock_irqrestore(&sd->sd_rlock, flags);
+		scdrv_dispatch_event(sd->sd_rb, len);
+		len = CHUNKSIZE;
+		spin_lock_irqsave(&sd->sd_rlock, flags);
+		status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
+					   sd->sd_rb, &len);
+	}
+	spin_unlock_irqrestore(&sd->sd_rlock, flags);
+}
+
+
+/*
+ * scdrv_event_init
+ *
+ * Sets up a system controller subchannel to begin receiving event
+ * messages. This is sort of a specialized version of scdrv_open()
+ * in drivers/char/snsc.c.
+ */
+void
+scdrv_event_init(struct sysctl_data_s *scd)
+{
+	int rv;
+
+	event_sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
+	if (event_sd == NULL) {
+		printk(KERN_WARNING "%s: couldn't allocate subchannel info"
+		       " for event monitoring\n", __func__);
+		return;
+	}
+
+	/* initialize subch_data_s fields */
+	event_sd->sd_nasid = scd->scd_nasid;
+	spin_lock_init(&event_sd->sd_rlock);
+
+	/* ask the system controllers to send events to this node */
+	event_sd->sd_subch = ia64_sn_sysctl_event_init(scd->scd_nasid);
+
+	if (event_sd->sd_subch < 0) {
+		kfree(event_sd);
+		printk(KERN_WARNING "%s: couldn't open event subchannel\n",
+		       __func__);
+		return;
+	}
+
+	/* hook event subchannel up to the system controller interrupt */
+	rv = request_irq(SGI_UART_VECTOR, scdrv_event_interrupt,
+			 IRQF_SHARED, "system controller events", event_sd);
+	if (rv) {
+		printk(KERN_WARNING "%s: irq request failed (%d)\n",
+		       __func__, rv);
+		ia64_sn_irtr_close(event_sd->sd_nasid, event_sd->sd_subch);
+		kfree(event_sd);
+		return;
+	}
+}
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
new file mode 100644
index 0000000..1866898
--- /dev/null
+++ b/drivers/char/sonypi.c
@@ -0,0 +1,1563 @@
+/*
+ * Sony Programmable I/O Control Device driver for VAIO
+ *
+ * Copyright (C) 2007 Mattia Dongili <malattia@linux.it>
+ *
+ * Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net>
+ *
+ * Copyright (C) 2005 Narayanan R S <nars@kadamba.org>
+ *
+ * Copyright (C) 2001-2002 Alcôve <www.alcove.com>
+ *
+ * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au>
+ *
+ * Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp>
+ *
+ * Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp>
+ *
+ * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com>
+ *
+ * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/input.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/kfifo.h>
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+
+#include <linux/uaccess.h>
+#include <asm/io.h>
+
+#include <linux/sonypi.h>
+
+#define SONYPI_DRIVER_VERSION	 "1.26"
+
+MODULE_AUTHOR("Stelian Pop <stelian@popies.net>");
+MODULE_DESCRIPTION("Sony Programmable I/O Control Device driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SONYPI_DRIVER_VERSION);
+
+static int minor = -1;
+module_param(minor, int, 0);
+MODULE_PARM_DESC(minor,
+		 "minor number of the misc device, default is -1 (automatic)");
+
+static int verbose;		/* = 0 */
+module_param(verbose, int, 0644);
+MODULE_PARM_DESC(verbose, "be verbose, default is 0 (no)");
+
+static int fnkeyinit;		/* = 0 */
+module_param(fnkeyinit, int, 0444);
+MODULE_PARM_DESC(fnkeyinit,
+		 "set this if your Fn keys do not generate any event");
+
+static int camera;		/* = 0 */
+module_param(camera, int, 0444);
+MODULE_PARM_DESC(camera,
+		 "set this if you have a MotionEye camera (PictureBook series)");
+
+static int compat;		/* = 0 */
+module_param(compat, int, 0444);
+MODULE_PARM_DESC(compat,
+		 "set this if you want to enable backward compatibility mode");
+
+static unsigned long mask = 0xffffffff;
+module_param(mask, ulong, 0644);
+MODULE_PARM_DESC(mask,
+		 "set this to the mask of events you want to enable (see doc)");
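+/*
+ * Example: mask=0x105 enables only the jogger, Fn-key and zoom events
+ * (SONYPI_JOGGER_MASK | SONYPI_FNKEY_MASK | SONYPI_ZOOM_MASK, defined
+ * below).
+ */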
+
+static int useinput = 1;
+module_param(useinput, int, 0444);
+MODULE_PARM_DESC(useinput,
+		 "set this if you would like sonypi to feed events to the input subsystem");
+
+static int check_ioport = 1;
+module_param(check_ioport, int, 0444);
+MODULE_PARM_DESC(check_ioport,
+		 "set this to 0 if you think the automatic ioport check for sony-laptop is wrong");
+
+#define SONYPI_DEVICE_MODEL_TYPE1	1
+#define SONYPI_DEVICE_MODEL_TYPE2	2
+#define SONYPI_DEVICE_MODEL_TYPE3	3
+
+/* type1 models use those */
+#define SONYPI_IRQ_PORT			0x8034
+#define SONYPI_IRQ_SHIFT		22
+#define SONYPI_TYPE1_BASE		0x50
+#define SONYPI_G10A			(SONYPI_TYPE1_BASE+0x14)
+#define SONYPI_TYPE1_REGION_SIZE	0x08
+#define SONYPI_TYPE1_EVTYPE_OFFSET	0x04
+
+/* type2 series specifics */
+#define SONYPI_SIRQ			0x9b
+#define SONYPI_SLOB			0x9c
+#define SONYPI_SHIB			0x9d
+#define SONYPI_TYPE2_REGION_SIZE	0x20
+#define SONYPI_TYPE2_EVTYPE_OFFSET	0x12
+
+/* type3 series specifics */
+#define SONYPI_TYPE3_BASE		0x40
+#define SONYPI_TYPE3_GID2		(SONYPI_TYPE3_BASE+0x48) /* 16 bits */
+#define SONYPI_TYPE3_MISC		(SONYPI_TYPE3_BASE+0x6d) /* 8 bits  */
+#define SONYPI_TYPE3_REGION_SIZE	0x20
+#define SONYPI_TYPE3_EVTYPE_OFFSET	0x12
+
+/* battery / brightness addresses */
+#define SONYPI_BAT_FLAGS	0x81
+#define SONYPI_LCD_LIGHT	0x96
+#define SONYPI_BAT1_PCTRM	0xa0
+#define SONYPI_BAT1_LEFT	0xa2
+#define SONYPI_BAT1_MAXRT	0xa4
+#define SONYPI_BAT2_PCTRM	0xa8
+#define SONYPI_BAT2_LEFT	0xaa
+#define SONYPI_BAT2_MAXRT	0xac
+#define SONYPI_BAT1_MAXTK	0xb0
+#define SONYPI_BAT1_FULL	0xb2
+#define SONYPI_BAT2_MAXTK	0xb8
+#define SONYPI_BAT2_FULL	0xba
+
+/* FAN0 information (reverse engineered from ACPI tables) */
+#define SONYPI_FAN0_STATUS	0x93
+#define SONYPI_TEMP_STATUS	0xC1
+
+/* ioports used for brightness and type2 events */
+#define SONYPI_DATA_IOPORT	0x62
+#define SONYPI_CST_IOPORT	0x66
+
+/* The set of possible ioports */
+struct sonypi_ioport_list {
+	u16	port1;
+	u16	port2;
+};
+
+static struct sonypi_ioport_list sonypi_type1_ioport_list[] = {
+	{ 0x10c0, 0x10c4 },	/* looks like the default on C1Vx */
+	{ 0x1080, 0x1084 },
+	{ 0x1090, 0x1094 },
+	{ 0x10a0, 0x10a4 },
+	{ 0x10b0, 0x10b4 },
+	{ 0x0, 0x0 }
+};
+
+static struct sonypi_ioport_list sonypi_type2_ioport_list[] = {
+	{ 0x1080, 0x1084 },
+	{ 0x10a0, 0x10a4 },
+	{ 0x10c0, 0x10c4 },
+	{ 0x10e0, 0x10e4 },
+	{ 0x0, 0x0 }
+};
+
+/* same as in type 2 models */
+static struct sonypi_ioport_list *sonypi_type3_ioport_list =
+	sonypi_type2_ioport_list;
+
+/* The set of possible interrupts */
+struct sonypi_irq_list {
+	u16	irq;
+	u16	bits;
+};
+
+static struct sonypi_irq_list sonypi_type1_irq_list[] = {
+	{ 11, 0x2 },	/* IRQ 11, GO22=0,GO23=1 in AML */
+	{ 10, 0x1 },	/* IRQ 10, GO22=1,GO23=0 in AML */
+	{  5, 0x0 },	/* IRQ  5, GO22=0,GO23=0 in AML */
+	{  0, 0x3 }	/* no IRQ, GO22=1,GO23=1 in AML */
+};
+
+static struct sonypi_irq_list sonypi_type2_irq_list[] = {
+	{ 11, 0x80 },	/* IRQ 11, 0x80 in SIRQ in AML */
+	{ 10, 0x40 },	/* IRQ 10, 0x40 in SIRQ in AML */
+	{  9, 0x20 },	/* IRQ  9, 0x20 in SIRQ in AML */
+	{  6, 0x10 },	/* IRQ  6, 0x10 in SIRQ in AML */
+	{  0, 0x00 }	/* no IRQ, 0x00 in SIRQ in AML */
+};
+
+/* same as in type2 models */
+static struct sonypi_irq_list *sonypi_type3_irq_list = sonypi_type2_irq_list;
+
+#define SONYPI_CAMERA_BRIGHTNESS		0
+#define SONYPI_CAMERA_CONTRAST			1
+#define SONYPI_CAMERA_HUE			2
+#define SONYPI_CAMERA_COLOR			3
+#define SONYPI_CAMERA_SHARPNESS			4
+
+#define SONYPI_CAMERA_PICTURE			5
+#define SONYPI_CAMERA_EXPOSURE_MASK		0xC
+#define SONYPI_CAMERA_WHITE_BALANCE_MASK	0x3
+#define SONYPI_CAMERA_PICTURE_MODE_MASK		0x30
+#define SONYPI_CAMERA_MUTE_MASK			0x40
+
+/* the remaining registers do not need a read-until-not-0xff retry loop */
+#define SONYPI_CAMERA_AGC			6
+#define SONYPI_CAMERA_AGC_MASK			0x30
+#define SONYPI_CAMERA_SHUTTER_MASK 		0x7
+
+#define SONYPI_CAMERA_SHUTDOWN_REQUEST		7
+#define SONYPI_CAMERA_CONTROL			0x10
+
+#define SONYPI_CAMERA_STATUS 			7
+#define SONYPI_CAMERA_STATUS_READY 		0x2
+#define SONYPI_CAMERA_STATUS_POSITION		0x4
+
+#define SONYPI_DIRECTION_BACKWARDS 		0x4
+
+#define SONYPI_CAMERA_REVISION 			8
+#define SONYPI_CAMERA_ROMVERSION 		9
+
+/* Event masks */
+#define SONYPI_JOGGER_MASK			0x00000001
+#define SONYPI_CAPTURE_MASK			0x00000002
+#define SONYPI_FNKEY_MASK			0x00000004
+#define SONYPI_BLUETOOTH_MASK			0x00000008
+#define SONYPI_PKEY_MASK			0x00000010
+#define SONYPI_BACK_MASK			0x00000020
+#define SONYPI_HELP_MASK			0x00000040
+#define SONYPI_LID_MASK				0x00000080
+#define SONYPI_ZOOM_MASK			0x00000100
+#define SONYPI_THUMBPHRASE_MASK			0x00000200
+#define SONYPI_MEYE_MASK			0x00000400
+#define SONYPI_MEMORYSTICK_MASK			0x00000800
+#define SONYPI_BATTERY_MASK			0x00001000
+#define SONYPI_WIRELESS_MASK			0x00002000
+
+struct sonypi_event {
+	u8	data;
+	u8	event;
+};
+
+/* The set of possible button release events */
+static struct sonypi_event sonypi_releaseev[] = {
+	{ 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED },
+	{ 0, 0 }
+};
+
+/* The set of possible jogger events  */
+static struct sonypi_event sonypi_joggerev[] = {
+	{ 0x1f, SONYPI_EVENT_JOGDIAL_UP },
+	{ 0x01, SONYPI_EVENT_JOGDIAL_DOWN },
+	{ 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED },
+	{ 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED },
+	{ 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP },
+	{ 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN },
+	{ 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED },
+	{ 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED },
+	{ 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP },
+	{ 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN },
+	{ 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED },
+	{ 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED },
+	{ 0x40, SONYPI_EVENT_JOGDIAL_PRESSED },
+	{ 0, 0 }
+};
+
+/* The set of possible capture button events */
+static struct sonypi_event sonypi_captureev[] = {
+	{ 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED },
+	{ 0x07, SONYPI_EVENT_CAPTURE_PRESSED },
+	{ 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED },
+	{ 0, 0 }
+};
+
+/* The set of possible fnkeys events */
+static struct sonypi_event sonypi_fnkeyev[] = {
+	{ 0x10, SONYPI_EVENT_FNKEY_ESC },
+	{ 0x11, SONYPI_EVENT_FNKEY_F1 },
+	{ 0x12, SONYPI_EVENT_FNKEY_F2 },
+	{ 0x13, SONYPI_EVENT_FNKEY_F3 },
+	{ 0x14, SONYPI_EVENT_FNKEY_F4 },
+	{ 0x15, SONYPI_EVENT_FNKEY_F5 },
+	{ 0x16, SONYPI_EVENT_FNKEY_F6 },
+	{ 0x17, SONYPI_EVENT_FNKEY_F7 },
+	{ 0x18, SONYPI_EVENT_FNKEY_F8 },
+	{ 0x19, SONYPI_EVENT_FNKEY_F9 },
+	{ 0x1a, SONYPI_EVENT_FNKEY_F10 },
+	{ 0x1b, SONYPI_EVENT_FNKEY_F11 },
+	{ 0x1c, SONYPI_EVENT_FNKEY_F12 },
+	{ 0x1f, SONYPI_EVENT_FNKEY_RELEASED },
+	{ 0x21, SONYPI_EVENT_FNKEY_1 },
+	{ 0x22, SONYPI_EVENT_FNKEY_2 },
+	{ 0x31, SONYPI_EVENT_FNKEY_D },
+	{ 0x32, SONYPI_EVENT_FNKEY_E },
+	{ 0x33, SONYPI_EVENT_FNKEY_F },
+	{ 0x34, SONYPI_EVENT_FNKEY_S },
+	{ 0x35, SONYPI_EVENT_FNKEY_B },
+	{ 0x36, SONYPI_EVENT_FNKEY_ONLY },
+	{ 0, 0 }
+};
+
+/* The set of possible program key events */
+static struct sonypi_event sonypi_pkeyev[] = {
+	{ 0x01, SONYPI_EVENT_PKEY_P1 },
+	{ 0x02, SONYPI_EVENT_PKEY_P2 },
+	{ 0x04, SONYPI_EVENT_PKEY_P3 },
+	{ 0x5c, SONYPI_EVENT_PKEY_P1 },
+	{ 0, 0 }
+};
+
+/* The set of possible bluetooth events */
+static struct sonypi_event sonypi_blueev[] = {
+	{ 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED },
+	{ 0x59, SONYPI_EVENT_BLUETOOTH_ON },
+	{ 0x5a, SONYPI_EVENT_BLUETOOTH_OFF },
+	{ 0, 0 }
+};
+
+/* The set of possible wireless events */
+static struct sonypi_event sonypi_wlessev[] = {
+	{ 0x59, SONYPI_EVENT_WIRELESS_ON },
+	{ 0x5a, SONYPI_EVENT_WIRELESS_OFF },
+	{ 0, 0 }
+};
+
+/* The set of possible back button events */
+static struct sonypi_event sonypi_backev[] = {
+	{ 0x20, SONYPI_EVENT_BACK_PRESSED },
+	{ 0, 0 }
+};
+
+/* The set of possible help button events */
+static struct sonypi_event sonypi_helpev[] = {
+	{ 0x3b, SONYPI_EVENT_HELP_PRESSED },
+	{ 0, 0 }
+};
+
+
+/* The set of possible lid events */
+static struct sonypi_event sonypi_lidev[] = {
+	{ 0x51, SONYPI_EVENT_LID_CLOSED },
+	{ 0x50, SONYPI_EVENT_LID_OPENED },
+	{ 0, 0 }
+};
+
+/* The set of possible zoom events */
+static struct sonypi_event sonypi_zoomev[] = {
+	{ 0x39, SONYPI_EVENT_ZOOM_PRESSED },
+	{ 0, 0 }
+};
+
+/* The set of possible thumbphrase events */
+static struct sonypi_event sonypi_thumbphraseev[] = {
+	{ 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED },
+	{ 0, 0 }
+};
+
+/* The set of possible motioneye camera events */
+static struct sonypi_event sonypi_meyeev[] = {
+	{ 0x00, SONYPI_EVENT_MEYE_FACE },
+	{ 0x01, SONYPI_EVENT_MEYE_OPPOSITE },
+	{ 0, 0 }
+};
+
+/* The set of possible memorystick events */
+static struct sonypi_event sonypi_memorystickev[] = {
+	{ 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT },
+	{ 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT },
+	{ 0, 0 }
+};
+
+/* The set of possible battery events */
+static struct sonypi_event sonypi_batteryev[] = {
+	{ 0x20, SONYPI_EVENT_BATTERY_INSERT },
+	{ 0x30, SONYPI_EVENT_BATTERY_REMOVE },
+	{ 0, 0 }
+};
+
+static struct sonypi_eventtypes {
+	int			model;
+	u8			data;
+	unsigned long		mask;
+	struct sonypi_event *	events;
+} sonypi_eventtypes[] = {
+	{ SONYPI_DEVICE_MODEL_TYPE1, 0, 0xffffffff, sonypi_releaseev },
+	{ SONYPI_DEVICE_MODEL_TYPE1, 0x70, SONYPI_MEYE_MASK, sonypi_meyeev },
+	{ SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_LID_MASK, sonypi_lidev },
+	{ SONYPI_DEVICE_MODEL_TYPE1, 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev },
+	{ SONYPI_DEVICE_MODEL_TYPE1, 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev },
+	{ SONYPI_DEVICE_MODEL_TYPE1, 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+	{ SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
+	{ SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev },
+	{ SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+	{ SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev },
+
+	{ SONYPI_DEVICE_MODEL_TYPE2, 0, 0xffffffff, sonypi_releaseev },
+	{ SONYPI_DEVICE_MODEL_TYPE2, 0x38, SONYPI_LID_MASK, sonypi_lidev },
+	{ SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev },
+	{ SONYPI_DEVICE_MODEL_TYPE2, 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev },
+	{ SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+	{ SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
+	{ SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
+	{ SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev },
+	{ SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_HELP_MASK, sonypi_helpev },
+	{ SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
+	{ SONYPI_DEVICE_MODEL_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
+	{ SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+	{ SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+	{ SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
+
+	{ SONYPI_DEVICE_MODEL_TYPE3, 0, 0xffffffff, sonypi_releaseev },
+	{ SONYPI_DEVICE_MODEL_TYPE3, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+	{ SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
+	{ SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+	{ SONYPI_DEVICE_MODEL_TYPE3, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+	{ SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
+	{ 0 }
+};
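+
+/*
+ * Matching sketch (mirroring the logic in sonypi_irq() below): on a
+ * type2 model, an event-type port value whose bits include 0x11
+ * selects the jogger table, and a data port value of 0x1f then yields
+ * SONYPI_EVENT_JOGDIAL_UP, provided SONYPI_JOGGER_MASK is set in the
+ * "mask" module parameter.
+ */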
+
+#define SONYPI_BUF_SIZE	128
+
+/* Correspondence table between sonypi events and input layer events */
+static struct {
+	int sonypiev;
+	int inputev;
+} sonypi_inputkeys[] = {
+	{ SONYPI_EVENT_CAPTURE_PRESSED,	 	KEY_CAMERA },
+	{ SONYPI_EVENT_FNKEY_ONLY, 		KEY_FN },
+	{ SONYPI_EVENT_FNKEY_ESC, 		KEY_FN_ESC },
+	{ SONYPI_EVENT_FNKEY_F1, 		KEY_FN_F1 },
+	{ SONYPI_EVENT_FNKEY_F2, 		KEY_FN_F2 },
+	{ SONYPI_EVENT_FNKEY_F3, 		KEY_FN_F3 },
+	{ SONYPI_EVENT_FNKEY_F4, 		KEY_FN_F4 },
+	{ SONYPI_EVENT_FNKEY_F5, 		KEY_FN_F5 },
+	{ SONYPI_EVENT_FNKEY_F6, 		KEY_FN_F6 },
+	{ SONYPI_EVENT_FNKEY_F7, 		KEY_FN_F7 },
+	{ SONYPI_EVENT_FNKEY_F8, 		KEY_FN_F8 },
+	{ SONYPI_EVENT_FNKEY_F9,		KEY_FN_F9 },
+	{ SONYPI_EVENT_FNKEY_F10,		KEY_FN_F10 },
+	{ SONYPI_EVENT_FNKEY_F11, 		KEY_FN_F11 },
+	{ SONYPI_EVENT_FNKEY_F12,		KEY_FN_F12 },
+	{ SONYPI_EVENT_FNKEY_1, 		KEY_FN_1 },
+	{ SONYPI_EVENT_FNKEY_2, 		KEY_FN_2 },
+	{ SONYPI_EVENT_FNKEY_D,			KEY_FN_D },
+	{ SONYPI_EVENT_FNKEY_E,			KEY_FN_E },
+	{ SONYPI_EVENT_FNKEY_F,			KEY_FN_F },
+	{ SONYPI_EVENT_FNKEY_S,			KEY_FN_S },
+	{ SONYPI_EVENT_FNKEY_B,			KEY_FN_B },
+	{ SONYPI_EVENT_BLUETOOTH_PRESSED, 	KEY_BLUE },
+	{ SONYPI_EVENT_BLUETOOTH_ON, 		KEY_BLUE },
+	{ SONYPI_EVENT_PKEY_P1, 		KEY_PROG1 },
+	{ SONYPI_EVENT_PKEY_P2, 		KEY_PROG2 },
+	{ SONYPI_EVENT_PKEY_P3, 		KEY_PROG3 },
+	{ SONYPI_EVENT_BACK_PRESSED, 		KEY_BACK },
+	{ SONYPI_EVENT_HELP_PRESSED, 		KEY_HELP },
+	{ SONYPI_EVENT_ZOOM_PRESSED, 		KEY_ZOOM },
+	{ SONYPI_EVENT_THUMBPHRASE_PRESSED, 	BTN_THUMB },
+	{ 0, 0 },
+};
+
+struct sonypi_keypress {
+	struct input_dev *dev;
+	int key;
+};
+
+static struct sonypi_device {
+	struct pci_dev *dev;
+	u16 irq;
+	u16 bits;
+	u16 ioport1;
+	u16 ioport2;
+	u16 region_size;
+	u16 evtype_offset;
+	int camera_power;
+	int bluetooth_power;
+	struct mutex lock;
+	struct kfifo fifo;
+	spinlock_t fifo_lock;
+	wait_queue_head_t fifo_proc_list;
+	struct fasync_struct *fifo_async;
+	int open_count;
+	int model;
+	struct input_dev *input_jog_dev;
+	struct input_dev *input_key_dev;
+	struct work_struct input_work;
+	struct kfifo input_fifo;
+	spinlock_t input_fifo_lock;
+} sonypi_device;
+
+#define ITERATIONS_LONG		10000
+#define ITERATIONS_SHORT	10
+
+#define wait_on_command(quiet, command, iterations) { \
+	unsigned int n = iterations; \
+	while (--n && (command)) \
+		udelay(1); \
+	if (!n && (verbose || !quiet)) \
+		printk(KERN_WARNING "sonypi command failed at %s : %s (line %d)\n", \
+		       __FILE__, __func__, __LINE__); \
+}
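+
+/*
+ * Usage sketch: spin for up to "iterations" microseconds until
+ * "command" evaluates to zero, logging a warning on timeout unless
+ * quiet is set (verbose overrides quiet), e.g.:
+ *
+ *	wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+ */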
+
+#ifdef CONFIG_ACPI
+#define SONYPI_ACPI_ACTIVE (!acpi_disabled)
+#else
+#define SONYPI_ACPI_ACTIVE 0
+#endif				/* CONFIG_ACPI */
+
+#ifdef CONFIG_ACPI
+static struct acpi_device *sonypi_acpi_device;
+static int acpi_driver_registered;
+#endif
+
+static int sonypi_ec_write(u8 addr, u8 value)
+{
+#ifdef CONFIG_ACPI
+	if (SONYPI_ACPI_ACTIVE)
+		return ec_write(addr, value);
+#endif
+	wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG);
+	outb_p(0x81, SONYPI_CST_IOPORT);
+	wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+	outb_p(addr, SONYPI_DATA_IOPORT);
+	wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+	outb_p(value, SONYPI_DATA_IOPORT);
+	wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+	return 0;
+}
+
+static int sonypi_ec_read(u8 addr, u8 *value)
+{
+#ifdef CONFIG_ACPI
+	if (SONYPI_ACPI_ACTIVE)
+		return ec_read(addr, value);
+#endif
+	wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG);
+	outb_p(0x80, SONYPI_CST_IOPORT);
+	wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+	outb_p(addr, SONYPI_DATA_IOPORT);
+	wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+	*value = inb_p(SONYPI_DATA_IOPORT);
+	return 0;
+}
+
+static int ec_read16(u8 addr, u16 *value)
+{
+	u8 val_lb, val_hb;
+	if (sonypi_ec_read(addr, &val_lb))
+		return -1;
+	if (sonypi_ec_read(addr + 1, &val_hb))
+		return -1;
+	*value = val_lb | (val_hb << 8);
+	return 0;
+}
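+
+#if 0
+/*
+ * Hypothetical 16-bit write counterpart of ec_read16() above, shown
+ * only to illustrate the little-endian pairing of adjacent EC
+ * registers; the driver itself never writes 16-bit EC values.
+ */
+static int ec_write16(u8 addr, u16 value)
+{
+	if (sonypi_ec_write(addr, value & 0xff))
+		return -1;
+	if (sonypi_ec_write(addr + 1, value >> 8))
+		return -1;
+	return 0;
+}
+#endif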
+
+/* Initializes the device - this comes from the AML code in the ACPI bios */
+static void sonypi_type1_srs(void)
+{
+	u32 v;
+
+	pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
+	v = (v & 0xFFFF0000) | ((u32) sonypi_device.ioport1);
+	pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
+
+	pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
+	v = (v & 0xFFF0FFFF) |
+	    (((u32) sonypi_device.ioport1 ^ sonypi_device.ioport2) << 16);
+	pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
+
+	v = inl(SONYPI_IRQ_PORT);
+	v &= ~(((u32) 0x3) << SONYPI_IRQ_SHIFT);
+	v |= (((u32) sonypi_device.bits) << SONYPI_IRQ_SHIFT);
+	outl(v, SONYPI_IRQ_PORT);
+
+	pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
+	v = (v & 0xFF1FFFFF) | 0x00C00000;
+	pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
+}
+
+static void sonypi_type2_srs(void)
+{
+	if (sonypi_ec_write(SONYPI_SHIB, (sonypi_device.ioport1 & 0xFF00) >> 8))
+		printk(KERN_WARNING "ec_write failed\n");
+	if (sonypi_ec_write(SONYPI_SLOB, sonypi_device.ioport1 & 0x00FF))
+		printk(KERN_WARNING "ec_write failed\n");
+	if (sonypi_ec_write(SONYPI_SIRQ, sonypi_device.bits))
+		printk(KERN_WARNING "ec_write failed\n");
+	udelay(10);
+}
+
+static void sonypi_type3_srs(void)
+{
+	u16 v16;
+	u8  v8;
+
+	/* This model type uses the same initialization of
+	 * the embedded controller as the type2 models. */
+	sonypi_type2_srs();
+
+	/* Initialization of PCI config space of the LPC interface bridge. */
+	v16 = (sonypi_device.ioport1 & 0xFFF0) | 0x01;
+	pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, v16);
+	pci_read_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, &v8);
+	v8 = (v8 & 0xCF) | 0x10;
+	pci_write_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, v8);
+}
+
+/* Disables the device - this comes from the AML code in the ACPI bios */
+static void sonypi_type1_dis(void)
+{
+	u32 v;
+
+	pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
+	v = v & 0xFF3FFFFF;
+	pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
+
+	v = inl(SONYPI_IRQ_PORT);
+	v |= (0x3 << SONYPI_IRQ_SHIFT);
+	outl(v, SONYPI_IRQ_PORT);
+}
+
+static void sonypi_type2_dis(void)
+{
+	if (sonypi_ec_write(SONYPI_SHIB, 0))
+		printk(KERN_WARNING "ec_write failed\n");
+	if (sonypi_ec_write(SONYPI_SLOB, 0))
+		printk(KERN_WARNING "ec_write failed\n");
+	if (sonypi_ec_write(SONYPI_SIRQ, 0))
+		printk(KERN_WARNING "ec_write failed\n");
+}
+
+static void sonypi_type3_dis(void)
+{
+	sonypi_type2_dis();
+	udelay(10);
+	pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, 0);
+}
+
+static u8 sonypi_call1(u8 dev)
+{
+	u8 v1, v2;
+
+	wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+	outb(dev, sonypi_device.ioport2);
+	v1 = inb_p(sonypi_device.ioport2);
+	v2 = inb_p(sonypi_device.ioport1);
+	return v2;
+}
+
+static u8 sonypi_call2(u8 dev, u8 fn)
+{
+	u8 v1;
+
+	wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+	outb(dev, sonypi_device.ioport2);
+	wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+	outb(fn, sonypi_device.ioport1);
+	v1 = inb_p(sonypi_device.ioport1);
+	return v1;
+}
+
+static u8 sonypi_call3(u8 dev, u8 fn, u8 v)
+{
+	u8 v1;
+
+	wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+	outb(dev, sonypi_device.ioport2);
+	wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+	outb(fn, sonypi_device.ioport1);
+	wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+	outb(v, sonypi_device.ioport1);
+	v1 = inb_p(sonypi_device.ioport1);
+	return v1;
+}
+
+#if 0
+/* Get brightness, hue etc. Unreliable... */
+static u8 sonypi_read(u8 fn)
+{
+	u8 v1, v2;
+	int n = 100;
+
+	while (n--) {
+		v1 = sonypi_call2(0x8f, fn);
+		v2 = sonypi_call2(0x8f, fn);
+		if (v1 == v2 && v1 != 0xff)
+			return v1;
+	}
+	return 0xff;
+}
+#endif
+
+/* Set brightness, hue etc */
+static void sonypi_set(u8 fn, u8 v)
+{
+	wait_on_command(0, sonypi_call3(0x90, fn, v), ITERATIONS_SHORT);
+}
+
+/* Tests if the camera is ready */
+static int sonypi_camera_ready(void)
+{
+	u8 v;
+
+	v = sonypi_call2(0x8f, SONYPI_CAMERA_STATUS);
+	return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY));
+}
+
+/* Turns the camera off */
+static void sonypi_camera_off(void)
+{
+	sonypi_set(SONYPI_CAMERA_PICTURE, SONYPI_CAMERA_MUTE_MASK);
+
+	if (!sonypi_device.camera_power)
+		return;
+
+	sonypi_call2(0x91, 0);
+	sonypi_device.camera_power = 0;
+}
+
+/* Turns the camera on */
+static void sonypi_camera_on(void)
+{
+	int i, j;
+
+	if (sonypi_device.camera_power)
+		return;
+
+	for (j = 5; j > 0; j--) {
+
+		while (sonypi_call2(0x91, 0x1))
+			msleep(10);
+		sonypi_call1(0x93);
+
+		for (i = 400; i > 0; i--) {
+			if (sonypi_camera_ready())
+				break;
+			msleep(10);
+		}
+		if (i)
+			break;
+	}
+
+	if (j == 0) {
+		printk(KERN_WARNING "sonypi: failed to power on camera\n");
+		return;
+	}
+
+	sonypi_set(0x10, 0x5a);
+	sonypi_device.camera_power = 1;
+}
+
+/* sets the bluetooth subsystem power state */
+static void sonypi_setbluetoothpower(u8 state)
+{
+	state = !!state;
+
+	if (sonypi_device.bluetooth_power == state)
+		return;
+
+	sonypi_call2(0x96, state);
+	sonypi_call1(0x82);
+	sonypi_device.bluetooth_power = state;
+}
+
+static void input_keyrelease(struct work_struct *work)
+{
+	struct sonypi_keypress kp;
+
+	while (kfifo_out_locked(&sonypi_device.input_fifo, (unsigned char *)&kp,
+			 sizeof(kp), &sonypi_device.input_fifo_lock)
+			== sizeof(kp)) {
+		msleep(10);
+		input_report_key(kp.dev, kp.key, 0);
+		input_sync(kp.dev);
+	}
+}
+
+static void sonypi_report_input_event(u8 event)
+{
+	struct input_dev *jog_dev = sonypi_device.input_jog_dev;
+	struct input_dev *key_dev = sonypi_device.input_key_dev;
+	struct sonypi_keypress kp = { NULL };
+	int i;
+
+	switch (event) {
+	case SONYPI_EVENT_JOGDIAL_UP:
+	case SONYPI_EVENT_JOGDIAL_UP_PRESSED:
+		input_report_rel(jog_dev, REL_WHEEL, 1);
+		input_sync(jog_dev);
+		break;
+
+	case SONYPI_EVENT_JOGDIAL_DOWN:
+	case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED:
+		input_report_rel(jog_dev, REL_WHEEL, -1);
+		input_sync(jog_dev);
+		break;
+
+	case SONYPI_EVENT_JOGDIAL_PRESSED:
+		kp.key = BTN_MIDDLE;
+		kp.dev = jog_dev;
+		break;
+
+	case SONYPI_EVENT_FNKEY_RELEASED:
+		/* Nothing, not all VAIOs generate this event */
+		break;
+
+	default:
+		for (i = 0; sonypi_inputkeys[i].sonypiev; i++)
+			if (event == sonypi_inputkeys[i].sonypiev) {
+				kp.dev = key_dev;
+				kp.key = sonypi_inputkeys[i].inputev;
+				break;
+			}
+		break;
+	}
+
+	if (kp.dev) {
+		input_report_key(kp.dev, kp.key, 1);
+		input_sync(kp.dev);
+		kfifo_in_locked(&sonypi_device.input_fifo,
+			(unsigned char *)&kp, sizeof(kp),
+			&sonypi_device.input_fifo_lock);
+		schedule_work(&sonypi_device.input_work);
+	}
+}
+
+/* Interrupt handler: some event is available */
+static irqreturn_t sonypi_irq(int irq, void *dev_id)
+{
+	u8 v1, v2, event = 0;
+	int i, j;
+
+	v1 = inb_p(sonypi_device.ioport1);
+	v2 = inb_p(sonypi_device.ioport1 + sonypi_device.evtype_offset);
+
+	for (i = 0; sonypi_eventtypes[i].model; i++) {
+		if (sonypi_device.model != sonypi_eventtypes[i].model)
+			continue;
+		if ((v2 & sonypi_eventtypes[i].data) !=
+		    sonypi_eventtypes[i].data)
+			continue;
+		if (!(mask & sonypi_eventtypes[i].mask))
+			continue;
+		for (j = 0; sonypi_eventtypes[i].events[j].event; j++) {
+			if (v1 == sonypi_eventtypes[i].events[j].data) {
+				event = sonypi_eventtypes[i].events[j].event;
+				goto found;
+			}
+		}
+	}
+
+	if (verbose)
+		printk(KERN_WARNING
+		       "sonypi: unknown event port1=0x%02x,port2=0x%02x\n",
+		       v1, v2);
+	/* We need to return IRQ_HANDLED here because there *are*
+	 * events belonging to the sonypi device we don't know about,
+	 * but we still don't want those to pollute the logs... */
+	return IRQ_HANDLED;
+
+found:
+	if (verbose > 1)
+		printk(KERN_INFO
+		       "sonypi: event port1=0x%02x,port2=0x%02x\n", v1, v2);
+
+	if (useinput)
+		sonypi_report_input_event(event);
+
+	kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event,
+			sizeof(event), &sonypi_device.fifo_lock);
+	kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN);
+	wake_up_interruptible(&sonypi_device.fifo_proc_list);
+
+	return IRQ_HANDLED;
+}
+
+static int sonypi_misc_fasync(int fd, struct file *filp, int on)
+{
+	return fasync_helper(fd, filp, on, &sonypi_device.fifo_async);
+}
+
+static int sonypi_misc_release(struct inode *inode, struct file *file)
+{
+	mutex_lock(&sonypi_device.lock);
+	sonypi_device.open_count--;
+	mutex_unlock(&sonypi_device.lock);
+	return 0;
+}
+
+static int sonypi_misc_open(struct inode *inode, struct file *file)
+{
+	mutex_lock(&sonypi_device.lock);
+	/* Flush input queue on first open */
+	if (!sonypi_device.open_count)
+		kfifo_reset(&sonypi_device.fifo);
+	sonypi_device.open_count++;
+	mutex_unlock(&sonypi_device.lock);
+
+	return 0;
+}
+
+static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
+				size_t count, loff_t *pos)
+{
+	ssize_t ret;
+	unsigned char c;
+
+	if ((kfifo_len(&sonypi_device.fifo) == 0) &&
+	    (file->f_flags & O_NONBLOCK))
+		return -EAGAIN;
+
+	ret = wait_event_interruptible(sonypi_device.fifo_proc_list,
+				       kfifo_len(&sonypi_device.fifo) != 0);
+	if (ret)
+		return ret;
+
+	while (ret < count &&
+	       (kfifo_out_locked(&sonypi_device.fifo, &c, sizeof(c),
+				 &sonypi_device.fifo_lock) == sizeof(c))) {
+		if (put_user(c, buf++))
+			return -EFAULT;
+		ret++;
+	}
+
+	if (ret > 0) {
+		struct inode *inode = file_inode(file);
+		inode->i_atime = current_time(inode);
+	}
+
+	return ret;
+}
+
+static __poll_t sonypi_misc_poll(struct file *file, poll_table *wait)
+{
+	poll_wait(file, &sonypi_device.fifo_proc_list, wait);
+	if (kfifo_len(&sonypi_device.fifo))
+		return EPOLLIN | EPOLLRDNORM;
+	return 0;
+}
+
+static long sonypi_misc_ioctl(struct file *fp,
+			     unsigned int cmd, unsigned long arg)
+{
+	long ret = 0;
+	void __user *argp = (void __user *)arg;
+	u8 val8;
+	u16 val16;
+
+	mutex_lock(&sonypi_device.lock);
+	switch (cmd) {
+	case SONYPI_IOCGBRT:
+		if (sonypi_ec_read(SONYPI_LCD_LIGHT, &val8)) {
+			ret = -EIO;
+			break;
+		}
+		if (copy_to_user(argp, &val8, sizeof(val8)))
+			ret = -EFAULT;
+		break;
+	case SONYPI_IOCSBRT:
+		if (copy_from_user(&val8, argp, sizeof(val8))) {
+			ret = -EFAULT;
+			break;
+		}
+		if (sonypi_ec_write(SONYPI_LCD_LIGHT, val8))
+			ret = -EIO;
+		break;
+	case SONYPI_IOCGBAT1CAP:
+		if (ec_read16(SONYPI_BAT1_FULL, &val16)) {
+			ret = -EIO;
+			break;
+		}
+		if (copy_to_user(argp, &val16, sizeof(val16)))
+			ret = -EFAULT;
+		break;
+	case SONYPI_IOCGBAT1REM:
+		if (ec_read16(SONYPI_BAT1_LEFT, &val16)) {
+			ret = -EIO;
+			break;
+		}
+		if (copy_to_user(argp, &val16, sizeof(val16)))
+			ret = -EFAULT;
+		break;
+	case SONYPI_IOCGBAT2CAP:
+		if (ec_read16(SONYPI_BAT2_FULL, &val16)) {
+			ret = -EIO;
+			break;
+		}
+		if (copy_to_user(argp, &val16, sizeof(val16)))
+			ret = -EFAULT;
+		break;
+	case SONYPI_IOCGBAT2REM:
+		if (ec_read16(SONYPI_BAT2_LEFT, &val16)) {
+			ret = -EIO;
+			break;
+		}
+		if (copy_to_user(argp, &val16, sizeof(val16)))
+			ret = -EFAULT;
+		break;
+	case SONYPI_IOCGBATFLAGS:
+		if (sonypi_ec_read(SONYPI_BAT_FLAGS, &val8)) {
+			ret = -EIO;
+			break;
+		}
+		val8 &= 0x07;
+		if (copy_to_user(argp, &val8, sizeof(val8)))
+			ret = -EFAULT;
+		break;
+	case SONYPI_IOCGBLUE:
+		val8 = sonypi_device.bluetooth_power;
+		if (copy_to_user(argp, &val8, sizeof(val8)))
+			ret = -EFAULT;
+		break;
+	case SONYPI_IOCSBLUE:
+		if (copy_from_user(&val8, argp, sizeof(val8))) {
+			ret = -EFAULT;
+			break;
+		}
+		sonypi_setbluetoothpower(val8);
+		break;
+	/* FAN Controls */
+	case SONYPI_IOCGFAN:
+		if (sonypi_ec_read(SONYPI_FAN0_STATUS, &val8)) {
+			ret = -EIO;
+			break;
+		}
+		if (copy_to_user(argp, &val8, sizeof(val8)))
+			ret = -EFAULT;
+		break;
+	case SONYPI_IOCSFAN:
+		if (copy_from_user(&val8, argp, sizeof(val8))) {
+			ret = -EFAULT;
+			break;
+		}
+		if (sonypi_ec_write(SONYPI_FAN0_STATUS, val8))
+			ret = -EIO;
+		break;
+	/* GET Temperature (useful under APM) */
+	case SONYPI_IOCGTEMP:
+		if (sonypi_ec_read(SONYPI_TEMP_STATUS, &val8)) {
+			ret = -EIO;
+			break;
+		}
+		if (copy_to_user(argp, &val8, sizeof(val8)))
+			ret = -EFAULT;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	mutex_unlock(&sonypi_device.lock);
+	return ret;
+}
+
+static const struct file_operations sonypi_misc_fops = {
+	.owner		= THIS_MODULE,
+	.read		= sonypi_misc_read,
+	.poll		= sonypi_misc_poll,
+	.open		= sonypi_misc_open,
+	.release	= sonypi_misc_release,
+	.fasync		= sonypi_misc_fasync,
+	.unlocked_ioctl	= sonypi_misc_ioctl,
+	.llseek		= no_llseek,
+};
+
+static struct miscdevice sonypi_misc_device = {
+	.minor		= MISC_DYNAMIC_MINOR,
+	.name		= "sonypi",
+	.fops		= &sonypi_misc_fops,
+};
+
+static void sonypi_enable(unsigned int camera_on)
+{
+	switch (sonypi_device.model) {
+	case SONYPI_DEVICE_MODEL_TYPE1:
+		sonypi_type1_srs();
+		break;
+	case SONYPI_DEVICE_MODEL_TYPE2:
+		sonypi_type2_srs();
+		break;
+	case SONYPI_DEVICE_MODEL_TYPE3:
+		sonypi_type3_srs();
+		break;
+	}
+
+	sonypi_call1(0x82);
+	sonypi_call2(0x81, 0xff);
+	sonypi_call1(compat ? 0x92 : 0x82);
+
+	/* Enable ACPI mode to get Fn key events */
+	if (!SONYPI_ACPI_ACTIVE && fnkeyinit)
+		outb(0xf0, 0xb2);
+
+	if (camera && camera_on)
+		sonypi_camera_on();
+}
+
+static int sonypi_disable(void)
+{
+	sonypi_call2(0x81, 0);	/* make sure we don't get any more events */
+	if (camera)
+		sonypi_camera_off();
+
+	/* disable ACPI mode */
+	if (!SONYPI_ACPI_ACTIVE && fnkeyinit)
+		outb(0xf1, 0xb2);
+
+	switch (sonypi_device.model) {
+	case SONYPI_DEVICE_MODEL_TYPE1:
+		sonypi_type1_dis();
+		break;
+	case SONYPI_DEVICE_MODEL_TYPE2:
+		sonypi_type2_dis();
+		break;
+	case SONYPI_DEVICE_MODEL_TYPE3:
+		sonypi_type3_dis();
+		break;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_ACPI
+static int sonypi_acpi_add(struct acpi_device *device)
+{
+	sonypi_acpi_device = device;
+	strcpy(acpi_device_name(device), "Sony laptop hotkeys");
+	strcpy(acpi_device_class(device), "sony/hotkey");
+	return 0;
+}
+
+static int sonypi_acpi_remove(struct acpi_device *device)
+{
+	sonypi_acpi_device = NULL;
+	return 0;
+}
+
+static const struct acpi_device_id sonypi_device_ids[] = {
+	{"SNY6001", 0},
+	{"", 0},
+};
+
+static struct acpi_driver sonypi_acpi_driver = {
+	.name           = "sonypi",
+	.class          = "hkey",
+	.ids            = sonypi_device_ids,
+	.ops            = {
+		           .add = sonypi_acpi_add,
+			   .remove = sonypi_acpi_remove,
+	},
+};
+#endif
+
+static int sonypi_create_input_devices(struct platform_device *pdev)
+{
+	struct input_dev *jog_dev;
+	struct input_dev *key_dev;
+	int i;
+	int error;
+
+	sonypi_device.input_jog_dev = jog_dev = input_allocate_device();
+	if (!jog_dev)
+		return -ENOMEM;
+
+	jog_dev->name = "Sony Vaio Jogdial";
+	jog_dev->id.bustype = BUS_ISA;
+	jog_dev->id.vendor = PCI_VENDOR_ID_SONY;
+	jog_dev->dev.parent = &pdev->dev;
+
+	jog_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
+	jog_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_MIDDLE);
+	jog_dev->relbit[0] = BIT_MASK(REL_WHEEL);
+
+	sonypi_device.input_key_dev = key_dev = input_allocate_device();
+	if (!key_dev) {
+		error = -ENOMEM;
+		goto err_free_jogdev;
+	}
+
+	key_dev->name = "Sony Vaio Keys";
+	key_dev->id.bustype = BUS_ISA;
+	key_dev->id.vendor = PCI_VENDOR_ID_SONY;
+	key_dev->dev.parent = &pdev->dev;
+
+	/* Initialize the Input Drivers: special keys */
+	key_dev->evbit[0] = BIT_MASK(EV_KEY);
+	for (i = 0; sonypi_inputkeys[i].sonypiev; i++)
+		if (sonypi_inputkeys[i].inputev)
+			set_bit(sonypi_inputkeys[i].inputev, key_dev->keybit);
+
+	error = input_register_device(jog_dev);
+	if (error)
+		goto err_free_keydev;
+
+	error = input_register_device(key_dev);
+	if (error)
+		goto err_unregister_jogdev;
+
+	return 0;
+
+ err_unregister_jogdev:
+	input_unregister_device(jog_dev);
+	/* Set to NULL so we don't free it again below */
+	jog_dev = NULL;
+ err_free_keydev:
+	input_free_device(key_dev);
+	sonypi_device.input_key_dev = NULL;
+ err_free_jogdev:
+	input_free_device(jog_dev);
+	sonypi_device.input_jog_dev = NULL;
+
+	return error;
+}
+
+static int sonypi_setup_ioports(struct sonypi_device *dev,
+				const struct sonypi_ioport_list *ioport_list)
+{
+	/* try to detect if sony-laptop is being used and thus
+	 * has already requested one of the known ioports.
+	 * As in the deprecated check_region, this is racy, as we have
+	 * multiple ioports available and one of them can be requested
+	 * between this check and the subsequent request.  Anyway, as an
+	 * attempt to be somewhat more user-friendly than we currently
+	 * are, this is enough.
+	 */
+	const struct sonypi_ioport_list *check = ioport_list;
+	while (check_ioport && check->port1) {
+		if (!request_region(check->port1,
+				   sonypi_device.region_size,
+				   "Sony Programmable I/O Device Check")) {
+			printk(KERN_ERR "sonypi: ioport 0x%.4x busy, is sony-laptop "
+					"loaded? If not, pass check_ioport=0\n",
+					check->port1);
+			return -EBUSY;
+		}
+		release_region(check->port1, sonypi_device.region_size);
+		check++;
+	}
+
+	while (ioport_list->port1) {
+
+		if (request_region(ioport_list->port1,
+				   sonypi_device.region_size,
+				   "Sony Programmable I/O Device")) {
+			dev->ioport1 = ioport_list->port1;
+			dev->ioport2 = ioport_list->port2;
+			return 0;
+		}
+		ioport_list++;
+	}
+
+	return -EBUSY;
+}
+
+static int sonypi_setup_irq(struct sonypi_device *dev,
+				      const struct sonypi_irq_list *irq_list)
+{
+	while (irq_list->irq) {
+
+		if (!request_irq(irq_list->irq, sonypi_irq,
+				 IRQF_SHARED, "sonypi", sonypi_irq)) {
+			dev->irq = irq_list->irq;
+			dev->bits = irq_list->bits;
+			return 0;
+		}
+		irq_list++;
+	}
+
+	return -EBUSY;
+}
+
+static void sonypi_display_info(void)
+{
+	printk(KERN_INFO "sonypi: detected type%d model, "
+	       "verbose = %d, fnkeyinit = %s, camera = %s, "
+	       "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n",
+	       sonypi_device.model,
+	       verbose,
+	       fnkeyinit ? "on" : "off",
+	       camera ? "on" : "off",
+	       compat ? "on" : "off",
+	       mask,
+	       useinput ? "on" : "off",
+	       SONYPI_ACPI_ACTIVE ? "on" : "off");
+	printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n",
+	       sonypi_device.irq,
+	       sonypi_device.ioport1, sonypi_device.ioport2);
+
+	if (minor == -1)
+		printk(KERN_INFO "sonypi: device allocated minor is %d\n",
+		       sonypi_misc_device.minor);
+}
+
+static int sonypi_probe(struct platform_device *dev)
+{
+	const struct sonypi_ioport_list *ioport_list;
+	const struct sonypi_irq_list *irq_list;
+	struct pci_dev *pcidev;
+	int error;
+
+	printk(KERN_WARNING "sonypi: please try the sony-laptop module instead "
+			"and report failures, see also "
+			"http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n");
+
+	spin_lock_init(&sonypi_device.fifo_lock);
+	error = kfifo_alloc(&sonypi_device.fifo, SONYPI_BUF_SIZE, GFP_KERNEL);
+	if (error) {
+		printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
+		return error;
+	}
+
+	init_waitqueue_head(&sonypi_device.fifo_proc_list);
+	mutex_init(&sonypi_device.lock);
+	sonypi_device.bluetooth_power = -1;
+
+	if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				     PCI_DEVICE_ID_INTEL_82371AB_3, NULL)))
+		sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE1;
+	else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+					  PCI_DEVICE_ID_INTEL_ICH6_1, NULL)))
+		sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3;
+	else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+					  PCI_DEVICE_ID_INTEL_ICH7_1, NULL)))
+		sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3;
+	else
+		sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE2;
+
+	if (pcidev && pci_enable_device(pcidev)) {
+		printk(KERN_ERR "sonypi: pci_enable_device failed\n");
+		error = -EIO;
+		goto err_put_pcidev;
+	}
+
+	sonypi_device.dev = pcidev;
+
+	if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) {
+		ioport_list = sonypi_type1_ioport_list;
+		sonypi_device.region_size = SONYPI_TYPE1_REGION_SIZE;
+		sonypi_device.evtype_offset = SONYPI_TYPE1_EVTYPE_OFFSET;
+		irq_list = sonypi_type1_irq_list;
+	} else if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) {
+		ioport_list = sonypi_type2_ioport_list;
+		sonypi_device.region_size = SONYPI_TYPE2_REGION_SIZE;
+		sonypi_device.evtype_offset = SONYPI_TYPE2_EVTYPE_OFFSET;
+		irq_list = sonypi_type2_irq_list;
+	} else {
+		ioport_list = sonypi_type3_ioport_list;
+		sonypi_device.region_size = SONYPI_TYPE3_REGION_SIZE;
+		sonypi_device.evtype_offset = SONYPI_TYPE3_EVTYPE_OFFSET;
+		irq_list = sonypi_type3_irq_list;
+	}
+
+	error = sonypi_setup_ioports(&sonypi_device, ioport_list);
+	if (error) {
+		printk(KERN_ERR "sonypi: failed to request ioports\n");
+		goto err_disable_pcidev;
+	}
+
+	error = sonypi_setup_irq(&sonypi_device, irq_list);
+	if (error) {
+		printk(KERN_ERR "sonypi: request_irq failed\n");
+		goto err_free_ioports;
+	}
+
+	if (minor != -1)
+		sonypi_misc_device.minor = minor;
+	error = misc_register(&sonypi_misc_device);
+	if (error) {
+		printk(KERN_ERR "sonypi: misc_register failed\n");
+		goto err_free_irq;
+	}
+
+	sonypi_display_info();
+
+	if (useinput) {
+
+		error = sonypi_create_input_devices(dev);
+		if (error) {
+			printk(KERN_ERR
+				"sonypi: failed to create input devices\n");
+			goto err_miscdev_unregister;
+		}
+
+		spin_lock_init(&sonypi_device.input_fifo_lock);
+		error = kfifo_alloc(&sonypi_device.input_fifo, SONYPI_BUF_SIZE,
+				GFP_KERNEL);
+		if (error) {
+			printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
+			goto err_inpdev_unregister;
+		}
+
+		INIT_WORK(&sonypi_device.input_work, input_keyrelease);
+	}
+
+	sonypi_enable(0);
+
+	return 0;
+
+ err_inpdev_unregister:
+	input_unregister_device(sonypi_device.input_key_dev);
+	input_unregister_device(sonypi_device.input_jog_dev);
+ err_miscdev_unregister:
+	misc_deregister(&sonypi_misc_device);
+ err_free_irq:
+	free_irq(sonypi_device.irq, sonypi_irq);
+ err_free_ioports:
+	release_region(sonypi_device.ioport1, sonypi_device.region_size);
+ err_disable_pcidev:
+	if (pcidev)
+		pci_disable_device(pcidev);
+ err_put_pcidev:
+	pci_dev_put(pcidev);
+	kfifo_free(&sonypi_device.fifo);
+
+	return error;
+}
+
+static int sonypi_remove(struct platform_device *dev)
+{
+	sonypi_disable();
+
+	synchronize_irq(sonypi_device.irq);
+	flush_work(&sonypi_device.input_work);
+
+	if (useinput) {
+		input_unregister_device(sonypi_device.input_key_dev);
+		input_unregister_device(sonypi_device.input_jog_dev);
+		kfifo_free(&sonypi_device.input_fifo);
+	}
+
+	misc_deregister(&sonypi_misc_device);
+
+	free_irq(sonypi_device.irq, sonypi_irq);
+	release_region(sonypi_device.ioport1, sonypi_device.region_size);
+
+	if (sonypi_device.dev) {
+		pci_disable_device(sonypi_device.dev);
+		pci_dev_put(sonypi_device.dev);
+	}
+
+	kfifo_free(&sonypi_device.fifo);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int old_camera_power;
+
+static int sonypi_suspend(struct device *dev)
+{
+	old_camera_power = sonypi_device.camera_power;
+	sonypi_disable();
+
+	return 0;
+}
+
+static int sonypi_resume(struct device *dev)
+{
+	sonypi_enable(old_camera_power);
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(sonypi_pm, sonypi_suspend, sonypi_resume);
+#define SONYPI_PM	(&sonypi_pm)
+#else
+#define SONYPI_PM	NULL
+#endif
+
+static void sonypi_shutdown(struct platform_device *dev)
+{
+	sonypi_disable();
+}
+
+static struct platform_driver sonypi_driver = {
+	.driver		= {
+		.name	= "sonypi",
+		.pm	= SONYPI_PM,
+	},
+	.probe		= sonypi_probe,
+	.remove		= sonypi_remove,
+	.shutdown	= sonypi_shutdown,
+};
+
+static struct platform_device *sonypi_platform_device;
+
+static const struct dmi_system_id sonypi_dmi_table[] __initconst = {
+	{
+		.ident = "Sony Vaio",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"),
+		},
+	},
+	{
+		.ident = "Sony Vaio",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"),
+		},
+	},
+	{ }
+};
+
+static int __init sonypi_init(void)
+{
+	int error;
+
+	printk(KERN_INFO
+		"sonypi: Sony Programmable I/O Controller Driver v%s.\n",
+		SONYPI_DRIVER_VERSION);
+
+	if (!dmi_check_system(sonypi_dmi_table))
+		return -ENODEV;
+
+	error = platform_driver_register(&sonypi_driver);
+	if (error)
+		return error;
+
+	sonypi_platform_device = platform_device_alloc("sonypi", -1);
+	if (!sonypi_platform_device) {
+		error = -ENOMEM;
+		goto err_driver_unregister;
+	}
+
+	error = platform_device_add(sonypi_platform_device);
+	if (error)
+		goto err_free_device;
+
+#ifdef CONFIG_ACPI
+	if (acpi_bus_register_driver(&sonypi_acpi_driver) >= 0)
+		acpi_driver_registered = 1;
+#endif
+
+	return 0;
+
+ err_free_device:
+	platform_device_put(sonypi_platform_device);
+ err_driver_unregister:
+	platform_driver_unregister(&sonypi_driver);
+	return error;
+}
+
+static void __exit sonypi_exit(void)
+{
+#ifdef CONFIG_ACPI
+	if (acpi_driver_registered)
+		acpi_bus_unregister_driver(&sonypi_acpi_driver);
+#endif
+	platform_device_unregister(sonypi_platform_device);
+	platform_driver_unregister(&sonypi_driver);
+	printk(KERN_INFO "sonypi: removed.\n");
+}
+
+module_init(sonypi_init);
+module_exit(sonypi_exit);
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
new file mode 100644
index 0000000..7c19d9b
--- /dev/null
+++ b/drivers/char/tb0219.c
@@ -0,0 +1,372 @@
+/*
+ *  Driver for TANBAC TB0219 base board.
+ *
+ *  Copyright (C) 2005  Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include <asm/io.h>
+#include <asm/reboot.h>
+#include <asm/vr41xx/giu.h>
+#include <asm/vr41xx/tb0219.h>
+
+MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
+MODULE_DESCRIPTION("TANBAC TB0219 base board driver");
+MODULE_LICENSE("GPL");
+
+static int major;	/* default is dynamic major device number */
+module_param(major, int, 0);
+MODULE_PARM_DESC(major, "Major device number");
+
+static void (*old_machine_restart)(char *command);
+static void __iomem *tb0219_base;
+static DEFINE_SPINLOCK(tb0219_lock);
+
+#define tb0219_read(offset)		readw(tb0219_base + (offset))
+#define tb0219_write(offset, value)	writew((value), tb0219_base + (offset))
+
+#define TB0219_START	0x0a000000UL
+#define TB0219_SIZE	0x20UL
+
+#define TB0219_LED			0x00
+#define TB0219_GPIO_INPUT		0x02
+#define TB0219_GPIO_OUTPUT		0x04
+#define TB0219_DIP_SWITCH		0x06
+#define TB0219_MISC			0x08
+#define TB0219_RESET			0x0e
+#define TB0219_PCI_SLOT1_IRQ_STATUS	0x10
+#define TB0219_PCI_SLOT2_IRQ_STATUS	0x12
+#define TB0219_PCI_SLOT3_IRQ_STATUS	0x14
+
+typedef enum {
+	TYPE_LED,
+	TYPE_GPIO_OUTPUT,
+} tb0219_type_t;
+
+/*
+ * Minor device number
+ *	 0 = 7 segment LED
+ *
+ *	16 = GPIO IN 0
+ *	17 = GPIO IN 1
+ *	18 = GPIO IN 2
+ *	19 = GPIO IN 3
+ *	20 = GPIO IN 4
+ *	21 = GPIO IN 5
+ *	22 = GPIO IN 6
+ *	23 = GPIO IN 7
+ *
+ *	32 = GPIO OUT 0
+ *	33 = GPIO OUT 1
+ *	34 = GPIO OUT 2
+ *	35 = GPIO OUT 3
+ *	36 = GPIO OUT 4
+ *	37 = GPIO OUT 5
+ *	38 = GPIO OUT 6
+ *	39 = GPIO OUT 7
+ *
+ *	48 = DIP switch 1
+ *	49 = DIP switch 2
+ *	50 = DIP switch 3
+ *	51 = DIP switch 4
+ *	52 = DIP switch 5
+ *	53 = DIP switch 6
+ *	54 = DIP switch 7
+ *	55 = DIP switch 8
+ */
+
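+/*
+ * Usage sketch (device node names are hypothetical): with major number
+ * M, "mknod /dev/tb0219-in3 c M 19" exposes GPIO IN 3, and reading one
+ * byte from it returns the character '0' or '1'.  Writing '0' or '1'
+ * to a node with minor 32-39 drives the corresponding GPIO output pin.
+ */
+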
+static inline char get_led(void)
+{
+	return (char)tb0219_read(TB0219_LED);
+}
+
+static inline char get_gpio_input_pin(unsigned int pin)
+{
+	uint16_t values;
+
+	values = tb0219_read(TB0219_GPIO_INPUT);
+	if (values & (1 << pin))
+		return '1';
+
+	return '0';
+}
+
+static inline char get_gpio_output_pin(unsigned int pin)
+{
+	uint16_t values;
+
+	values = tb0219_read(TB0219_GPIO_OUTPUT);
+	if (values & (1 << pin))
+		return '1';
+
+	return '0';
+}
+
+static inline char get_dip_switch(unsigned int pin)
+{
+	uint16_t values;
+
+	values = tb0219_read(TB0219_DIP_SWITCH);
+	if (values & (1 << pin))
+		return '1';
+
+	return '0';
+}
+
+static inline int set_led(char command)
+{
+	tb0219_write(TB0219_LED, command);
+
+	return 0;
+}
+
+static inline int set_gpio_output_pin(unsigned int pin, char command)
+{
+	unsigned long flags;
+	uint16_t value;
+
+	if (command != '0' && command != '1')
+		return -EINVAL;
+
+	spin_lock_irqsave(&tb0219_lock, flags);
+	value = tb0219_read(TB0219_GPIO_OUTPUT);
+	if (command == '0')
+		value &= ~(1 << pin);
+	else
+		value |= 1 << pin;
+	tb0219_write(TB0219_GPIO_OUTPUT, value);
+	spin_unlock_irqrestore(&tb0219_lock, flags);
+
+	return 0;
+
+}
+
+static ssize_t tanbac_tb0219_read(struct file *file, char __user *buf, size_t len,
+                                  loff_t *ppos)
+{
+	unsigned int minor;
+	char value;
+
+	minor = iminor(file_inode(file));
+	switch (minor) {
+	case 0:
+		value = get_led();
+		break;
+	case 16 ... 23:
+		value = get_gpio_input_pin(minor - 16);
+		break;
+	case 32 ... 39:
+		value = get_gpio_output_pin(minor - 32);
+		break;
+	case 48 ... 55:
+		value = get_dip_switch(minor - 48);
+		break;
+	default:
+		return -EBADF;
+	}
+
+	if (len <= 0)
+		return -EFAULT;
+
+	if (put_user(value, buf))
+		return -EFAULT;
+
+	return 1;
+}
+
+static ssize_t tanbac_tb0219_write(struct file *file, const char __user *data,
+                                   size_t len, loff_t *ppos)
+{
+	unsigned int minor;
+	tb0219_type_t type;
+	size_t i;
+	int retval = 0;
+	char c;
+
+	minor = iminor(file_inode(file));
+	switch (minor) {
+	case 0:
+		type = TYPE_LED;
+		break;
+	case 32 ... 39:
+		type = TYPE_GPIO_OUTPUT;
+		break;
+	default:
+		return -EBADF;
+	}
+
+	for (i = 0; i < len; i++) {
+		if (get_user(c, data + i))
+			return -EFAULT;
+
+		switch (type) {
+		case TYPE_LED:
+			retval = set_led(c);
+			break;
+		case TYPE_GPIO_OUTPUT:
+			retval = set_gpio_output_pin(minor - 32, c);
+			break;
+		}
+
+		if (retval < 0)
+			break;
+	}
+
+	return i;
+}
+
+static int tanbac_tb0219_open(struct inode *inode, struct file *file)
+{
+	unsigned int minor;
+
+	minor = iminor(inode);
+	switch (minor) {
+	case 0:
+	case 16 ... 23:
+	case 32 ... 39:
+	case 48 ... 55:
+		return nonseekable_open(inode, file);
+	default:
+		break;
+	}
+
+	return -EBADF;
+}
+
+static int tanbac_tb0219_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static const struct file_operations tb0219_fops = {
+	.owner		= THIS_MODULE,
+	.read		= tanbac_tb0219_read,
+	.write		= tanbac_tb0219_write,
+	.open		= tanbac_tb0219_open,
+	.release	= tanbac_tb0219_release,
+	.llseek		= no_llseek,
+};
+
+static void tb0219_restart(char *command)
+{
+	tb0219_write(TB0219_RESET, 0);
+}
+
+static void tb0219_pci_irq_init(void)
+{
+	/* PCI Slot 1 */
+	vr41xx_set_irq_trigger(TB0219_PCI_SLOT1_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH);
+	vr41xx_set_irq_level(TB0219_PCI_SLOT1_PIN, IRQ_LEVEL_LOW);
+
+	/* PCI Slot 2 */
+	vr41xx_set_irq_trigger(TB0219_PCI_SLOT2_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH);
+	vr41xx_set_irq_level(TB0219_PCI_SLOT2_PIN, IRQ_LEVEL_LOW);
+
+	/* PCI Slot 3 */
+	vr41xx_set_irq_trigger(TB0219_PCI_SLOT3_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH);
+	vr41xx_set_irq_level(TB0219_PCI_SLOT3_PIN, IRQ_LEVEL_LOW);
+}
+
+static int tb0219_probe(struct platform_device *dev)
+{
+	int retval;
+
+	if (request_mem_region(TB0219_START, TB0219_SIZE, "TB0219") == NULL)
+		return -EBUSY;
+
+	tb0219_base = ioremap(TB0219_START, TB0219_SIZE);
+	if (tb0219_base == NULL) {
+		release_mem_region(TB0219_START, TB0219_SIZE);
+		return -ENOMEM;
+	}
+
+	retval = register_chrdev(major, "TB0219", &tb0219_fops);
+	if (retval < 0) {
+		iounmap(tb0219_base);
+		tb0219_base = NULL;
+		release_mem_region(TB0219_START, TB0219_SIZE);
+		return retval;
+	}
+
+	old_machine_restart = _machine_restart;
+	_machine_restart = tb0219_restart;
+
+	tb0219_pci_irq_init();
+
+	if (major == 0) {
+		major = retval;
+		printk(KERN_INFO "TB0219: major number %d\n", major);
+	}
+
+	return 0;
+}
+
+static int tb0219_remove(struct platform_device *dev)
+{
+	_machine_restart = old_machine_restart;
+
+	iounmap(tb0219_base);
+	tb0219_base = NULL;
+
+	release_mem_region(TB0219_START, TB0219_SIZE);
+
+	return 0;
+}
+
+static struct platform_device *tb0219_platform_device;
+
+static struct platform_driver tb0219_device_driver = {
+	.probe		= tb0219_probe,
+	.remove		= tb0219_remove,
+	.driver		= {
+		.name	= "TB0219",
+	},
+};
+
+static int __init tanbac_tb0219_init(void)
+{
+	int retval;
+
+	tb0219_platform_device = platform_device_alloc("TB0219", -1);
+	if (!tb0219_platform_device)
+		return -ENOMEM;
+
+	retval = platform_device_add(tb0219_platform_device);
+	if (retval < 0) {
+		platform_device_put(tb0219_platform_device);
+		return retval;
+	}
+
+	retval = platform_driver_register(&tb0219_device_driver);
+	if (retval < 0)
+		platform_device_unregister(tb0219_platform_device);
+
+	return retval;
+}
+
+static void __exit tanbac_tb0219_exit(void)
+{
+	platform_driver_unregister(&tb0219_device_driver);
+	platform_device_unregister(tb0219_platform_device);
+}
+
+module_init(tanbac_tb0219_init);
+module_exit(tanbac_tb0219_exit);
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
new file mode 100644
index 0000000..8eeb419
--- /dev/null
+++ b/drivers/char/tlclk.c
@@ -0,0 +1,935 @@
+/*
+ * Telecom Clock driver for Intel NetStructure(tm) MPCBL0010
+ *
+ * Copyright (C) 2005 Kontron Canada
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <sebastien.bouchard@ca.kontron.com> and the current
+ * Maintainer  <mark.gross@intel.com>
+ *
+ * Description : This is the TELECOM CLOCK module driver for the ATCA
+ * MPCBL0010 ATCA computer.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>	/* printk() */
+#include <linux/fs.h>		/* everything... */
+#include <linux/errno.h>	/* error codes */
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <asm/io.h>		/* inb/outb */
+#include <linux/uaccess.h>
+
+MODULE_AUTHOR("Sebastien Bouchard <sebastien.bouchard@ca.kontron.com>");
+MODULE_LICENSE("GPL");
+
+/*Hardware Reset of the PLL */
+#define RESET_ON	0x00
+#define RESET_OFF	0x01
+
+/* MODE SELECT */
+#define NORMAL_MODE 	0x00
+#define HOLDOVER_MODE	0x10
+#define FREERUN_MODE	0x20
+
+/* FILTER SELECT */
+#define FILTER_6HZ	0x04
+#define FILTER_12HZ	0x00
+
+/* SELECT REFERENCE FREQUENCY */
+#define REF_CLK1_8kHz		0x00
+#define REF_CLK2_19_44MHz	0x02
+
+/* Select primary or secondary redundant clock */
+#define PRIMARY_CLOCK	0x00
+#define SECONDARY_CLOCK	0x01
+
+/* CLOCK TRANSMISSION DEFINE */
+#define CLK_8kHz	0xff
+#define CLK_16_384MHz	0xfb
+
+#define CLK_1_544MHz	0x00
+#define CLK_2_048MHz	0x01
+#define CLK_4_096MHz	0x02
+#define CLK_6_312MHz	0x03
+#define CLK_8_192MHz	0x04
+#define CLK_19_440MHz	0x06
+
+#define CLK_8_592MHz	0x08
+#define CLK_11_184MHz	0x09
+#define CLK_34_368MHz	0x0b
+#define CLK_44_736MHz	0x0a
+
+/* RECEIVED REFERENCE */
+#define AMC_B1 0
+#define AMC_B2 1
+
+/* HARDWARE SWITCHING DEFINE */
+#define HW_ENABLE	0x80
+#define HW_DISABLE	0x00
+
+/* HARDWARE SWITCHING MODE DEFINE */
+#define PLL_HOLDOVER	0x40
+#define LOST_CLOCK	0x00
+
+/* ALARMS DEFINE */
+#define UNLOCK_MASK	0x10
+#define HOLDOVER_MASK	0x20
+#define SEC_LOST_MASK	0x40
+#define PRI_LOST_MASK	0x80
+
+/* INTERRUPT CAUSE DEFINE */
+
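+/* Each source contributes two cause bits: the _01 mask fires when the
+ * condition goes 0 -> 1 (asserted), the _10 mask when it goes 1 -> 0
+ * (cleared again). */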
+#define PRI_LOS_01_MASK		0x01
+#define PRI_LOS_10_MASK		0x02
+
+#define SEC_LOS_01_MASK		0x04
+#define SEC_LOS_10_MASK		0x08
+
+#define HOLDOVER_01_MASK	0x10
+#define HOLDOVER_10_MASK	0x20
+
+#define UNLOCK_01_MASK		0x40
+#define UNLOCK_10_MASK		0x80
+
+struct tlclk_alarms {
+	__u32 lost_clocks;
+	__u32 lost_primary_clock;
+	__u32 lost_secondary_clock;
+	__u32 primary_clock_back;
+	__u32 secondary_clock_back;
+	__u32 switchover_primary;
+	__u32 switchover_secondary;
+	__u32 pll_holdover;
+	__u32 pll_end_holdover;
+	__u32 pll_lost_sync;
+	__u32 pll_sync;
+};
+/* Telecom clock I/O register definition */
+#define TLCLK_BASE 0xa08
+#define TLCLK_REG0 TLCLK_BASE
+#define TLCLK_REG1 (TLCLK_BASE+1)
+#define TLCLK_REG2 (TLCLK_BASE+2)
+#define TLCLK_REG3 (TLCLK_BASE+3)
+#define TLCLK_REG4 (TLCLK_BASE+4)
+#define TLCLK_REG5 (TLCLK_BASE+5)
+#define TLCLK_REG6 (TLCLK_BASE+6)
+#define TLCLK_REG7 (TLCLK_BASE+7)
+
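+/*
+ * Read-modify-write helper for the clock register file: "mask" selects
+ * the bits to keep, "val" supplies the new bits. For example,
+ * SET_PORT_BITS(TLCLK_REG1, 0xfe, 1) updates only bit 0 of REG1.
+ */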
+#define SET_PORT_BITS(port, mask, val) outb(((inb(port) & mask) | val), port)
+
+/* 0 = Dynamic allocation of the major device number */
+#define TLCLK_MAJOR 0
+
+/* sysfs interface definition:
+Upon loading the driver will create a sysfs directory under
+/sys/devices/platform/telco_clock.
+
+This directory exports the following interfaces.  Their operation is
+documented in the MCPBL0010 TPS under the Telecom Clock API section, 11.4.
+alarms				:
+current_ref			:
+received_ref_clk3a		:
+received_ref_clk3b		:
+enable_clk3a_output		:
+enable_clk3b_output		:
+enable_clka0_output		:
+enable_clka1_output		:
+enable_clkb0_output		:
+enable_clkb1_output		:
+filter_select			:
+hardware_switching		:
+hardware_switching_mode		:
+telclock_version		:
+mode_select			:
+refalign			:
+reset				:
+select_amcb1_transmit_clock	:
+select_amcb2_transmit_clock	:
+select_redundant_clock		:
+select_ref_frequency		:
+
+All sysfs interfaces are integers in hex format, i.e. echo 99 > refalign
+has the same effect as echo 0x99 > refalign.
+*/
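+/*
+ * Example (value meanings per the TPS; shown here for illustration):
+ *   echo 1 > /sys/devices/platform/telco_clock/select_redundant_clock
+ * selects the secondary clock (SECONDARY_CLOCK above); echo 0 selects
+ * the primary.
+ */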
+
+static unsigned int telclk_interrupt;
+
+static int int_events;		/* events that generated an interrupt */
+static int got_event;		/* set once event processing is done */
+
+static void switchover_timeout(struct timer_list *t);
+static struct timer_list switchover_timer;
+static unsigned long tlclk_timer_data;
+
+static struct tlclk_alarms *alarm_events;
+
+static DEFINE_SPINLOCK(event_lock);
+
+static int tlclk_major = TLCLK_MAJOR;
+
+static irqreturn_t tlclk_interrupt(int irq, void *dev_id);
+
+static DECLARE_WAIT_QUEUE_HEAD(wq);
+
+static unsigned long useflags;
+static DEFINE_MUTEX(tlclk_mutex);
+
+static int tlclk_open(struct inode *inode, struct file *filp)
+{
+	int result;
+
+	mutex_lock(&tlclk_mutex);
+	if (test_and_set_bit(0, &useflags)) {
+		result = -EBUSY;
+		/* this legacy device is always one per system and it doesn't
+		 * know how to handle multiple concurrent clients.
+		 */
+		goto out;
+	}
+
+	/* Make sure there is no interrupt pending while
+	 * initialising interrupt handler */
+	inb(TLCLK_REG6);
+
+	/* This device is wired through the FPGA IO space of the ATCA blade;
+	 * we can't share this IRQ */
+	result = request_irq(telclk_interrupt, &tlclk_interrupt,
+			     0, "telco_clock", tlclk_interrupt);
+	if (result == -EBUSY)
+		printk(KERN_ERR "tlclk: Interrupt can't be reserved.\n");
+	else
+		inb(TLCLK_REG6);	/* Clear interrupt events */
+
+out:
+	mutex_unlock(&tlclk_mutex);
+	return result;
+}
+
+static int tlclk_release(struct inode *inode, struct file *filp)
+{
+	free_irq(telclk_interrupt, tlclk_interrupt);
+	clear_bit(0, &useflags);
+
+	return 0;
+}
+
+static ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
+		loff_t *f_pos)
+{
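+	/* A read blocks until the interrupt handler (or the switchover
+	 * timer) reports an event, then returns the accumulated alarm
+	 * counters and resets them. */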
+	if (count < sizeof(struct tlclk_alarms))
+		return -EIO;
+	if (mutex_lock_interruptible(&tlclk_mutex))
+		return -EINTR;
+
+	wait_event_interruptible(wq, got_event);
+	if (copy_to_user(buf, alarm_events, sizeof(struct tlclk_alarms))) {
+		mutex_unlock(&tlclk_mutex);
+		return -EFAULT;
+	}
+
+	memset(alarm_events, 0, sizeof(struct tlclk_alarms));
+	got_event = 0;
+
+	mutex_unlock(&tlclk_mutex);
+	return sizeof(struct tlclk_alarms);
+}
+
+static const struct file_operations tlclk_fops = {
+	.read = tlclk_read,
+	.open = tlclk_open,
+	.release = tlclk_release,
+	.llseek = noop_llseek,
+};
+
+static struct miscdevice tlclk_miscdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "telco_clock",
+	.fops = &tlclk_fops,
+};
+
+static ssize_t show_current_ref(struct device *d,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned long ret_val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&event_lock, flags);
+	ret_val = ((inb(TLCLK_REG1) & 0x08) >> 3);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return sprintf(buf, "0x%lX\n", ret_val);
+}
+
+static DEVICE_ATTR(current_ref, S_IRUGO, show_current_ref, NULL);
+
+
+static ssize_t show_telclock_version(struct device *d,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned long ret_val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&event_lock, flags);
+	ret_val = inb(TLCLK_REG5);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return sprintf(buf, "0x%lX\n", ret_val);
+}
+
+static DEVICE_ATTR(telclock_version, S_IRUGO,
+		show_telclock_version, NULL);
+
+static ssize_t show_alarms(struct device *d,
+		struct device_attribute *attr,  char *buf)
+{
+	unsigned long ret_val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&event_lock, flags);
+	ret_val = (inb(TLCLK_REG2) & 0xf0);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return sprintf(buf, "0x%lX\n", ret_val);
+}
+
+static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+
+static ssize_t store_received_ref_clk3a(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned char val;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG1, 0xef, val);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3a, (S_IWUSR|S_IWGRP), NULL,
+		store_received_ref_clk3a);
+
+
+static ssize_t store_received_ref_clk3b(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned char val;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG1, 0xdf, val << 1);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3b, (S_IWUSR|S_IWGRP), NULL,
+		store_received_ref_clk3b);
+
+
+static ssize_t store_enable_clk3b_output(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned char val;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG3, 0x7f, val << 7);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clk3b_output, (S_IWUSR|S_IWGRP), NULL,
+		store_enable_clk3b_output);
+
+static ssize_t store_enable_clk3a_output(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long flags;
+	unsigned long tmp;
+	unsigned char val;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG3, 0xbf, val << 6);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clk3a_output, (S_IWUSR|S_IWGRP), NULL,
+		store_enable_clk3a_output);
+
+static ssize_t store_enable_clkb1_output(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long flags;
+	unsigned long tmp;
+	unsigned char val;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG2, 0xf7, val << 3);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clkb1_output, (S_IWUSR|S_IWGRP), NULL,
+		store_enable_clkb1_output);
+
+
+static ssize_t store_enable_clka1_output(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long flags;
+	unsigned long tmp;
+	unsigned char val;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG2, 0xfb, val << 2);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clka1_output, (S_IWUSR|S_IWGRP), NULL,
+		store_enable_clka1_output);
+
+static ssize_t store_enable_clkb0_output(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long flags;
+	unsigned long tmp;
+	unsigned char val;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG2, 0xfd, val << 1);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clkb0_output, (S_IWUSR|S_IWGRP), NULL,
+		store_enable_clkb0_output);
+
+static ssize_t store_enable_clka0_output(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long flags;
+	unsigned long tmp;
+	unsigned char val;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG2, 0xfe, val);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clka0_output, (S_IWUSR|S_IWGRP), NULL,
+		store_enable_clka0_output);
+
+static ssize_t store_select_amcb2_transmit_clock(struct device *d,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long flags;
+	unsigned long tmp;
+	unsigned char val;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) {
+		SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x28);
+		SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val);
+	} else if (val >= CLK_8_592MHz) {
+		SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38);
+		switch (val) {
+		case CLK_8_592MHz:
+			SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
+			break;
+		case CLK_11_184MHz:
+			SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
+			break;
+		case CLK_34_368MHz:
+			SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
+			break;
+		case CLK_44_736MHz:
+			SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
+			break;
+		}
+	} else
+		SET_PORT_BITS(TLCLK_REG3, 0xc7, val << 3);
+
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(select_amcb2_transmit_clock, (S_IWUSR|S_IWGRP), NULL,
+	store_select_amcb2_transmit_clock);
+
+static ssize_t store_select_amcb1_transmit_clock(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned char val;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) {
+		SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x5);
+		SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val);
+	} else if (val >= CLK_8_592MHz) {
+		SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x7);
+		switch (val) {
+		case CLK_8_592MHz:
+			SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
+			break;
+		case CLK_11_184MHz:
+			SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
+			break;
+		case CLK_34_368MHz:
+			SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
+			break;
+		case CLK_44_736MHz:
+			SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
+			break;
+		}
+	} else
+		SET_PORT_BITS(TLCLK_REG3, 0xf8, val);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(select_amcb1_transmit_clock, (S_IWUSR|S_IWGRP), NULL,
+		store_select_amcb1_transmit_clock);
+
+static ssize_t store_select_redundant_clock(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned char val;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG1, 0xfe, val);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(select_redundant_clock, (S_IWUSR|S_IWGRP), NULL,
+		store_select_redundant_clock);
+
+static ssize_t store_select_ref_frequency(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned char val;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG1, 0xfd, val);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(select_ref_frequency, (S_IWUSR|S_IWGRP), NULL,
+		store_select_ref_frequency);
+
+static ssize_t store_filter_select(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned char val;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG0, 0xfb, val);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(filter_select, (S_IWUSR|S_IWGRP), NULL, store_filter_select);
+
+static ssize_t store_hardware_switching_mode(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned char val;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG0, 0xbf, val);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(hardware_switching_mode, (S_IWUSR|S_IWGRP), NULL,
+		store_hardware_switching_mode);
+
+static ssize_t store_hardware_switching(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned char val;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG0, 0x7f, val);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(hardware_switching, (S_IWUSR|S_IWGRP), NULL,
+		store_hardware_switching);
+
+static ssize_t store_refalign(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, "tmp = 0x%lX\n", tmp);
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
+	SET_PORT_BITS(TLCLK_REG0, 0xf7, 0x08);
+	SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(refalign, (S_IWUSR|S_IWGRP), NULL, store_refalign);
+
+static ssize_t store_mode_select(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned char val;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG0, 0xcf, val);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(mode_select, (S_IWUSR|S_IWGRP), NULL, store_mode_select);
+
+static ssize_t store_reset(struct device *d,
+		 struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned char val;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG4, 0xfd, val);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(reset, (S_IWUSR|S_IWGRP), NULL, store_reset);
+
+static struct attribute *tlclk_sysfs_entries[] = {
+	&dev_attr_current_ref.attr,
+	&dev_attr_telclock_version.attr,
+	&dev_attr_alarms.attr,
+	&dev_attr_received_ref_clk3a.attr,
+	&dev_attr_received_ref_clk3b.attr,
+	&dev_attr_enable_clk3a_output.attr,
+	&dev_attr_enable_clk3b_output.attr,
+	&dev_attr_enable_clkb1_output.attr,
+	&dev_attr_enable_clka1_output.attr,
+	&dev_attr_enable_clkb0_output.attr,
+	&dev_attr_enable_clka0_output.attr,
+	&dev_attr_select_amcb1_transmit_clock.attr,
+	&dev_attr_select_amcb2_transmit_clock.attr,
+	&dev_attr_select_redundant_clock.attr,
+	&dev_attr_select_ref_frequency.attr,
+	&dev_attr_filter_select.attr,
+	&dev_attr_hardware_switching_mode.attr,
+	&dev_attr_hardware_switching.attr,
+	&dev_attr_refalign.attr,
+	&dev_attr_mode_select.attr,
+	&dev_attr_reset.attr,
+	NULL
+};
+
+static const struct attribute_group tlclk_attribute_group = {
+	.name = NULL,		/* put in device directory */
+	.attrs = tlclk_sysfs_entries,
+};
+
+static struct platform_device *tlclk_device;
+
+static int __init tlclk_init(void)
+{
+	int ret;
+
+	ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
+	if (ret < 0) {
+		printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
+		return ret;
+	}
+	tlclk_major = ret;
+	alarm_events = kzalloc(sizeof(struct tlclk_alarms), GFP_KERNEL);
+	if (!alarm_events) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	/* Read telecom clock IRQ number (Set by BIOS) */
+	if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
+		printk(KERN_ERR "tlclk: request_region 0x%X failed.\n",
+			TLCLK_BASE);
+		ret = -EBUSY;
+		goto out2;
+	}
+	telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
+
+	if (telclk_interrupt == 0x0f) { /* not MCPBL0010? */
+		printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
+			telclk_interrupt);
+		ret = -ENXIO;
+		goto out3;
+	}
+
+	timer_setup(&switchover_timer, switchover_timeout, 0);
+
+	ret = misc_register(&tlclk_miscdev);
+	if (ret < 0) {
+		printk(KERN_ERR "tlclk: misc_register returns %d.\n", ret);
+		goto out3;
+	}
+
+	tlclk_device = platform_device_register_simple("telco_clock",
+				-1, NULL, 0);
+	if (IS_ERR(tlclk_device)) {
+		printk(KERN_ERR "tlclk: platform_device_register failed.\n");
+		ret = PTR_ERR(tlclk_device);
+		goto out4;
+	}
+
+	ret = sysfs_create_group(&tlclk_device->dev.kobj,
+			&tlclk_attribute_group);
+	if (ret) {
+		printk(KERN_ERR "tlclk: failed to create sysfs device attributes.\n");
+		goto out5;
+	}
+
+	return 0;
+out5:
+	platform_device_unregister(tlclk_device);
+out4:
+	misc_deregister(&tlclk_miscdev);
+out3:
+	release_region(TLCLK_BASE, 8);
+out2:
+	kfree(alarm_events);
+out1:
+	unregister_chrdev(tlclk_major, "telco_clock");
+	return ret;
+}
+
+static void __exit tlclk_cleanup(void)
+{
+	sysfs_remove_group(&tlclk_device->dev.kobj, &tlclk_attribute_group);
+	platform_device_unregister(tlclk_device);
+	misc_deregister(&tlclk_miscdev);
+	unregister_chrdev(tlclk_major, "telco_clock");
+
+	release_region(TLCLK_BASE, 8);
+	del_timer_sync(&switchover_timer);
+	kfree(alarm_events);
+}
+
+static void switchover_timeout(struct timer_list *unused)
+{
+	unsigned long flags = tlclk_timer_data;
+
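+	/* "flags" is REG1 as sampled when holdover began: bit 0 is the
+	 * selected redundant clock, bit 3 (0x08) the reference currently
+	 * in use. If bit 3 differs ~10ms later, a switchover occurred. */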
+	if ((flags & 1)) {
+		if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08))
+			alarm_events->switchover_primary++;
+	} else {
+		if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08))
+			alarm_events->switchover_secondary++;
+	}
+
+	/* Alarm processing is done, wake up read task */
+	del_timer(&switchover_timer);
+	got_event = 1;
+	wake_up(&wq);
+}
+
+static irqreturn_t tlclk_interrupt(int irq, void *dev_id)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&event_lock, flags);
+	/* Read and clear interrupt events */
+	int_events = inb(TLCLK_REG6);
+
+	/* Primary_Los changed from 0 to 1 ? */
+	if (int_events & PRI_LOS_01_MASK) {
+		if (inb(TLCLK_REG2) & SEC_LOST_MASK)
+			alarm_events->lost_clocks++;
+		else
+			alarm_events->lost_primary_clock++;
+	}
+
+	/* Primary_Los changed from 1 to 0 ? */
+	if (int_events & PRI_LOS_10_MASK) {
+		alarm_events->primary_clock_back++;
+		SET_PORT_BITS(TLCLK_REG1, 0xFE, 1);
+	}
+	/* Secondary_Los changed from 0 to 1 ? */
+	if (int_events & SEC_LOS_01_MASK) {
+		if (inb(TLCLK_REG2) & PRI_LOST_MASK)
+			alarm_events->lost_clocks++;
+		else
+			alarm_events->lost_secondary_clock++;
+	}
+	/* Secondary_Los changed from 1 to 0 ? */
+	if (int_events & SEC_LOS_10_MASK) {
+		alarm_events->secondary_clock_back++;
+		SET_PORT_BITS(TLCLK_REG1, 0xFE, 0);
+	}
+	if (int_events & HOLDOVER_10_MASK)
+		alarm_events->pll_end_holdover++;
+
+	if (int_events & UNLOCK_01_MASK)
+		alarm_events->pll_lost_sync++;
+
+	if (int_events & UNLOCK_10_MASK)
+		alarm_events->pll_sync++;
+
+	/* Holdover changed from 0 to 1 ? */
+	if (int_events & HOLDOVER_01_MASK) {
+		alarm_events->pll_holdover++;
+
+		/* TIMEOUT in ~10ms */
+		switchover_timer.expires = jiffies + msecs_to_jiffies(10);
+		tlclk_timer_data = inb(TLCLK_REG1);
+		mod_timer(&switchover_timer, switchover_timer.expires);
+	} else {
+		got_event = 1;
+		wake_up(&wq);
+	}
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+module_init(tlclk_init);
+module_exit(tlclk_cleanup);
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
new file mode 100644
index 0000000..802376f
--- /dev/null
+++ b/drivers/char/toshiba.c
@@ -0,0 +1,533 @@
+/* toshiba.c -- Linux driver for accessing the SMM on Toshiba laptops
+ *
+ * Copyright (c) 1996-2001  Jonathan A. Buzzard (jonathan@buzzard.org.uk)
+ *
+ * Valuable assistance and patches from:
+ *     Tom May <tom@you-bastards.com>
+ *     Rob Napier <rnapier@employees.org>
+ *
+ * Fn status port numbers for machine ID's courtesy of
+ *     0xfc02: Scott Eisert <scott.e@sky-eye.com>
+ *     0xfc04: Steve VanDevender <stevev@efn.org>
+ *     0xfc08: Garth Berry <garth@itsbruce.net>
+ *     0xfc0a: Egbert Eich <eich@xfree86.org>
+ *     0xfc10: Andrew Lofthouse <Andrew.Lofthouse@robins.af.mil>
+ *     0xfc11: Spencer Olson <solson@novell.com>
+ *     0xfc13: Claudius Frankewitz <kryp@gmx.de>
+ *     0xfc15: Tom May <tom@you-bastards.com>
+ *     0xfc17: Dave Konrad <konrad@xenia.it>
+ *     0xfc1a: George Betzos <betzos@engr.colostate.edu>
+ *     0xfc1b: Munemasa Wada <munemasa@jnovel.co.jp>
+ *     0xfc1d: Arthur Liu <armie@slap.mine.nu>
+ *     0xfc5a: Jacques L'helgoualc'h <lhh@free.fr>
+ *     0xfcd1: Mr. Dave Konrad <konrad@xenia.it>
+ *
+ * WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
+ *
+ *   This code is covered by the GNU GPL and you are free to make any
+ *   changes you wish to it under the terms of the license. However the
+ *   code has the potential to render your computer and/or someone else's
+ *   unusable. Please proceed with care when modifying the code.
+ *
+ * Note: Unfortunately the laptop hardware can close the System Configuration
+ *       Interface of its own accord. It is therefore necessary for *all*
+ *       programs using this driver to be aware that *any* SCI call can fail at
+ *       *any* time. It is up to any program to be aware of this eventuality
+ *       and take appropriate steps.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The information used to write this driver has been obtained by reverse
+ * engineering the software supplied by Toshiba for their portable computers in
+ * strict accordance with the European Council Directive 92/250/EEC on the legal
+ * protection of computer programs, and its implementation in English law by
+ * the Copyright (Computer Programs) Regulations 1992 (S.I. 1992 No.3233).
+ *
+ */
+
+#define TOSH_VERSION "1.11 26/9/2001"
+#define TOSH_DEBUG 0
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/miscdevice.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/mutex.h>
+#include <linux/toshiba.h>
+
+#define TOSH_MINOR_DEV 181
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonathan Buzzard <jonathan@buzzard.org.uk>");
+MODULE_DESCRIPTION("Toshiba laptop SMM driver");
+MODULE_SUPPORTED_DEVICE("toshiba");
+
+static DEFINE_MUTEX(tosh_mutex);
+static int tosh_fn;
+module_param_named(fn, tosh_fn, int, 0);
+MODULE_PARM_DESC(fn, "User specified Fn key detection port");
+
+static int tosh_id;
+static int tosh_bios;
+static int tosh_date;
+static int tosh_sci;
+static int tosh_fan;
+
+static long tosh_ioctl(struct file *, unsigned int,
+	unsigned long);
+
+
+static const struct file_operations tosh_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl	= tosh_ioctl,
+	.llseek		= noop_llseek,
+};
+
+static struct miscdevice tosh_device = {
+	TOSH_MINOR_DEV,
+	"toshiba",
+	&tosh_fops
+};
+
+/*
+ * Read the Fn key status
+ */
+#ifdef CONFIG_PROC_FS
+static int tosh_fn_status(void)
+{
+	unsigned char scan;
+	unsigned long flags;
+
+	if (tosh_fn!=0) {
+		scan = inb(tosh_fn);
+	} else {
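+		/* No known Fn port for this model: query the Toshiba
+		 * index/data port pair at 0xe4/0xe5 (command 0x8e appears
+		 * to report the Fn key scan status). */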
+		local_irq_save(flags);
+		outb(0x8e, 0xe4);
+		scan = inb(0xe5);
+		local_irq_restore(flags);
+	}
+
+	return (int) scan;
+}
+#endif
+
+
+/*
+ * For the Portage 610CT and the Tecra 700CS/700CDT emulate the HCI fan function
+ */
+static int tosh_emulate_fan(SMMRegisters *regs)
+{
+	unsigned long eax,ecx,flags;
+	unsigned char al;
+
+	eax = regs->eax & 0xff00;
+	ecx = regs->ecx & 0xffff;
+
+	/* Portage 610CT */
+
+	if (tosh_id==0xfccb) {
+		if (eax==0xfe00) {
+			/* fan status */
+			local_irq_save(flags);
+			outb(0xbe, 0xe4);
+			al = inb(0xe5);
+			local_irq_restore(flags);
+			regs->eax = 0x00;
+			regs->ecx = (unsigned int) (al & 0x01);
+		}
+		if ((eax==0xff00) && (ecx==0x0000)) {
+			/* fan off */
+			local_irq_save(flags);
+			outb(0xbe, 0xe4);
+			al = inb(0xe5);
+			outb(0xbe, 0xe4);
+			outb(al | 0x01, 0xe5);
+			local_irq_restore(flags);
+			regs->eax = 0x00;
+			regs->ecx = 0x00;
+		}
+		if ((eax==0xff00) && (ecx==0x0001)) {
+			/* fan on */
+			local_irq_save(flags);
+			outb(0xbe, 0xe4);
+			al = inb(0xe5);
+			outb(0xbe, 0xe4);
+			outb(al & 0xfe, 0xe5);
+			local_irq_restore(flags);
+			regs->eax = 0x00;
+			regs->ecx = 0x01;
+		}
+	}
+
+	/* Tecra 700CS/CDT */
+
+	if (tosh_id==0xfccc) {
+		if (eax==0xfe00) {
+			/* fan status */
+			local_irq_save(flags);
+			outb(0xe0, 0xe4);
+			al = inb(0xe5);
+			local_irq_restore(flags);
+			regs->eax = 0x00;
+			regs->ecx = al & 0x01;
+		}
+		if ((eax==0xff00) && (ecx==0x0000)) {
+			/* fan off */
+			local_irq_save(flags);
+			outb(0xe0, 0xe4);
+			al = inb(0xe5);
+			outw(0xe0 | ((al & 0xfe) << 8), 0xe4);
+			local_irq_restore(flags);
+			regs->eax = 0x00;
+			regs->ecx = 0x00;
+		}
+		if ((eax==0xff00) && (ecx==0x0001)) {
+			/* fan on */
+			local_irq_save(flags);
+			outb(0xe0, 0xe4);
+			al = inb(0xe5);
+			outw(0xe0 | ((al | 0x01) << 8), 0xe4);
+			local_irq_restore(flags);
+			regs->eax = 0x00;
+			regs->ecx = 0x01;
+		}
+	}
+
+	return 0;
+}
+
+
+/*
+ * Put the laptop into System Management Mode
+ */
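+/*
+ * The asm below assumes SMMRegisters lays out eax/ebx/ecx/edx/esi/edi
+ * contiguously at offsets 0, 4, 8, 12, 16 and 20 (see <linux/toshiba.h>);
+ * the "inb $0xb2" is what actually raises the SMI. The carry flag,
+ * extracted via lahf, reports success (0) or failure (1).
+ */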
+int tosh_smm(SMMRegisters *regs)
+{
+	int eax;
+
+	asm ("# load the values into the registers\n\t" \
+		"pushl %%eax\n\t" \
+		"movl 0(%%eax),%%edx\n\t" \
+		"push %%edx\n\t" \
+		"movl 4(%%eax),%%ebx\n\t" \
+		"movl 8(%%eax),%%ecx\n\t" \
+		"movl 12(%%eax),%%edx\n\t" \
+		"movl 16(%%eax),%%esi\n\t" \
+		"movl 20(%%eax),%%edi\n\t" \
+		"popl %%eax\n\t" \
+		"# call the System Management mode\n\t" \
+		"inb $0xb2,%%al\n\t"
+		"# fill out the memory with the values in the registers\n\t" \
+		"xchgl %%eax,(%%esp)\n\t"
+		"movl %%ebx,4(%%eax)\n\t" \
+		"movl %%ecx,8(%%eax)\n\t" \
+		"movl %%edx,12(%%eax)\n\t" \
+		"movl %%esi,16(%%eax)\n\t" \
+		"movl %%edi,20(%%eax)\n\t" \
+		"popl %%edx\n\t" \
+		"movl %%edx,0(%%eax)\n\t" \
+		"# setup the return value to the carry flag\n\t" \
+		"lahf\n\t" \
+		"shrl $8,%%eax\n\t" \
+		"andl $1,%%eax\n" \
+		: "=a" (eax)
+		: "a" (regs)
+		: "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
+
+	return eax;
+}
+EXPORT_SYMBOL(tosh_smm);
+
+
+static long tosh_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	SMMRegisters regs;
+	SMMRegisters __user *argp = (SMMRegisters __user *)arg;
+	unsigned short ax,bx;
+	int err;
+
+	if (!argp)
+		return -EINVAL;
+
+	if (copy_from_user(&regs, argp, sizeof(SMMRegisters)))
+		return -EFAULT;
+
+	switch (cmd) {
+		case TOSH_SMM:
+			ax = regs.eax & 0xff00;
+			bx = regs.ebx & 0xffff;
+			/* block HCI calls to read/write memory & PCI devices */
+			if (((ax==0xff00) || (ax==0xfe00)) && (bx>0x0069))
+				return -EINVAL;
+
+			/* do we need to emulate the fan ? */
+			mutex_lock(&tosh_mutex);
+			if (tosh_fan==1) {
+				if (((ax==0xf300) || (ax==0xf400)) && (bx==0x0004)) {
+					err = tosh_emulate_fan(&regs);
+					mutex_unlock(&tosh_mutex);
+					break;
+				}
+			}
+			err = tosh_smm(&regs);
+			mutex_unlock(&tosh_mutex);
+			break;
+		default:
+			return -EINVAL;
+	}
+
+	if (copy_to_user(argp, &regs, sizeof(SMMRegisters)))
+		return -EFAULT;
+
+	return (err==0) ? 0:-EINVAL;
+}
+
+
+/*
+ * Print the information for /proc/toshiba
+ */
+#ifdef CONFIG_PROC_FS
+static int proc_toshiba_show(struct seq_file *m, void *v)
+{
+	int key;
+
+	key = tosh_fn_status();
+
+	/* Arguments
+	     0) Linux driver version (this will change if format changes)
+	     1) Machine ID
+	     2) SCI version
+	     3) BIOS version (major, minor)
+	     4) BIOS date (in SCI date format)
+	     5) Fn Key status
+	*/
+	seq_printf(m, "1.1 0x%04x %d.%d %d.%d 0x%04x 0x%02x\n",
+		tosh_id,
+		(tosh_sci & 0xff00)>>8,
+		tosh_sci & 0xff,
+		(tosh_bios & 0xff00)>>8,
+		tosh_bios & 0xff,
+		tosh_date,
+		key);
+	return 0;
+}
+#endif
+
+
+/*
+ * Determine which port to use for the Fn key status
+ */
+static void tosh_set_fn_port(void)
+{
+	switch (tosh_id) {
+		case 0xfc02: case 0xfc04: case 0xfc09: case 0xfc0a: case 0xfc10:
+		case 0xfc11: case 0xfc13: case 0xfc15: case 0xfc1a: case 0xfc1b:
+		case 0xfc5a:
+			tosh_fn = 0x62;
+			break;
+		case 0xfc08: case 0xfc17: case 0xfc1d: case 0xfcd1: case 0xfce0:
+		case 0xfce2:
+			tosh_fn = 0x68;
+			break;
+		default:
+			tosh_fn = 0x00;
+			break;
+	}
+
+	return;
+}
+
+
+/*
+ * Get the machine identification number of the current model
+ */
+static int tosh_get_machine_id(void __iomem *bios)
+{
+	int id;
+	SMMRegisters regs;
+	unsigned short bx,cx;
+	unsigned long address;
+
+	id = (0x100*(int) readb(bios+0xfffe))+((int) readb(bios+0xfffa));
+
+	/* do we have an SCTTable machine identification number on our hands? */
+
+	if (id==0xfc2f) {
+
+		/* start by getting a pointer into the BIOS */
+
+		regs.eax = 0xc000;
+		regs.ebx = 0x0000;
+		regs.ecx = 0x0000;
+		tosh_smm(&regs);
+		bx = (unsigned short) (regs.ebx & 0xffff);
+
+		/* At this point in the Toshiba routines under MS Windows
+		   the bx register holds 0xe6f5. However my code is producing
+		   a different value! For the time being I will just fudge the
+		   value. This has been verified on a Satellite Pro 430CDT,
+		   Tecra 750CDT, Tecra 780DVD and Satellite 310CDT. */
+#if TOSH_DEBUG
+		printk("toshiba: debugging ID ebx=0x%04x\n", regs.ebx);
+#endif
+		bx = 0xe6f5;
+
+		/* now twiddle with our pointer a bit */
+
+		address = bx;
+		cx = readw(bios + address);
+		address = 9+bx+cx;
+		cx = readw(bios + address);
+		address = 0xa+cx;
+		cx = readw(bios + address);
+
+		/* now construct our machine identification number */
+
+		id = ((cx & 0xff)<<8)+((cx & 0xff00)>>8);
+	}
+
+	return id;
+}
+
+
+/*
+ * Probe for the presence of a Toshiba laptop
+ *
+ *   Returns non-zero if unable to detect the presence of a Toshiba
+ *   laptop, otherwise zero, having determined the machine ID, BIOS
+ *   version and date, and SCI version.
+ */
+static int tosh_probe(void)
+{
+	int i,major,minor,day,year,month,flag;
+	unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 };
+	SMMRegisters regs;
+	void __iomem *bios = ioremap(0xf0000, 0x10000);
+
+	if (!bios)
+		return -ENOMEM;
+
+	/* extra sanity check for the string "TOSHIBA" in the BIOS because
+	   some machines that are not Toshiba's pass the next test */
+
+	for (i=0;i<7;i++) {
+		if (readb(bios+0xe010+i)!=signature[i]) {
+			printk("toshiba: not a supported Toshiba laptop\n");
+			iounmap(bios);
+			return -ENODEV;
+		}
+	}
+
+	/* call the Toshiba SCI support check routine */
+
+	regs.eax = 0xf0f0;
+	regs.ebx = 0x0000;
+	regs.ecx = 0x0000;
+	flag = tosh_smm(&regs);
+
+	/* if this is not a Toshiba laptop carry flag is set and ah=0x86 */
+
+	if ((flag==1) || ((regs.eax & 0xff00)==0x8600)) {
+		printk("toshiba: not a supported Toshiba laptop\n");
+		iounmap(bios);
+		return -ENODEV;
+	}
+
+	/* if we get this far then we are running on a Toshiba (probably)! */
+
+	tosh_sci = regs.edx & 0xffff;
+
+	/* next get the machine ID of the current laptop */
+
+	tosh_id = tosh_get_machine_id(bios);
+
+	/* get the BIOS version */
+
+	major = readb(bios+0xe009)-'0';
+	minor = ((readb(bios+0xe00b)-'0')*10)+(readb(bios+0xe00c)-'0');
+	tosh_bios = (major*0x100)+minor;
+
+	/* get the BIOS date */
+
+	day = ((readb(bios+0xfff5)-'0')*10)+(readb(bios+0xfff6)-'0');
+	month = ((readb(bios+0xfff8)-'0')*10)+(readb(bios+0xfff9)-'0');
+	year = ((readb(bios+0xfffb)-'0')*10)+(readb(bios+0xfffc)-'0');
+	tosh_date = (((year-90) & 0x1f)<<10) | ((month & 0xf)<<6)
+		| ((day & 0x1f)<<1);
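+	/* tosh_date now holds the SCI date format: bits 10-14 = year-1990,
+	 * bits 6-9 = month, bits 1-5 = day, bit 0 unused; e.g. a BIOS dated
+	 * 26/9/1995 yields 0x1674. */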
+
+
+	/* in theory we should check the ports we are going to use for the
+	   fn key detection (and the fan on the Portage 610/Tecra700), and
+	   then request them to stop other drivers using them. However as
+	   the keyboard driver grabs 0x60-0x6f and the pic driver grabs
+	   0xa0-0xbf we can't. We just have to live dangerously and use the
+	   ports anyway, oh boy! */
+
+	/* do we need to emulate the fan? */
+
+	if ((tosh_id==0xfccb) || (tosh_id==0xfccc))
+		tosh_fan = 1;
+
+	iounmap(bios);
+
+	return 0;
+}
+
+static int __init toshiba_init(void)
+{
+	int retval;
+	/* are we running on a Toshiba laptop */
+
+	if (tosh_probe())
+		return -ENODEV;
+
+	printk(KERN_INFO "Toshiba System Management Mode driver v" TOSH_VERSION "\n");
+
+	/* set the port to use for Fn status if not specified as a parameter */
+	if (tosh_fn==0x00)
+		tosh_set_fn_port();
+
+	/* register the device file */
+	retval = misc_register(&tosh_device);
+	if (retval < 0)
+		return retval;
+
+#ifdef CONFIG_PROC_FS
+	{
+		struct proc_dir_entry *pde;
+
+		pde = proc_create_single("toshiba", 0, NULL, proc_toshiba_show);
+		if (!pde) {
+			misc_deregister(&tosh_device);
+			return -ENOMEM;
+		}
+	}
+#endif
+
+	return 0;
+}
+
+static void __exit toshiba_exit(void)
+{
+	remove_proc_entry("toshiba", NULL);
+	misc_deregister(&tosh_device);
+}
+
+module_init(toshiba_init);
+module_exit(toshiba_exit);
+
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
new file mode 100644
index 0000000..18c81cb
--- /dev/null
+++ b/drivers/char/tpm/Kconfig
@@ -0,0 +1,169 @@
+#
+# TPM device configuration
+#
+
+menuconfig TCG_TPM
+	tristate "TPM Hardware Support"
+	depends on HAS_IOMEM
+	select SECURITYFS
+	select CRYPTO
+	select CRYPTO_HASH_INFO
+	---help---
+	  If you have a TPM security chip in your system, which
+	  implements the Trusted Computing Group's specification,
+	  say Yes and it will be accessible from within Linux.  For
+	  more information see <http://www.trustedcomputinggroup.org>. 
+	  An implementation of the Trusted Software Stack (TSS), the 
+	  userspace enablement piece of the specification, can be 
+	  obtained at: <http://sourceforge.net/projects/trousers>.  To 
+	  compile this driver as a module, choose M here; the module 
+	  will be called tpm. If unsure, say N.
+	  Notes:
+	  1) For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI
+	  and CONFIG_PNPACPI.
+	  2) Without ACPI enabled, the BIOS event log won't be accessible,
+	  which is required to validate the PCR 0-7 values.
+
+if TCG_TPM
+
+config HW_RANDOM_TPM
+	bool "TPM HW Random Number Generator support"
+	depends on TCG_TPM && HW_RANDOM && !(TCG_TPM=y && HW_RANDOM=m)
+	default y
+	---help---
+	  This setting exposes the TPM's Random Number Generator as a hwrng
+	  device. This allows the kernel to collect randomness from the TPM at
+	  boot, and provides the TPM randomness in /dev/hwrng.
+
+	  If unsure, say Y.
+
+config TCG_TIS_CORE
+	tristate
+	---help---
+	  TCG TIS TPM core driver. It implements the TPM TCG TIS logic and hooks
+	  into the TPM kernel APIs. Physical layers will register against it.
+
+config TCG_TIS
+	tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface"
+	depends on X86 || OF
+	select TCG_TIS_CORE
+	---help---
+	  If you have a TPM security chip that is compliant with the
+	  TCG TIS 1.2 TPM specification (TPM1.2) or the TCG PTP FIFO
+	  specification (TPM2.0) say Yes and it will be accessible from
+	  within Linux. To compile this driver as a module, choose  M here;
+	  the module will be called tpm_tis.
+
+config TCG_TIS_SPI
+	tristate "TPM Interface Specification 1.3 Interface / TPM 2.0 FIFO Interface - (SPI)"
+	depends on SPI
+	select TCG_TIS_CORE
+	---help---
+	  If you have a TPM security chip which is connected to a regular,
+	  non-tcg SPI master (i.e. most embedded platforms) that is compliant with the
+	  TCG TIS 1.3 TPM specification (TPM1.2) or the TCG PTP FIFO
+	  specification (TPM2.0) say Yes and it will be accessible from
+	  within Linux. To compile this driver as a module, choose  M here;
+	  the module will be called tpm_tis_spi.
+
+config TCG_TIS_I2C_ATMEL
+	tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
+	depends on I2C
+	---help---
+	  If you have an Atmel I2C TPM security chip say Yes and it will be
+	  accessible from within Linux.
+	  To compile this driver as a module, choose M here; the module will
+	  be called tpm_tis_i2c_atmel.
+
+config TCG_TIS_I2C_INFINEON
+	tristate "TPM Interface Specification 1.2 Interface (I2C - Infineon)"
+	depends on I2C
+	---help---
+	  If you have a TPM security chip that is compliant with the
+	  TCG TIS 1.2 TPM specification and Infineon's I2C Protocol Stack
+	  Specification 0.20 say Yes and it will be accessible from within
+	  Linux.
+	  To compile this driver as a module, choose M here; the module
+	  will be called tpm_i2c_infineon.
+
+config TCG_TIS_I2C_NUVOTON
+	tristate "TPM Interface Specification 1.2 Interface (I2C - Nuvoton)"
+	depends on I2C
+	---help---
+	  If you have a TPM security chip with an I2C interface from
+	  Nuvoton Technology Corp. say Yes and it will be accessible
+	  from within Linux.
+	  To compile this driver as a module, choose M here; the module
+	  will be called tpm_i2c_nuvoton.
+
+config TCG_NSC
+	tristate "National Semiconductor TPM Interface"
+	depends on X86
+	---help---
+	  If you have a TPM security chip from National Semiconductor 
+	  say Yes and it will be accessible from within Linux.  To 
+	  compile this driver as a module, choose M here; the module 
+	  will be called tpm_nsc.
+
+config TCG_ATMEL
+	tristate "Atmel TPM Interface"
+	depends on PPC64 || HAS_IOPORT_MAP
+	---help---
+	  If you have a TPM security chip from Atmel say Yes and it 
+	  will be accessible from within Linux.  To compile this driver 
+	  as a module, choose M here; the module will be called tpm_atmel.
+
+config TCG_INFINEON
+	tristate "Infineon Technologies TPM Interface"
+	depends on PNP
+	---help---
+	  If you have a TPM security chip from Infineon Technologies
+	  (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it
+	  will be accessible from within Linux.
+	  To compile this driver as a module, choose M here; the module
+	  will be called tpm_infineon.
+	  Further information on this driver and the supported hardware
+	  can be found at http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/ 
+
+config TCG_IBMVTPM
+	tristate "IBM VTPM Interface"
+	depends on PPC_PSERIES
+	---help---
+	  If you have IBM virtual TPM (VTPM) support say Yes and it
+	  will be accessible from within Linux.  To compile this driver
+	  as a module, choose M here; the module will be called tpm_ibmvtpm.
+
+config TCG_XEN
+	tristate "XEN TPM Interface"
+	depends on TCG_TPM && XEN
+	select XEN_XENBUS_FRONTEND
+	---help---
+	  If you want to make TPM support available to a Xen user domain,
+	  say Yes and it will be accessible from within Linux. See
+	  the manpages for xl, xl.conf, and docs/misc/vtpm.txt in
+	  the Xen source repository for more details.
+	  To compile this driver as a module, choose M here; the module
+	  will be called xen-tpmfront.
+
+config TCG_CRB
+	tristate "TPM 2.0 CRB Interface"
+	depends on ACPI
+	---help---
+	  If you have a TPM security chip that is compliant with the
+	  TCG CRB 2.0 TPM specification say Yes and it will be accessible
+	  from within Linux.  To compile this driver as a module, choose
+	  M here; the module will be called tpm_crb.
+
+config TCG_VTPM_PROXY
+	tristate "VTPM Proxy Interface"
+	depends on TCG_TPM
+	select ANON_INODES
+	---help---
+	  This driver proxies for an emulated TPM (vTPM) running in userspace.
+	  A device /dev/vtpmx is provided that creates a device pair
+	  /dev/vtpmX and a server-side file descriptor on which the vTPM
+	  can receive commands.
+
+source "drivers/char/tpm/st33zp24/Kconfig"
+endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
new file mode 100644
index 0000000..4e9c33c
--- /dev/null
+++ b/drivers/char/tpm/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the kernel tpm device drivers.
+#
+obj-$(CONFIG_TCG_TPM) += tpm.o
+tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
+	 tpm-dev-common.o tpmrm-dev.o eventlog/common.o eventlog/tpm1.o \
+	 eventlog/tpm2.o tpm2-space.o
+tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o
+tpm-$(CONFIG_EFI) += eventlog/efi.o
+tpm-$(CONFIG_OF) += eventlog/of.o
+obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
+obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
+obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
+obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
+obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
+obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
+obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
+obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
+obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/
+obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
+obj-$(CONFIG_TCG_CRB) += tpm_crb.o
+obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
new file mode 100644
index 0000000..7c53b19
--- /dev/null
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Authors:
+ *	Seiji Munetoh <munetoh@jp.ibm.com>
+ *	Stefan Berger <stefanb@us.ibm.com>
+ *	Reiner Sailer <sailer@watson.ibm.com>
+ *	Kylene Hall <kjhall@us.ibm.com>
+ *	Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Access to the event log extended by the TCG BIOS of PC platform
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+struct acpi_tcpa {
+	struct acpi_table_header hdr;
+	u16 platform_class;
+	union {
+		struct client_hdr {
+			u32 log_max_len __packed;
+			u64 log_start_addr __packed;
+		} client;
+		struct server_hdr {
+			u16 reserved;
+			u64 log_max_len __packed;
+			u64 log_start_addr __packed;
+		} server;
+	};
+};
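+/* platform_class selects which union member is valid: BIOS_CLIENT uses
+ * the client layout, BIOS_SERVER the server layout with its extra
+ * reserved field and 64-bit maximum length. */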
+
+/* read binary bios log */
+int tpm_read_log_acpi(struct tpm_chip *chip)
+{
+	struct acpi_tcpa *buff;
+	acpi_status status;
+	void __iomem *virt;
+	u64 len, start;
+	struct tpm_bios_log *log;
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		return -ENODEV;
+
+	log = &chip->log;
+
+	/* Unfortunately ACPI does not associate the event log with a specific
+	 * TPM, like PPI. Thus all ACPI TPMs will read the same log.
+	 */
+	if (!chip->acpi_dev_handle)
+		return -ENODEV;
+
+	/* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
+	status = acpi_get_table(ACPI_SIG_TCPA, 1,
+				(struct acpi_table_header **)&buff);
+
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	switch (buff->platform_class) {
+	case BIOS_SERVER:
+		len = buff->server.log_max_len;
+		start = buff->server.log_start_addr;
+		break;
+	case BIOS_CLIENT:
+	default:
+		len = buff->client.log_max_len;
+		start = buff->client.log_start_addr;
+		break;
+	}
+	if (!len) {
+		dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__);
+		return -EIO;
+	}
+
+	/* malloc EventLog space */
+	log->bios_event_log = kmalloc(len, GFP_KERNEL);
+	if (!log->bios_event_log)
+		return -ENOMEM;
+
+	log->bios_event_log_end = log->bios_event_log + len;
+
+	virt = acpi_os_map_iomem(start, len);
+	if (!virt)
+		goto err;
+
+	memcpy_fromio(log->bios_event_log, virt, len);
+
+	acpi_os_unmap_iomem(virt, len);
+	return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+
+err:
+	kfree(log->bios_event_log);
+	log->bios_event_log = NULL;
+	return -EIO;
+}
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
new file mode 100644
index 0000000..5a8720d
--- /dev/null
+++ b/drivers/char/tpm/eventlog/common.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2005, 2012 IBM Corporation
+ *
+ * Authors:
+ *	Kent Yoder <key@linux.vnet.ibm.com>
+ *	Seiji Munetoh <munetoh@jp.ibm.com>
+ *	Stefan Berger <stefanb@us.ibm.com>
+ *	Reiner Sailer <sailer@watson.ibm.com>
+ *	Kylene Hall <kjhall@us.ibm.com>
+ *	Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Access to the event log created by a system's firmware / BIOS
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+static int tpm_bios_measurements_open(struct inode *inode,
+					    struct file *file)
+{
+	int err;
+	struct seq_file *seq;
+	struct tpm_chip_seqops *chip_seqops;
+	const struct seq_operations *seqops;
+	struct tpm_chip *chip;
+
+	inode_lock(inode);
+	if (!inode->i_private) {
+		inode_unlock(inode);
+		return -ENODEV;
+	}
+	chip_seqops = (struct tpm_chip_seqops *)inode->i_private;
+	seqops = chip_seqops->seqops;
+	chip = chip_seqops->chip;
+	get_device(&chip->dev);
+	inode_unlock(inode);
+
+	/* now register seq file */
+	err = seq_open(file, seqops);
+	if (!err) {
+		seq = file->private_data;
+		seq->private = chip;
+	}
+
+	return err;
+}
+
+static int tpm_bios_measurements_release(struct inode *inode,
+					 struct file *file)
+{
+	struct seq_file *seq = (struct seq_file *)file->private_data;
+	struct tpm_chip *chip = (struct tpm_chip *)seq->private;
+
+	put_device(&chip->dev);
+
+	return seq_release(inode, file);
+}
+
+static const struct file_operations tpm_bios_measurements_ops = {
+	.owner = THIS_MODULE,
+	.open = tpm_bios_measurements_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = tpm_bios_measurements_release,
+};
+
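+/*
+ * Try each firmware log source in turn: a source returns -ENODEV when it
+ * is absent on this system, so the first present source decides the
+ * result (a log format identifier on success, or a hard error).
+ */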
+static int tpm_read_log(struct tpm_chip *chip)
+{
+	int rc;
+
+	if (chip->log.bios_event_log != NULL) {
+		dev_dbg(&chip->dev,
+			"%s: ERROR - event log already initialized\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	rc = tpm_read_log_acpi(chip);
+	if (rc != -ENODEV)
+		return rc;
+
+	rc = tpm_read_log_efi(chip);
+	if (rc != -ENODEV)
+		return rc;
+
+	return tpm_read_log_of(chip);
+}
+
+/*
+ * tpm_bios_log_setup() - Read the event log from the firmware
+ * @chip: TPM chip to use.
+ *
+ * If an event log is found then the securityfs files are setup to
+ * export it to userspace, otherwise nothing is done.
+ *
+ * Returns -ENODEV if the firmware has no event log or securityfs is not
+ * supported.
+ */
+int tpm_bios_log_setup(struct tpm_chip *chip)
+{
+	const char *name = dev_name(&chip->dev);
+	unsigned int cnt;
+	int log_version;
+	int rc = 0;
+
+	rc = tpm_read_log(chip);
+	if (rc < 0)
+		return rc;
+	log_version = rc;
+
+	cnt = 0;
+	chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
+	/* NOTE: securityfs_create_dir can return ENODEV if securityfs is
+	 * compiled out. The caller should ignore the ENODEV return code.
+	 */
+	if (IS_ERR(chip->bios_dir[cnt]))
+		goto err;
+	cnt++;
+
+	chip->bin_log_seqops.chip = chip;
+	if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+		chip->bin_log_seqops.seqops =
+			&tpm2_binary_b_measurements_seqops;
+	else
+		chip->bin_log_seqops.seqops =
+			&tpm1_binary_b_measurements_seqops;
+
+	chip->bios_dir[cnt] =
+	    securityfs_create_file("binary_bios_measurements",
+				   0440, chip->bios_dir[0],
+				   (void *)&chip->bin_log_seqops,
+				   &tpm_bios_measurements_ops);
+	if (IS_ERR(chip->bios_dir[cnt]))
+		goto err;
+	cnt++;
+
+	if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+
+		chip->ascii_log_seqops.chip = chip;
+		chip->ascii_log_seqops.seqops =
+			&tpm1_ascii_b_measurements_seqops;
+
+		chip->bios_dir[cnt] =
+			securityfs_create_file("ascii_bios_measurements",
+					       0440, chip->bios_dir[0],
+					       (void *)&chip->ascii_log_seqops,
+					       &tpm_bios_measurements_ops);
+		if (IS_ERR(chip->bios_dir[cnt]))
+			goto err;
+		cnt++;
+	}
+
+	return 0;
+
+err:
+	rc = PTR_ERR(chip->bios_dir[cnt]);
+	chip->bios_dir[cnt] = NULL;
+	tpm_bios_log_teardown(chip);
+	return rc;
+}
+
+void tpm_bios_log_teardown(struct tpm_chip *chip)
+{
+	int i;
+	struct inode *inode;
+
+	/* securityfs_remove currently doesn't take care of handling sync
+	 * between removal and opening of pseudo files. To handle this, a
+	 * workaround is added by making i_private = NULL here during removal
+	 * and to check it during open(), both within inode_lock()/unlock().
+	 * This design ensures that open() either safely gets kref or fails.
+	 */
+	for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
+		if (chip->bios_dir[i]) {
+			inode = d_inode(chip->bios_dir[i]);
+			inode_lock(inode);
+			inode->i_private = NULL;
+			inode_unlock(inode);
+			securityfs_remove(chip->bios_dir[i]);
+		}
+	}
+}
diff --git a/drivers/char/tpm/eventlog/common.h b/drivers/char/tpm/eventlog/common.h
new file mode 100644
index 0000000..47ff813
--- /dev/null
+++ b/drivers/char/tpm/eventlog/common.h
@@ -0,0 +1,35 @@
+#ifndef __TPM_EVENTLOG_COMMON_H__
+#define __TPM_EVENTLOG_COMMON_H__
+
+#include "../tpm.h"
+
+extern const struct seq_operations tpm1_ascii_b_measurements_seqops;
+extern const struct seq_operations tpm1_binary_b_measurements_seqops;
+extern const struct seq_operations tpm2_binary_b_measurements_seqops;
+
+#if defined(CONFIG_ACPI)
+int tpm_read_log_acpi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_acpi(struct tpm_chip *chip)
+{
+	return -ENODEV;
+}
+#endif
+#if defined(CONFIG_OF)
+int tpm_read_log_of(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_of(struct tpm_chip *chip)
+{
+	return -ENODEV;
+}
+#endif
+#if defined(CONFIG_EFI)
+int tpm_read_log_efi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_efi(struct tpm_chip *chip)
+{
+	return -ENODEV;
+}
+#endif
+
+#endif
diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
new file mode 100644
index 0000000..3e673ab
--- /dev/null
+++ b/drivers/char/tpm/eventlog/efi.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2017 Google
+ *
+ * Authors:
+ *      Thiebaud Weksteen <tweek@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/efi.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+/* read binary bios log from EFI configuration table */
+int tpm_read_log_efi(struct tpm_chip *chip)
+{
+	struct linux_efi_tpm_eventlog *log_tbl;
+	struct tpm_bios_log *log;
+	u32 log_size;
+	u8 tpm_log_version;
+
+	if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+		return -ENODEV;
+
+	if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
+		return -ENODEV;
+
+	log = &chip->log;
+
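+	/* Map only the table header first to learn the log size, then
+	 * remap the table together with its log payload.
+	 */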
+	log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl), MEMREMAP_WB);
+	if (!log_tbl) {
+		pr_err("Could not map UEFI TPM log table!\n");
+		return -ENOMEM;
+	}
+
+	log_size = log_tbl->size;
+	memunmap(log_tbl);
+
+	log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
+			   MEMREMAP_WB);
+	if (!log_tbl) {
+		pr_err("Could not map UEFI TPM log table payload!\n");
+		return -ENOMEM;
+	}
+
+	/* copy the event log into kernel memory */
+	log->bios_event_log = kmemdup(log_tbl->log, log_size, GFP_KERNEL);
+	if (!log->bios_event_log)
+		goto err_memunmap;
+	log->bios_event_log_end = log->bios_event_log + log_size;
+
+	tpm_log_version = log_tbl->version;
+	memunmap(log_tbl);
+	return tpm_log_version;
+
+err_memunmap:
+	memunmap(log_tbl);
+	return -ENOMEM;
+}
diff --git a/drivers/char/tpm/eventlog/of.c b/drivers/char/tpm/eventlog/of.c
new file mode 100644
index 0000000..bba5fba
--- /dev/null
+++ b/drivers/char/tpm/eventlog/of.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012 IBM Corporation
+ *
+ * Author: Ashley Lai <ashleydlai@gmail.com>
+ *         Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Read the event log created by the firmware on PPC64
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+int tpm_read_log_of(struct tpm_chip *chip)
+{
+	struct device_node *np;
+	const u32 *sizep;
+	const u64 *basep;
+	struct tpm_bios_log *log;
+	u32 size;
+	u64 base;
+
+	log = &chip->log;
+	if (chip->dev.parent && chip->dev.parent->of_node)
+		np = chip->dev.parent->of_node;
+	else
+		return -ENODEV;
+
+	if (of_property_read_bool(np, "powered-while-suspended"))
+		chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED;
+
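+	/* Both properties must be present; only one of the two present
+	 * indicates broken firmware.
+	 */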
+	sizep = of_get_property(np, "linux,sml-size", NULL);
+	basep = of_get_property(np, "linux,sml-base", NULL);
+	if (sizep == NULL && basep == NULL)
+		return -ENODEV;
+	if (sizep == NULL || basep == NULL)
+		return -EIO;
+
+	/*
+	 * For both vtpm/tpm, firmware has log addr and log size in big
+	 * endian format. But in case of vtpm, there is a method called
+	 * sml-handover which is run during kernel init even before
+	 * the device tree is set up. This sml-handover function takes care
+	 * of endianness and writes to sml-base and sml-size in little
+	 * endian format. For this reason, vtpm doesn't need conversion
+	 * but physical tpm needs the conversion.
+	 */
+	if (of_property_match_string(np, "compatible", "IBM,vtpm") < 0) {
+		size = be32_to_cpup((__force __be32 *)sizep);
+		base = be64_to_cpup((__force __be64 *)basep);
+	} else {
+		size = *sizep;
+		base = *basep;
+	}
+
+	if (size == 0) {
+		dev_warn(&chip->dev, "%s: Event log area empty\n", __func__);
+		return -EIO;
+	}
+
+	log->bios_event_log = kmemdup(__va(base), size, GFP_KERNEL);
+	if (!log->bios_event_log)
+		return -ENOMEM;
+
+	log->bios_event_log_end = log->bios_event_log + size;
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		return EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+	return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+}
diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c
new file mode 100644
index 0000000..58c8478
--- /dev/null
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2005, 2012 IBM Corporation
+ *
+ * Authors:
+ *	Kent Yoder <key@linux.vnet.ibm.com>
+ *	Seiji Munetoh <munetoh@jp.ibm.com>
+ *	Stefan Berger <stefanb@us.ibm.com>
+ *	Reiner Sailer <sailer@watson.ibm.com>
+ *	Kylene Hall <kjhall@us.ibm.com>
+ *	Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Access to the event log created by a system's firmware / BIOS
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <linux/efi.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+static const char *tcpa_event_type_strings[] = {
+	"PREBOOT",
+	"POST CODE",
+	"",
+	"NO ACTION",
+	"SEPARATOR",
+	"ACTION",
+	"EVENT TAG",
+	"S-CRTM Contents",
+	"S-CRTM Version",
+	"CPU Microcode",
+	"Platform Config Flags",
+	"Table of Devices",
+	"Compact Hash",
+	"IPL",
+	"IPL Partition Data",
+	"Non-Host Code",
+	"Non-Host Config",
+	"Non-Host Info"
+};
+
+static const char *tcpa_pc_event_id_strings[] = {
+	"",
+	"SMBIOS",
+	"BIS Certificate",
+	"POST BIOS ",
+	"ESCD ",
+	"CMOS",
+	"NVRAM",
+	"Option ROM",
+	"Option ROM config",
+	"",
+	"Option ROM microcode ",
+	"S-CRTM Version",
+	"S-CRTM Contents ",
+	"POST Contents ",
+	"Table of Devices",
+};
+
+/* returns pointer to start of pos. entry of tcg log */
+static void *tpm1_bios_measurements_start(struct seq_file *m, loff_t *pos)
+{
+	loff_t i;
+	struct tpm_chip *chip = m->private;
+	struct tpm_bios_log *log = &chip->log;
+	void *addr = log->bios_event_log;
+	void *limit = log->bios_event_log_end;
+	struct tcpa_event *event;
+	u32 converted_event_size;
+	u32 converted_event_type;
+
+	/* read over *pos measurements */
+	for (i = 0; i < *pos; i++) {
+		event = addr;
+
+		converted_event_size =
+		    do_endian_conversion(event->event_size);
+		converted_event_type =
+		    do_endian_conversion(event->event_type);
+
+		if ((addr + sizeof(struct tcpa_event)) < limit) {
+			if ((converted_event_type == 0) &&
+			    (converted_event_size == 0))
+				return NULL;
+			addr += (sizeof(struct tcpa_event) +
+				 converted_event_size);
+		}
+	}
+
+	/* now check if current entry is valid */
+	if ((addr + sizeof(struct tcpa_event)) >= limit)
+		return NULL;
+
+	event = addr;
+
+	converted_event_size = do_endian_conversion(event->event_size);
+	converted_event_type = do_endian_conversion(event->event_type);
+
+	if (((converted_event_type == 0) && (converted_event_size == 0))
+	    || ((addr + sizeof(struct tcpa_event) + converted_event_size)
+		>= limit))
+		return NULL;
+
+	return addr;
+}
+
+static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
+					loff_t *pos)
+{
+	struct tcpa_event *event = v;
+	struct tpm_chip *chip = m->private;
+	struct tpm_bios_log *log = &chip->log;
+	void *limit = log->bios_event_log_end;
+	u32 converted_event_size;
+	u32 converted_event_type;
+
+	converted_event_size = do_endian_conversion(event->event_size);
+
+	v += sizeof(struct tcpa_event) + converted_event_size;
+
+	/* now check if current entry is valid */
+	if ((v + sizeof(struct tcpa_event)) >= limit)
+		return NULL;
+
+	event = v;
+
+	converted_event_size = do_endian_conversion(event->event_size);
+	converted_event_type = do_endian_conversion(event->event_type);
+
+	if (((converted_event_type == 0) && (converted_event_size == 0)) ||
+	    ((v + sizeof(struct tcpa_event) + converted_event_size) >= limit))
+		return NULL;
+
+	(*pos)++;
+	return v;
+}
+
+static void tpm1_bios_measurements_stop(struct seq_file *m, void *v)
+{
+}
+
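+/*
+ * get_event_name builds a printable "[<name><hex data>]" string of at
+ * most MAX_TEXT_EVENT bytes in @dest from the event type and, for
+ * EVENT_TAG events, from the event specific data.
+ */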
+static int get_event_name(char *dest, struct tcpa_event *event,
+			unsigned char *event_entry)
+{
+	const char *name = "";
+	/* 41 so there is room for 40 data and 1 nul */
+	char data[41] = "";
+	int i, n_len = 0, d_len = 0;
+	struct tcpa_pc_event *pc_event;
+
+	switch (do_endian_conversion(event->event_type)) {
+	case PREBOOT:
+	case POST_CODE:
+	case UNUSED:
+	case NO_ACTION:
+	case SCRTM_CONTENTS:
+	case SCRTM_VERSION:
+	case CPU_MICROCODE:
+	case PLATFORM_CONFIG_FLAGS:
+	case TABLE_OF_DEVICES:
+	case COMPACT_HASH:
+	case IPL:
+	case IPL_PARTITION_DATA:
+	case NONHOST_CODE:
+	case NONHOST_CONFIG:
+	case NONHOST_INFO:
+		name = tcpa_event_type_strings[do_endian_conversion
+						(event->event_type)];
+		n_len = strlen(name);
+		break;
+	case SEPARATOR:
+	case ACTION:
+		if (MAX_TEXT_EVENT >
+		    do_endian_conversion(event->event_size)) {
+			name = event_entry;
+			n_len = do_endian_conversion(event->event_size);
+		}
+		break;
+	case EVENT_TAG:
+		pc_event = (struct tcpa_pc_event *)event_entry;
+
+		/* TODO: raw data -> Base64 */
+
+		switch (do_endian_conversion(pc_event->event_id)) {
+		case SMBIOS:
+		case BIS_CERT:
+		case CMOS:
+		case NVRAM:
+		case OPTION_ROM_EXEC:
+		case OPTION_ROM_CONFIG:
+		case S_CRTM_VERSION:
+			name = tcpa_pc_event_id_strings[do_endian_conversion
+							(pc_event->event_id)];
+			n_len = strlen(name);
+			break;
+		/* hash data */
+		case POST_BIOS_ROM:
+		case ESCD:
+		case OPTION_ROM_MICROCODE:
+		case S_CRTM_CONTENTS:
+		case POST_CONTENTS:
+			name = tcpa_pc_event_id_strings[do_endian_conversion
+							(pc_event->event_id)];
+			n_len = strlen(name);
+			for (i = 0; i < 20; i++)
+				d_len += sprintf(&data[2*i], "%02x",
+						pc_event->event_data[i]);
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return snprintf(dest, MAX_TEXT_EVENT, "[%.*s%.*s]",
+			n_len, name, d_len, data);
+
+}
+
+static int tpm1_binary_bios_measurements_show(struct seq_file *m, void *v)
+{
+	struct tcpa_event *event = v;
+	struct tcpa_event temp_event;
+	char *temp_ptr;
+	int i;
+
+	memcpy(&temp_event, event, sizeof(struct tcpa_event));
+
+	/* convert raw integers for endianness */
+	temp_event.pcr_index = do_endian_conversion(event->pcr_index);
+	temp_event.event_type = do_endian_conversion(event->event_type);
+	temp_event.event_size = do_endian_conversion(event->event_size);
+
+	temp_ptr = (char *) &temp_event;
+
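+	/* Write the header with its fields converted to host byte order,
+	 * then the remainder of the event verbatim from the log.
+	 */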
+	for (i = 0; i < (sizeof(struct tcpa_event) - 1); i++)
+		seq_putc(m, temp_ptr[i]);
+
+	temp_ptr = (char *) v;
+
+	for (i = (sizeof(struct tcpa_event) - 1);
+	     i < (sizeof(struct tcpa_event) + temp_event.event_size); i++)
+		seq_putc(m, temp_ptr[i]);
+
+	return 0;
+
+}
+
+static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v)
+{
+	int len = 0;
+	char *eventname;
+	struct tcpa_event *event = v;
+	unsigned char *event_entry =
+	    (unsigned char *)(v + sizeof(struct tcpa_event));
+
+	eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
+	if (!eventname) {
+		printk(KERN_ERR "%s: ERROR - no memory for event name\n",
+		       __func__);
+		return -ENOMEM;
+	}
+
+	/* 1st: PCR */
+	seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index));
+
+	/* 2nd: SHA1 */
+	seq_printf(m, "%20phN", event->pcr_value);
+
+	/* 3rd: event type identifier */
+	seq_printf(m, " %02x", do_endian_conversion(event->event_type));
+
+	len += get_event_name(eventname, event, event_entry);
+
+	/* 4th: eventname <= max + '\0' delimiter */
+	seq_printf(m, " %s\n", eventname);
+
+	kfree(eventname);
+	return 0;
+}
+
+const struct seq_operations tpm1_ascii_b_measurements_seqops = {
+	.start = tpm1_bios_measurements_start,
+	.next = tpm1_bios_measurements_next,
+	.stop = tpm1_bios_measurements_stop,
+	.show = tpm1_ascii_bios_measurements_show,
+};
+
+const struct seq_operations tpm1_binary_b_measurements_seqops = {
+	.start = tpm1_bios_measurements_start,
+	.next = tpm1_bios_measurements_next,
+	.stop = tpm1_bios_measurements_stop,
+	.show = tpm1_binary_bios_measurements_show,
+};
diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
new file mode 100644
index 0000000..1b8fa9d
--- /dev/null
+++ b/drivers/char/tpm/eventlog/tpm2.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2016 IBM Corporation
+ *
+ * Authors:
+ *      Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Access to TPM 2.0 event log as written by Firmware.
+ * It assumes that writer of event log has followed TCG Specification
+ * for Family "2.0" and written the event data in little endian.
+ * With that, it doesn't need any endian conversion for structure
+ * content.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+/*
+ * calc_tpm2_event_size() - calculate the event size, where event
+ * is an entry in the TPM 2.0 event log. The event is of type Crypto
+ * Agile Log Entry Format as defined in TCG EFI Protocol Specification
+ * Family "2.0".
+ *
+ * @event: event whose size is to be calculated.
+ * @event_header: the first event in the event log.
+ *
+ * Returns size of the event. If it is an invalid event, returns 0.
+ */
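+/*
+ * Layout of a crypto agile event, as parsed below (all fields are
+ * little endian):
+ *
+ *	u32 pcr_idx;
+ *	u32 event_type;
+ *	u32 count;			number of digests
+ *	{ u16 alg_id; u8 digest[]; }	repeated count times
+ *	u32 event_size;
+ *	u8  event[event_size];
+ */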
+static int calc_tpm2_event_size(struct tcg_pcr_event2 *event,
+				struct tcg_pcr_event *event_header)
+{
+	struct tcg_efi_specid_event *efispecid;
+	struct tcg_event_field *event_field;
+	void *marker;
+	void *marker_start;
+	u32 halg_size;
+	size_t size;
+	u16 halg;
+	int i;
+	int j;
+
+	marker = event;
+	marker_start = marker;
+	marker = marker + sizeof(event->pcr_idx) + sizeof(event->event_type)
+		+ sizeof(event->count);
+
+	efispecid = (struct tcg_efi_specid_event *)event_header->event;
+
+	/* Check if event is malformed. */
+	if (event->count > efispecid->num_algs)
+		return 0;
+
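+	/* Walk the digest list: each entry is a u16 alg_id followed by
+	 * a digest whose length comes from the spec ID event's size
+	 * table.
+	 */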
+	for (i = 0; i < event->count; i++) {
+		halg_size = sizeof(event->digests[i].alg_id);
+		memcpy(&halg, marker, halg_size);
+		marker = marker + halg_size;
+		for (j = 0; j < efispecid->num_algs; j++) {
+			if (halg == efispecid->digest_sizes[j].alg_id) {
+				marker +=
+					efispecid->digest_sizes[j].digest_size;
+				break;
+			}
+		}
+		/* Algorithm without known length. Such event is unparseable. */
+		if (j == efispecid->num_algs)
+			return 0;
+	}
+
+	event_field = (struct tcg_event_field *)marker;
+	marker = marker + sizeof(event_field->event_size)
+		+ event_field->event_size;
+	size = marker - marker_start;
+
+	if ((event->event_type == 0) && (event_field->event_size == 0))
+		return 0;
+
+	return size;
+}
+
+static void *tpm2_bios_measurements_start(struct seq_file *m, loff_t *pos)
+{
+	struct tpm_chip *chip = m->private;
+	struct tpm_bios_log *log = &chip->log;
+	void *addr = log->bios_event_log;
+	void *limit = log->bios_event_log_end;
+	struct tcg_pcr_event *event_header;
+	struct tcg_pcr_event2 *event;
+	size_t size;
+	int i;
+
+	event_header = addr;
+	size = sizeof(struct tcg_pcr_event) - sizeof(event_header->event)
+		+ event_header->event_size;
+
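+	/* The first entry in the log is a TPM 1.2 style header event;
+	 * all subsequent entries use the crypto agile format.
+	 */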
+	if (*pos == 0) {
+		if (addr + size < limit) {
+			if ((event_header->event_type == 0) &&
+			    (event_header->event_size == 0))
+				return NULL;
+			return SEQ_START_TOKEN;
+		}
+	}
+
+	if (*pos > 0) {
+		addr += size;
+		event = addr;
+		size = calc_tpm2_event_size(event, event_header);
+		if ((addr + size >= limit) || (size == 0))
+			return NULL;
+	}
+
+	for (i = 0; i < (*pos - 1); i++) {
+		event = addr;
+		size = calc_tpm2_event_size(event, event_header);
+
+		if ((addr + size >= limit) || (size == 0))
+			return NULL;
+		addr += size;
+	}
+
+	return addr;
+}
+
+static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
+					 loff_t *pos)
+{
+	struct tcg_pcr_event *event_header;
+	struct tcg_pcr_event2 *event;
+	struct tpm_chip *chip = m->private;
+	struct tpm_bios_log *log = &chip->log;
+	void *limit = log->bios_event_log_end;
+	size_t event_size;
+	void *marker;
+
+	event_header = log->bios_event_log;
+
+	if (v == SEQ_START_TOKEN) {
+		event_size = sizeof(struct tcg_pcr_event) -
+			sizeof(event_header->event) + event_header->event_size;
+		marker = event_header;
+	} else {
+		event = v;
+		event_size = calc_tpm2_event_size(event, event_header);
+		if (event_size == 0)
+			return NULL;
+		marker = event;
+	}
+
+	marker = marker + event_size;
+	if (marker >= limit)
+		return NULL;
+	v = marker;
+	event = v;
+
+	event_size = calc_tpm2_event_size(event, event_header);
+	if (((v + event_size) >= limit) || (event_size == 0))
+		return NULL;
+
+	(*pos)++;
+	return v;
+}
+
+static void tpm2_bios_measurements_stop(struct seq_file *m, void *v)
+{
+}
+
+static int tpm2_binary_bios_measurements_show(struct seq_file *m, void *v)
+{
+	struct tpm_chip *chip = m->private;
+	struct tpm_bios_log *log = &chip->log;
+	struct tcg_pcr_event *event_header = log->bios_event_log;
+	struct tcg_pcr_event2 *event = v;
+	void *temp_ptr;
+	size_t size;
+
+	if (v == SEQ_START_TOKEN) {
+		size = sizeof(struct tcg_pcr_event) -
+			sizeof(event_header->event) + event_header->event_size;
+
+		temp_ptr = event_header;
+
+		if (size > 0)
+			seq_write(m, temp_ptr, size);
+	} else {
+		size = calc_tpm2_event_size(event, event_header);
+		temp_ptr = event;
+		if (size > 0)
+			seq_write(m, temp_ptr, size);
+	}
+
+	return 0;
+}
+
+const struct seq_operations tpm2_binary_b_measurements_seqops = {
+	.start = tpm2_bios_measurements_start,
+	.next = tpm2_bios_measurements_next,
+	.stop = tpm2_bios_measurements_stop,
+	.show = tpm2_binary_bios_measurements_show,
+};
diff --git a/drivers/char/tpm/st33zp24/Kconfig b/drivers/char/tpm/st33zp24/Kconfig
new file mode 100644
index 0000000..e74c6f2
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/Kconfig
@@ -0,0 +1,29 @@
+config TCG_TIS_ST33ZP24
+	tristate
+	---help---
+	  STMicroelectronics ST33ZP24 core driver. It implements the core
+	  TPM1.2 logic and hooks into the TPM kernel APIs. Physical layers will
+	  register against it.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called tpm_st33zp24.
+
+config TCG_TIS_ST33ZP24_I2C
+	tristate "STMicroelectronics TPM Interface Specification 1.2 Interface (I2C)"
+	depends on I2C
+	select TCG_TIS_ST33ZP24
+	---help---
+	  This module adds support for the STMicroelectronics TPM security chip
+	  ST33ZP24 with i2c interface.
+	  To compile this driver as a module, choose M here; the module will be
+	  called tpm_st33zp24_i2c.
+
+config TCG_TIS_ST33ZP24_SPI
+	tristate "STMicroelectronics TPM Interface Specification 1.2 Interface (SPI)"
+	depends on SPI
+	select TCG_TIS_ST33ZP24
+	---help---
+	  This module adds support for the STMicroelectronics TPM security chip
+	  ST33ZP24 with spi interface.
+	  To compile this driver as a module, choose M here; the module will be
+	  called tpm_st33zp24_spi.
diff --git a/drivers/char/tpm/st33zp24/Makefile b/drivers/char/tpm/st33zp24/Makefile
new file mode 100644
index 0000000..649e411
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for ST33ZP24 TPM 1.2 driver
+#
+
+tpm_st33zp24-objs = st33zp24.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24) += tpm_st33zp24.o
+
+tpm_st33zp24_i2c-objs = i2c.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24_I2C) += tpm_st33zp24_i2c.o
+
+tpm_st33zp24_spi-objs = spi.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24_SPI) += tpm_st33zp24_spi.o
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
new file mode 100644
index 0000000..be5d1ab
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -0,0 +1,329 @@
+/*
+ * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2016 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/acpi.h>
+#include <linux/tpm.h>
+#include <linux/platform_data/st33zp24.h>
+
+#include "../tpm.h"
+#include "st33zp24.h"
+
+#define TPM_DUMMY_BYTE			0xAA
+
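+/*
+ * st33zp24_i2c_phy - I2C phy layer private data
+ * @client:   the underlying i2c client
+ * @buf:      scratch buffer; byte 0 carries the target TIS register,
+ *	      the rest the payload (hence TPM_BUFSIZE + 1)
+ * @io_lpcpd: gpio used for the TPM lpcpd (power management) pin
+ */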
+struct st33zp24_i2c_phy {
+	struct i2c_client *client;
+	u8 buf[TPM_BUFSIZE + 1];
+	int io_lpcpd;
+};
+
+/*
+ * write8_reg
+ * Send bytes to the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, the length of the data
+ * @return: Returns negative errno, or else the number of bytes written.
+ */
+static int write8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size)
+{
+	struct st33zp24_i2c_phy *phy = phy_id;
+
+	phy->buf[0] = tpm_register;
+	memcpy(phy->buf + 1, tpm_data, tpm_size);
+	return i2c_master_send(phy->client, phy->buf, tpm_size + 1);
+} /* write8_reg() */
+
+/*
+ * read8_reg
+ * Recv bytes from the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: number of bytes read, or a negative value on error.
+ */
+static int read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size)
+{
+	struct st33zp24_i2c_phy *phy = phy_id;
+	u8 status = 0;
+	u8 data;
+
+	data = TPM_DUMMY_BYTE;
+	status = write8_reg(phy, tpm_register, &data, 1);
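+	/* write8_reg() returns the number of bytes sent; the register
+	 * write above transfers exactly two (register + dummy byte).
+	 */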
+	if (status == 2)
+		status = i2c_master_recv(phy->client, tpm_data, tpm_size);
+	return status;
+} /* read8_reg() */
+
+/*
+ * st33zp24_i2c_send
+ * Send bytes to the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, the length of the data
+ * @return: number of bytes written, or a negative value on error.
+ */
+static int st33zp24_i2c_send(void *phy_id, u8 tpm_register, u8 *tpm_data,
+			     int tpm_size)
+{
+	return write8_reg(phy_id, tpm_register | TPM_WRITE_DIRECTION, tpm_data,
+			  tpm_size);
+}
+
+/*
+ * st33zp24_i2c_recv
+ * Recv bytes from the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: number of bytes read, or a negative value on error.
+ */
+static int st33zp24_i2c_recv(void *phy_id, u8 tpm_register, u8 *tpm_data,
+			     int tpm_size)
+{
+	return read8_reg(phy_id, tpm_register, tpm_data, tpm_size);
+}
+
+static const struct st33zp24_phy_ops i2c_phy_ops = {
+	.send = st33zp24_i2c_send,
+	.recv = st33zp24_i2c_recv,
+};
+
+static const struct acpi_gpio_params lpcpd_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_st33zp24_gpios[] = {
+	{ "lpcpd-gpios", &lpcpd_gpios, 1 },
+	{},
+};
+
+static int st33zp24_i2c_acpi_request_resources(struct i2c_client *client)
+{
+	struct tpm_chip *chip = i2c_get_clientdata(client);
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	struct st33zp24_i2c_phy *phy = tpm_dev->phy_id;
+	struct gpio_desc *gpiod_lpcpd;
+	struct device *dev = &client->dev;
+	int ret;
+
+	ret = devm_acpi_dev_add_driver_gpios(dev, acpi_st33zp24_gpios);
+	if (ret)
+		return ret;
+
+	/* Get LPCPD GPIO from ACPI */
+	gpiod_lpcpd = devm_gpiod_get(dev, "lpcpd", GPIOD_OUT_HIGH);
+	if (IS_ERR(gpiod_lpcpd)) {
+		dev_err(&client->dev,
+			"Failed to retrieve lpcpd-gpios from acpi.\n");
+		phy->io_lpcpd = -1;
+		/*
+		 * lpcpd pin is not specified. This is not an issue as
+		 * power management can be also managed by TPM specific
+		 * commands. So leave with a success status code.
+		 */
+		return 0;
+	}
+
+	phy->io_lpcpd = desc_to_gpio(gpiod_lpcpd);
+
+	return 0;
+}
+
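+/*
+ * An illustrative (not normative) device tree node for this binding;
+ * the i2c address and the gpio specifier below are assumptions:
+ *
+ *	st33zp24: tpm@13 {
+ *		compatible = "st,st33zp24-i2c";
+ *		reg = <0x13>;
+ *		lpcpd-gpios = <&gpio5 15 0>;
+ *	};
+ */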
+static int st33zp24_i2c_of_request_resources(struct i2c_client *client)
+{
+	struct tpm_chip *chip = i2c_get_clientdata(client);
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	struct st33zp24_i2c_phy *phy = tpm_dev->phy_id;
+	struct device_node *pp;
+	int gpio;
+	int ret;
+
+	pp = client->dev.of_node;
+	if (!pp) {
+		dev_err(&client->dev, "No platform data\n");
+		return -ENODEV;
+	}
+
+	/* Get GPIO from device tree */
+	gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0);
+	if (gpio < 0) {
+		dev_err(&client->dev,
+			"Failed to retrieve lpcpd-gpios from dts.\n");
+		phy->io_lpcpd = -1;
+		/*
+		 * lpcpd pin is not specified. This is not an issue as
+		 * power management can be also managed by TPM specific
+		 * commands. So leave with a success status code.
+		 */
+		return 0;
+	}
+	/* GPIO request and configuration */
+	ret = devm_gpio_request_one(&client->dev, gpio,
+			GPIOF_OUT_INIT_HIGH, "TPM IO LPCPD");
+	if (ret) {
+		dev_err(&client->dev, "Failed to request lpcpd pin\n");
+		return -ENODEV;
+	}
+	phy->io_lpcpd = gpio;
+
+	return 0;
+}
+
+static int st33zp24_i2c_request_resources(struct i2c_client *client)
+{
+	struct tpm_chip *chip = i2c_get_clientdata(client);
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	struct st33zp24_i2c_phy *phy = tpm_dev->phy_id;
+	struct st33zp24_platform_data *pdata;
+	int ret;
+
+	pdata = client->dev.platform_data;
+	if (!pdata) {
+		dev_err(&client->dev, "No platform data\n");
+		return -ENODEV;
+	}
+
+	/* store for later use */
+	phy->io_lpcpd = pdata->io_lpcpd;
+
+	if (gpio_is_valid(pdata->io_lpcpd)) {
+		ret = devm_gpio_request_one(&client->dev,
+				pdata->io_lpcpd, GPIOF_OUT_INIT_HIGH,
+				"TPM IO_LPCPD");
+		if (ret) {
+			dev_err(&client->dev, "Failed to request lpcpd pin\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * st33zp24_i2c_probe initializes the TPM device
+ * @param: client, the i2c_client description (TPM I2C description).
+ * @param: id, the i2c_device_id struct.
+ * @return: 0 in case of success.
+ *	 or a negative value describing the error.
+ */
+static int st33zp24_i2c_probe(struct i2c_client *client,
+			      const struct i2c_device_id *id)
+{
+	int ret;
+	struct st33zp24_platform_data *pdata;
+	struct st33zp24_i2c_phy *phy;
+
+	if (!client) {
+		pr_info("%s: i2c client is NULL. Device not accessible.\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_info(&client->dev, "client not i2c capable\n");
+		return -ENODEV;
+	}
+
+	phy = devm_kzalloc(&client->dev, sizeof(struct st33zp24_i2c_phy),
+			   GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	phy->client = client;
+
+	pdata = client->dev.platform_data;
+	if (!pdata && client->dev.of_node) {
+		ret = st33zp24_i2c_of_request_resources(client);
+		if (ret)
+			return ret;
+	} else if (pdata) {
+		ret = st33zp24_i2c_request_resources(client);
+		if (ret)
+			return ret;
+	} else if (ACPI_HANDLE(&client->dev)) {
+		ret = st33zp24_i2c_acpi_request_resources(client);
+		if (ret)
+			return ret;
+	}
+
+	return st33zp24_probe(phy, &i2c_phy_ops, &client->dev, client->irq,
+			      phy->io_lpcpd);
+}
+
+/*
+ * st33zp24_i2c_remove removes the TPM device
+ * @param: client, the i2c_client description (TPM I2C description).
+ * @return: 0 in case of success.
+ */
+static int st33zp24_i2c_remove(struct i2c_client *client)
+{
+	struct tpm_chip *chip = i2c_get_clientdata(client);
+	int ret;
+
+	ret = st33zp24_remove(chip);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static const struct i2c_device_id st33zp24_i2c_id[] = {
+	{TPM_ST33_I2C, 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id);
+
+static const struct of_device_id of_st33zp24_i2c_match[] = {
+	{ .compatible = "st,st33zp24-i2c", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, of_st33zp24_i2c_match);
+
+static const struct acpi_device_id st33zp24_i2c_acpi_match[] = {
+	{"SMO3324"},
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, st33zp24_i2c_acpi_match);
+
+static SIMPLE_DEV_PM_OPS(st33zp24_i2c_ops, st33zp24_pm_suspend,
+			 st33zp24_pm_resume);
+
+static struct i2c_driver st33zp24_i2c_driver = {
+	.driver = {
+		.name = TPM_ST33_I2C,
+		.pm = &st33zp24_i2c_ops,
+		.of_match_table = of_match_ptr(of_st33zp24_i2c_match),
+		.acpi_match_table = ACPI_PTR(st33zp24_i2c_acpi_match),
+	},
+	.probe = st33zp24_i2c_probe,
+	.remove = st33zp24_i2c_remove,
+	.id_table = st33zp24_i2c_id
+};
+
+module_i2c_driver(st33zp24_i2c_driver);
+
+MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
+MODULE_DESCRIPTION("STM TPM 1.2 I2C ST33 Driver");
+MODULE_VERSION("1.3.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
new file mode 100644
index 0000000..d7909ab
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -0,0 +1,446 @@
+/*
+ * STMicroelectronics TPM SPI Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2016 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/acpi.h>
+#include <linux/tpm.h>
+#include <linux/platform_data/st33zp24.h>
+
+#include "../tpm.h"
+#include "st33zp24.h"
+
+#define TPM_DATA_FIFO           0x24
+#define TPM_INTF_CAPABILITY     0x14
+
+#define TPM_DUMMY_BYTE		0x00
+
+#define MAX_SPI_LATENCY		15
+#define LOCALITY0		0
+
+#define ST33ZP24_OK					0x5A
+#define ST33ZP24_UNDEFINED_ERR				0x80
+#define ST33ZP24_BADLOCALITY				0x81
+#define ST33ZP24_TISREGISTER_UNKNOWN			0x82
+#define ST33ZP24_LOCALITY_NOT_ACTIVATED			0x83
+#define ST33ZP24_HASH_END_BEFORE_HASH_START		0x84
+#define ST33ZP24_BAD_COMMAND_ORDER			0x85
+#define ST33ZP24_INCORECT_RECEIVED_LENGTH		0x86
+#define ST33ZP24_TPM_FIFO_OVERFLOW			0x89
+#define ST33ZP24_UNEXPECTED_READ_FIFO			0x8A
+#define ST33ZP24_UNEXPECTED_WRITE_FIFO			0x8B
+#define ST33ZP24_CMDRDY_SET_WHEN_PROCESSING_HASH_END	0x90
+#define ST33ZP24_DUMMY_BYTES				0x00
+
+/*
+ * A TPM command can be up to 2048 bytes and a TPM response up to
+ * 1024 bytes.
+ * Between command and response the chip inserts latency bytes (up to
+ * 15; on the st33zp24, 2 are usually enough).
+ *
+ * In the worst case, sending a command and expecting an answer
+ * therefore needs 2048 (TPM command) + 1024 (TPM answer) + 15
+ * (latency) bytes of buffer space.
+ */
+#define ST33ZP24_SPI_BUFFER_SIZE (TPM_BUFSIZE + (TPM_BUFSIZE / 2) +\
+				  MAX_SPI_LATENCY)
+
+struct st33zp24_spi_phy {
+	struct spi_device *spi_device;
+
+	u8 tx_buf[ST33ZP24_SPI_BUFFER_SIZE];
+	u8 rx_buf[ST33ZP24_SPI_BUFFER_SIZE];
+
+	int io_lpcpd;
+	int latency;
+};
+
+static int st33zp24_status_to_errno(u8 code)
+{
+	switch (code) {
+	case ST33ZP24_OK:
+		return 0;
+	case ST33ZP24_UNDEFINED_ERR:
+	case ST33ZP24_BADLOCALITY:
+	case ST33ZP24_TISREGISTER_UNKNOWN:
+	case ST33ZP24_LOCALITY_NOT_ACTIVATED:
+	case ST33ZP24_HASH_END_BEFORE_HASH_START:
+	case ST33ZP24_BAD_COMMAND_ORDER:
+	case ST33ZP24_UNEXPECTED_READ_FIFO:
+	case ST33ZP24_UNEXPECTED_WRITE_FIFO:
+	case ST33ZP24_CMDRDY_SET_WHEN_PROCESSING_HASH_END:
+		return -EPROTO;
+	case ST33ZP24_INCORECT_RECEIVED_LENGTH:
+	case ST33ZP24_TPM_FIFO_OVERFLOW:
+		return -EMSGSIZE;
+	case ST33ZP24_DUMMY_BYTES:
+		return -ENOSYS;
+	}
+	return code;
+}
+
+/*
+ * st33zp24_spi_send
+ * Send bytes to the TIS register according to the ST33ZP24 SPI protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, the length of the data
+ * @return: 0 on success, else a negative error code.
+ */
+static int st33zp24_spi_send(void *phy_id, u8 tpm_register, u8 *tpm_data,
+			     int tpm_size)
+{
+	int total_length = 0, ret = 0;
+	struct st33zp24_spi_phy *phy = phy_id;
+	struct spi_device *dev = phy->spi_device;
+	struct spi_transfer spi_xfer = {
+		.tx_buf = phy->tx_buf,
+		.rx_buf = phy->rx_buf,
+	};
+
+	/* Pre-Header */
+	phy->tx_buf[total_length++] = TPM_WRITE_DIRECTION | LOCALITY0;
+	phy->tx_buf[total_length++] = tpm_register;
+
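+	/* Writes to the data FIFO carry an explicit 16-bit big endian
+	 * length before the payload.
+	 */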
+	if (tpm_size > 0 && tpm_register == TPM_DATA_FIFO) {
+		phy->tx_buf[total_length++] = tpm_size >> 8;
+		phy->tx_buf[total_length++] = tpm_size;
+	}
+
+	memcpy(&phy->tx_buf[total_length], tpm_data, tpm_size);
+	total_length += tpm_size;
+
+	memset(&phy->tx_buf[total_length], TPM_DUMMY_BYTE, phy->latency);
+
+	spi_xfer.len = total_length + phy->latency;
+
+	ret = spi_sync_transfer(dev, &spi_xfer, 1);
+	if (ret == 0)
+		ret = phy->rx_buf[total_length + phy->latency - 1];
+
+	return st33zp24_status_to_errno(ret);
+} /* st33zp24_spi_send() */
+
+/*
+ * st33zp24_spi_read8_reg
+ * Recv bytes from the TIS register according to the ST33ZP24 SPI protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: the chip status byte, or a negative value on transfer error.
+ */
+static int st33zp24_spi_read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data,
+				  int tpm_size)
+{
+	int total_length = 0, ret;
+	struct st33zp24_spi_phy *phy = phy_id;
+	struct spi_device *dev = phy->spi_device;
+	struct spi_transfer spi_xfer = {
+		.tx_buf = phy->tx_buf,
+		.rx_buf = phy->rx_buf,
+	};
+
+	/* Pre-Header */
+	phy->tx_buf[total_length++] = LOCALITY0;
+	phy->tx_buf[total_length++] = tpm_register;
+
+	memset(&phy->tx_buf[total_length], TPM_DUMMY_BYTE,
+	       phy->latency + tpm_size);
+
+	spi_xfer.len = total_length + phy->latency + tpm_size;
+
+	/* header + status byte + size of the data + status byte */
+	ret = spi_sync_transfer(dev, &spi_xfer, 1);
+	if (tpm_size > 0 && ret == 0) {
+		ret = phy->rx_buf[total_length + phy->latency - 1];
+
+		memcpy(tpm_data, phy->rx_buf + total_length + phy->latency,
+		       tpm_size);
+	}
+
+	return ret;
+} /* st33zp24_spi_read8_reg() */
+
+/*
+ * st33zp24_spi_recv
+ * Recv bytes from the TIS register according to the ST33ZP24 SPI protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: number of bytes read on success, or a negative value on error.
+ */
+static int st33zp24_spi_recv(void *phy_id, u8 tpm_register, u8 *tpm_data,
+			     int tpm_size)
+{
+	int ret;
+
+	ret = st33zp24_spi_read8_reg(phy_id, tpm_register, tpm_data, tpm_size);
+	if (!st33zp24_status_to_errno(ret))
+		return tpm_size;
+	return ret;
+} /* st33zp24_spi_recv() */
+
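+/*
+ * st33zp24_spi_evaluate_latency
+ * Probe how many dummy bytes the chip inserts before its answer by
+ * retrying a register read with an increasing latency until it
+ * succeeds.
+ * @param: phy_id, the phy description
+ * @return: the evaluated latency, or a negative value on failure.
+ */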
+static int st33zp24_spi_evaluate_latency(void *phy_id)
+{
+	struct st33zp24_spi_phy *phy = phy_id;
+	int latency = 1, status = 0;
+	u8 data = 0;
+
+	while (!status && latency < MAX_SPI_LATENCY) {
+		phy->latency = latency;
+		status = st33zp24_spi_read8_reg(phy_id, TPM_INTF_CAPABILITY,
+						&data, 1);
+		latency++;
+	}
+	if (status < 0)
+		return status;
+	if (latency == MAX_SPI_LATENCY)
+		return -ENODEV;
+
+	return latency - 1;
+} /* evaluate_latency() */
+
+static const struct st33zp24_phy_ops spi_phy_ops = {
+	.send = st33zp24_spi_send,
+	.recv = st33zp24_spi_recv,
+};
+
+static const struct acpi_gpio_params lpcpd_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_st33zp24_gpios[] = {
+	{ "lpcpd-gpios", &lpcpd_gpios, 1 },
+	{},
+};
+
+static int st33zp24_spi_acpi_request_resources(struct spi_device *spi_dev)
+{
+	struct tpm_chip *chip = spi_get_drvdata(spi_dev);
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	struct st33zp24_spi_phy *phy = tpm_dev->phy_id;
+	struct gpio_desc *gpiod_lpcpd;
+	struct device *dev = &spi_dev->dev;
+	int ret;
+
+	ret = devm_acpi_dev_add_driver_gpios(dev, acpi_st33zp24_gpios);
+	if (ret)
+		return ret;
+
+	/* Get LPCPD GPIO from ACPI */
+	gpiod_lpcpd = devm_gpiod_get(dev, "lpcpd", GPIOD_OUT_HIGH);
+	if (IS_ERR(gpiod_lpcpd)) {
+		dev_err(dev, "Failed to retrieve lpcpd-gpios from acpi.\n");
+		phy->io_lpcpd = -1;
+		/*
+		 * lpcpd pin is not specified. This is not an issue as
+		 * power management can be also managed by TPM specific
+		 * commands. So leave with a success status code.
+		 */
+		return 0;
+	}
+
+	phy->io_lpcpd = desc_to_gpio(gpiod_lpcpd);
+
+	return 0;
+}
+
+static int st33zp24_spi_of_request_resources(struct spi_device *spi_dev)
+{
+	struct tpm_chip *chip = spi_get_drvdata(spi_dev);
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	struct st33zp24_spi_phy *phy = tpm_dev->phy_id;
+	struct device_node *pp;
+	int gpio;
+	int ret;
+
+	pp = spi_dev->dev.of_node;
+	if (!pp) {
+		dev_err(&spi_dev->dev, "No platform data\n");
+		return -ENODEV;
+	}
+
+	/* Get GPIO from device tree */
+	gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0);
+	if (gpio < 0) {
+		dev_err(&spi_dev->dev,
+			"Failed to retrieve lpcpd-gpios from dts.\n");
+		phy->io_lpcpd = -1;
+		/*
+		 * lpcpd pin is not specified. This is not an issue as
+		 * power management can be also managed by TPM specific
+		 * commands. So leave with a success status code.
+		 */
+		return 0;
+	}
+	/* GPIO request and configuration */
+	ret = devm_gpio_request_one(&spi_dev->dev, gpio,
+			GPIOF_OUT_INIT_HIGH, "TPM IO LPCPD");
+	if (ret) {
+		dev_err(&spi_dev->dev, "Failed to request lpcpd pin\n");
+		return -ENODEV;
+	}
+	phy->io_lpcpd = gpio;
+
+	return 0;
+}
+
+static int st33zp24_spi_request_resources(struct spi_device *dev)
+{
+	struct tpm_chip *chip = spi_get_drvdata(dev);
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	struct st33zp24_spi_phy *phy = tpm_dev->phy_id;
+	struct st33zp24_platform_data *pdata;
+	int ret;
+
+	pdata = dev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&dev->dev, "No platform data\n");
+		return -ENODEV;
+	}
+
+	/* store for later use */
+	phy->io_lpcpd = pdata->io_lpcpd;
+
+	if (gpio_is_valid(pdata->io_lpcpd)) {
+		ret = devm_gpio_request_one(&dev->dev,
+				pdata->io_lpcpd, GPIOF_OUT_INIT_HIGH,
+				"TPM IO_LPCPD");
+		if (ret) {
+			dev_err(&dev->dev, "Failed to request lpcpd pin\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * st33zp24_spi_probe initializes the TPM device
+ * @param: dev, the spi_device description (TPM SPI description).
+ * @return: 0 in case of success.
+ *	 or a negative value describing the error.
+ */
+static int st33zp24_spi_probe(struct spi_device *dev)
+{
+	int ret;
+	struct st33zp24_platform_data *pdata;
+	struct st33zp24_spi_phy *phy;
+
+	/* Check SPI platform functionality */
+	if (!dev) {
+		pr_info("%s: dev is NULL. Device is not accessible.\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	phy = devm_kzalloc(&dev->dev, sizeof(struct st33zp24_spi_phy),
+			   GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	phy->spi_device = dev;
+
+	pdata = dev->dev.platform_data;
+	if (!pdata && dev->dev.of_node) {
+		ret = st33zp24_spi_of_request_resources(dev);
+		if (ret)
+			return ret;
+	} else if (pdata) {
+		ret = st33zp24_spi_request_resources(dev);
+		if (ret)
+			return ret;
+	} else if (ACPI_HANDLE(&dev->dev)) {
+		ret = st33zp24_spi_acpi_request_resources(dev);
+		if (ret)
+			return ret;
+	}
+
+	phy->latency = st33zp24_spi_evaluate_latency(phy);
+	if (phy->latency <= 0)
+		return -ENODEV;
+
+	return st33zp24_probe(phy, &spi_phy_ops, &dev->dev, dev->irq,
+			      phy->io_lpcpd);
+}
+
+/*
+ * st33zp24_spi_remove removes the TPM device
+ * @param: dev, the spi_device description (TPM SPI description).
+ * @return: 0 in case of success.
+ */
+static int st33zp24_spi_remove(struct spi_device *dev)
+{
+	struct tpm_chip *chip = spi_get_drvdata(dev);
+	int ret;
+
+	ret = st33zp24_remove(chip);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static const struct spi_device_id st33zp24_spi_id[] = {
+	{TPM_ST33_SPI, 0},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, st33zp24_spi_id);
+
+static const struct of_device_id of_st33zp24_spi_match[] = {
+	{ .compatible = "st,st33zp24-spi", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, of_st33zp24_spi_match);
+
+static const struct acpi_device_id st33zp24_spi_acpi_match[] = {
+	{"SMO3324"},
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, st33zp24_spi_acpi_match);
+
+static SIMPLE_DEV_PM_OPS(st33zp24_spi_ops, st33zp24_pm_suspend,
+			 st33zp24_pm_resume);
+
+static struct spi_driver st33zp24_spi_driver = {
+	.driver = {
+		.name = TPM_ST33_SPI,
+		.pm = &st33zp24_spi_ops,
+		.of_match_table = of_match_ptr(of_st33zp24_spi_match),
+		.acpi_match_table = ACPI_PTR(st33zp24_spi_acpi_match),
+	},
+	.probe = st33zp24_spi_probe,
+	.remove = st33zp24_spi_remove,
+	.id_table = st33zp24_spi_id,
+};
+
+module_spi_driver(st33zp24_spi_driver);
+
+MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
+MODULE_DESCRIPTION("STM TPM 1.2 SPI ST33 Driver");
+MODULE_VERSION("1.3.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
new file mode 100644
index 0000000..abd675b
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -0,0 +1,662 @@
+/*
+ * STMicroelectronics TPM Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2016 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/freezer.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "../tpm.h"
+#include "st33zp24.h"
+
+#define TPM_ACCESS			0x0
+#define TPM_STS				0x18
+#define TPM_DATA_FIFO			0x24
+#define TPM_INTF_CAPABILITY		0x14
+#define TPM_INT_STATUS			0x10
+#define TPM_INT_ENABLE			0x08
+
+#define LOCALITY0			0
+
+enum st33zp24_access {
+	TPM_ACCESS_VALID = 0x80,
+	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
+	TPM_ACCESS_REQUEST_PENDING = 0x04,
+	TPM_ACCESS_REQUEST_USE = 0x02,
+};
+
+enum st33zp24_status {
+	TPM_STS_VALID = 0x80,
+	TPM_STS_COMMAND_READY = 0x40,
+	TPM_STS_GO = 0x20,
+	TPM_STS_DATA_AVAIL = 0x10,
+	TPM_STS_DATA_EXPECT = 0x08,
+};
+
+enum st33zp24_int_flags {
+	TPM_GLOBAL_INT_ENABLE = 0x80,
+	TPM_INTF_CMD_READY_INT = 0x080,
+	TPM_INTF_FIFO_AVALAIBLE_INT = 0x040,
+	TPM_INTF_WAKE_UP_READY_INT = 0x020,
+	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
+	TPM_INTF_STS_VALID_INT = 0x002,
+	TPM_INTF_DATA_AVAIL_INT = 0x001,
+};
+
+enum tis_defaults {
+	TIS_SHORT_TIMEOUT = 750,
+	TIS_LONG_TIMEOUT = 2000,
+};
+
+/*
+ * clear_interruption clear the pending interrupt.
+ * @param: tpm_dev, the tpm device.
+ * @return: the interrupt status value.
+ */
+static u8 clear_interruption(struct st33zp24_dev *tpm_dev)
+{
+	u8 interrupt;
+
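+	/* The interrupt status register is write-to-clear: read the
+	 * pending bits and write them back to acknowledge them.
+	 */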
+	tpm_dev->ops->recv(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1);
+	tpm_dev->ops->send(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1);
+	return interrupt;
+} /* clear_interruption() */
+
+/*
+ * st33zp24_cancel, cancel the current command execution or
+ * set STS to COMMAND READY.
+ * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h
+ */
+static void st33zp24_cancel(struct tpm_chip *chip)
+{
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	u8 data;
+
+	data = TPM_STS_COMMAND_READY;
+	tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1);
+} /* st33zp24_cancel() */
+
+/*
+ * st33zp24_status return the TPM_STS register
+ * @param: chip, the tpm chip description
+ * @return: the TPM_STS register value.
+ */
+static u8 st33zp24_status(struct tpm_chip *chip)
+{
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	u8 data;
+
+	tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS, &data, 1);
+	return data;
+} /* st33zp24_status() */
+
+/*
+ * check_locality checks if the locality is active
+ * @param: chip, the tpm chip description
+ * @return: true if LOCALITY0 is active, otherwise false
+ */
+static bool check_locality(struct tpm_chip *chip)
+{
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	u8 data;
+	u8 status;
+
+	status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_ACCESS, &data, 1);
+	if (status && (data &
+		(TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+		(TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
+		return true;
+
+	return false;
+} /* check_locality() */
+
+/*
+ * request_locality requests the TPM locality
+ * @param: chip, the chip description
+ * @return: the active locality or negative value.
+ */
+static int request_locality(struct tpm_chip *chip)
+{
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	unsigned long stop;
+	long ret;
+	u8 data;
+
+	if (check_locality(chip))
+		return tpm_dev->locality;
+
+	data = TPM_ACCESS_REQUEST_USE;
+	ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1);
+	if (ret < 0)
+		return ret;
+
+	stop = jiffies + chip->timeout_a;
+
+	/* A locality request usually takes effect right after it is sent */
+	do {
+		if (check_locality(chip))
+			return tpm_dev->locality;
+		msleep(TPM_TIMEOUT);
+	} while (time_before(jiffies, stop));
+
+	/* could not get locality */
+	return -EACCES;
+} /* request_locality() */
+
+/*
+ * release_locality releases the active locality
+ * @param: chip, the tpm chip description.
+ */
+static void release_locality(struct tpm_chip *chip)
+{
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	u8 data;
+
+	data = TPM_ACCESS_ACTIVE_LOCALITY;
+
+	tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1);
+}
+
+/*
+ * get_burstcount returns the burstcount value
+ * @param: chip, the chip description
+ * @return: the burstcount or a negative value on failure.
+ */
+static int get_burstcount(struct tpm_chip *chip)
+{
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	unsigned long stop;
+	int burstcnt, status;
+	u8 temp;
+
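+	/* The 16-bit burstcount is split across TPM_STS bytes 1 and 2;
+	 * poll until the TPM advertises a non-zero value.
+	 */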
+	stop = jiffies + chip->timeout_d;
+	do {
+		status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 1,
+					    &temp, 1);
+		if (status < 0)
+			return -EBUSY;
+
+		burstcnt = temp;
+		status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 2,
+					    &temp, 1);
+		if (status < 0)
+			return -EBUSY;
+
+		burstcnt |= temp << 8;
+		if (burstcnt)
+			return burstcnt;
+		msleep(TPM_TIMEOUT);
+	} while (time_before(jiffies, stop));
+	return -EBUSY;
+} /* get_burstcount() */
+
+/*
+ * wait_for_tpm_stat_cond
+ * @param: chip, chip description
+ * @param: mask, expected mask value
+ * @param: check_cancel, whether the command may be canceled
+ * @param: canceled, set to true if a cancel request was received
+ * @return: true if status == mask or if the command was canceled,
+ * false in other cases.
+ */
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+				bool check_cancel, bool *canceled)
+{
+	u8 status = chip->ops->status(chip);
+
+	*canceled = false;
+	if ((status & mask) == mask)
+		return true;
+	if (check_cancel && chip->ops->req_canceled(chip, status)) {
+		*canceled = true;
+		return true;
+	}
+	return false;
+}
+
+/*
+ * wait_for_stat waits for a TPM_STS value
+ * @param: chip, the tpm chip description
+ * @param: mask, the status mask to wait for
+ * @param: timeout, the timeout
+ * @param: queue, the wait queue.
+ * @param: check_cancel, whether the command can be cancelled
+ * @return: 0 on success, -ETIME if the timeout is reached, or another
+ * negative value on error.
+ */
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
+			wait_queue_head_t *queue, bool check_cancel)
+{
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	unsigned long stop;
+	int ret = 0;
+	bool canceled = false;
+	bool condition;
+	u32 cur_intrs;
+	u8 status;
+
+	/* check current status */
+	status = st33zp24_status(chip);
+	if ((status & mask) == mask)
+		return 0;
+
+	stop = jiffies + timeout;
+
+	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+		cur_intrs = tpm_dev->intrs;
+		clear_interruption(tpm_dev);
+		enable_irq(tpm_dev->irq);
+
+		do {
+			if (ret == -ERESTARTSYS && freezing(current))
+				clear_thread_flag(TIF_SIGPENDING);
+
+			timeout = stop - jiffies;
+			if ((long) timeout <= 0)
+				return -ETIME;
+
+			ret = wait_event_interruptible_timeout(*queue,
+						cur_intrs != tpm_dev->intrs,
+						timeout);
+			clear_interruption(tpm_dev);
+			condition = wait_for_tpm_stat_cond(chip, mask,
+						check_cancel, &canceled);
+			if (ret >= 0 && condition) {
+				if (canceled)
+					return -ECANCELED;
+				return 0;
+			}
+		} while (ret == -ERESTARTSYS && freezing(current));
+
+		disable_irq_nosync(tpm_dev->irq);
+
+	} else {
+		do {
+			msleep(TPM_TIMEOUT);
+			status = chip->ops->status(chip);
+			if ((status & mask) == mask)
+				return 0;
+		} while (time_before(jiffies, stop));
+	}
+
+	return -ETIME;
+} /* wait_for_stat() */
+
+/*
+ * recv_data receives data
+ * @param: chip, the tpm chip description
+ * @param: buf, the buffer where the data are received
+ * @param: count, the number of bytes to receive
+ * @return: the number of bytes read from TPM FIFO.
+ */
+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	int size = 0, burstcnt, len, ret;
+
+	while (size < count &&
+	       wait_for_stat(chip,
+			     TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+			     chip->timeout_c,
+			     &tpm_dev->read_queue, true) == 0) {
+		burstcnt = get_burstcount(chip);
+		if (burstcnt < 0)
+			return burstcnt;
+		len = min_t(int, burstcnt, count - size);
+		ret = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_DATA_FIFO,
+					 buf + size, len);
+		if (ret < 0)
+			return ret;
+
+		size += len;
+	}
+	return size;
+}
+
+/*
+ * tpm_ioserirq_handler the serirq irq handler
+ * @param: irq, the irq number
+ * @param: dev_id, the description of the chip
+ * @return: the status of the handler.
+ */
+static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
+{
+	struct tpm_chip *chip = dev_id;
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+
+	tpm_dev->intrs++;
+	wake_up_interruptible(&tpm_dev->read_queue);
+	disable_irq_nosync(tpm_dev->irq);
+
+	return IRQ_HANDLED;
+} /* tpm_ioserirq_handler() */
+
+/*
+ * st33zp24_send sends TPM commands through the underlying phy.
+ *
+ * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h
+ * @param: buf,	the buffer to send.
+ * @param: len, the number of bytes to send.
+ * @return: In case of success the number of bytes sent.
+ *	    Otherwise, a negative value describing the issue.
+ */
+static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
+			 size_t len)
+{
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	u32 status, i, size, ordinal;
+	int burstcnt = 0;
+	int ret;
+	u8 data;
+
+	if (len < TPM_HEADER_SIZE)
+		return -EBUSY;
+
+	ret = request_locality(chip);
+	if (ret < 0)
+		return ret;
+
+	status = st33zp24_status(chip);
+	if ((status & TPM_STS_COMMAND_READY) == 0) {
+		st33zp24_cancel(chip);
+		if (wait_for_stat
+		    (chip, TPM_STS_COMMAND_READY, chip->timeout_b,
+		     &tpm_dev->read_queue, false) < 0) {
+			ret = -ETIME;
+			goto out_err;
+		}
+	}
+
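+	/* Send all bytes but the last one, then check DATA_EXPECT: it
+	 * must still be set, since the TPM is waiting for the final
+	 * byte of the command.
+	 */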
+	for (i = 0; i < len - 1;) {
+		burstcnt = get_burstcount(chip);
+		if (burstcnt < 0)
+			return burstcnt;
+		size = min_t(int, len - i - 1, burstcnt);
+		ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_DATA_FIFO,
+					 buf + i, size);
+		if (ret < 0)
+			goto out_err;
+
+		i += size;
+	}
+
+	status = st33zp24_status(chip);
+	if ((status & TPM_STS_DATA_EXPECT) == 0) {
+		ret = -EIO;
+		goto out_err;
+	}
+
+	ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_DATA_FIFO,
+				 buf + len - 1, 1);
+	if (ret < 0)
+		goto out_err;
+
+	status = st33zp24_status(chip);
+	if ((status & TPM_STS_DATA_EXPECT) != 0) {
+		ret = -EIO;
+		goto out_err;
+	}
+
+	data = TPM_STS_GO;
+	ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1);
+	if (ret < 0)
+		goto out_err;
+
+	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+		ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+
+		ret = wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+				tpm_calc_ordinal_duration(chip, ordinal),
+				&tpm_dev->read_queue, false);
+		if (ret < 0)
+			goto out_err;
+	}
+
+	return len;
+out_err:
+	st33zp24_cancel(chip);
+	release_locality(chip);
+	return ret;
+}
+
+/*
+ * st33zp24_recv receives the TPM response through the TPM phy.
+ * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h.
+ * @param: buf,	the buffer to store the data.
+ * @param: count, the size of the buffer.
+ * @return: In case of success the number of bytes received.
+ *	    Otherwise, a negative value describing the issue.
+ */
+static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
+			    size_t count)
+{
+	int size = 0;
+	u32 expected;
+
+	if (!chip)
+		return -EBUSY;
+
+	if (count < TPM_HEADER_SIZE) {
+		size = -EIO;
+		goto out;
+	}
+
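+	/* Read the response header first; its big endian length field
+	 * tells how many more bytes to pull from the FIFO.
+	 */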
+	size = recv_data(chip, buf, TPM_HEADER_SIZE);
+	if (size < TPM_HEADER_SIZE) {
+		dev_err(&chip->dev, "Unable to read header\n");
+		goto out;
+	}
+
+	expected = be32_to_cpu(*(__be32 *)(buf + 2));
+	if (expected > count || expected < TPM_HEADER_SIZE) {
+		size = -EIO;
+		goto out;
+	}
+
+	size += recv_data(chip, &buf[TPM_HEADER_SIZE],
+			expected - TPM_HEADER_SIZE);
+	if (size < expected) {
+		dev_err(&chip->dev, "Unable to read remainder of result\n");
+		size = -ETIME;
+	}
+
+out:
+	st33zp24_cancel(chip);
+	release_locality(chip);
+	return size;
+}
+
+/*
+ * st33zp24_req_canceled
+ * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h.
+ * @param: status, the TPM status.
+ * @return: true if the TPM is back in the command-ready state, i.e.
+ * the command was canceled; false otherwise.
+ */
+static bool st33zp24_req_canceled(struct tpm_chip *chip, u8 status)
+{
+	return (status == TPM_STS_COMMAND_READY);
+}
+
+static const struct tpm_class_ops st33zp24_tpm = {
+	.flags = TPM_OPS_AUTO_STARTUP,
+	.send = st33zp24_send,
+	.recv = st33zp24_recv,
+	.cancel = st33zp24_cancel,
+	.status = st33zp24_status,
+	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+	.req_canceled = st33zp24_req_canceled,
+};
+
+/*
+ * st33zp24_probe initializes the TPM device
+ * @param: phy_id, the phy description
+ * @param: ops, the phy send/recv operations
+ * @param: dev, the device owning the phy
+ * @param: irq, the irq number, or 0 when running without interrupts
+ * @param: io_lpcpd, the lpcpd gpio, or a negative value when unused
+ * @return: 0 in case of success.
+ *	 or a negative value describing the error.
+ */
+int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
+		   struct device *dev, int irq, int io_lpcpd)
+{
+	int ret;
+	u8 intmask = 0;
+	struct tpm_chip *chip;
+	struct st33zp24_dev *tpm_dev;
+
+	chip = tpmm_chip_alloc(dev, &st33zp24_tpm);
+	if (IS_ERR(chip))
+		return PTR_ERR(chip);
+
+	tpm_dev = devm_kzalloc(dev, sizeof(struct st33zp24_dev),
+			       GFP_KERNEL);
+	if (!tpm_dev)
+		return -ENOMEM;
+
+	tpm_dev->phy_id = phy_id;
+	tpm_dev->ops = ops;
+	dev_set_drvdata(&chip->dev, tpm_dev);
+
+	chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+	chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+	chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+	chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
+	tpm_dev->locality = LOCALITY0;
+
+	if (irq) {
+		/* INTERRUPT Setup */
+		init_waitqueue_head(&tpm_dev->read_queue);
+		tpm_dev->intrs = 0;
+
+		if (request_locality(chip) != LOCALITY0) {
+			ret = -ENODEV;
+			goto _tpm_clean_answer;
+		}
+
+		clear_interruption(tpm_dev);
+		ret = devm_request_irq(dev, irq, tpm_ioserirq_handler,
+				IRQF_TRIGGER_HIGH, "TPM SERIRQ management",
+				chip);
+		if (ret < 0) {
+			dev_err(&chip->dev, "TPM SERIRQ signals %d not available\n",
+				irq);
+			goto _tpm_clean_answer;
+		}
+
+		intmask |= TPM_INTF_CMD_READY_INT
+			|  TPM_INTF_STS_VALID_INT
+			|  TPM_INTF_DATA_AVAIL_INT;
+
+		ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_INT_ENABLE,
+					 &intmask, 1);
+		if (ret < 0)
+			goto _tpm_clean_answer;
+
+		intmask = TPM_GLOBAL_INT_ENABLE;
+		ret = tpm_dev->ops->send(tpm_dev->phy_id, (TPM_INT_ENABLE + 3),
+					 &intmask, 1);
+		if (ret < 0)
+			goto _tpm_clean_answer;
+
+		tpm_dev->irq = irq;
+		chip->flags |= TPM_CHIP_FLAG_IRQ;
+
+		disable_irq_nosync(tpm_dev->irq);
+	}
+
+	return tpm_chip_register(chip);
+_tpm_clean_answer:
+	dev_info(&chip->dev, "TPM initialization fail\n");
+	return ret;
+}
+EXPORT_SYMBOL(st33zp24_probe);
+
+/*
+ * st33zp24_remove removes the TPM device.
+ * @param: chip, the tpm_chip description.
+ * @return: 0 in case of success.
+ */
+int st33zp24_remove(struct tpm_chip *chip)
+{
+	tpm_chip_unregister(chip);
+	return 0;
+}
+EXPORT_SYMBOL(st33zp24_remove);
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * st33zp24_pm_suspend suspends the TPM device.
+ * @param: dev, the device to suspend.
+ * @return: 0 in case of success.
+ */
+int st33zp24_pm_suspend(struct device *dev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+
+	int ret = 0;
+
+	if (gpio_is_valid(tpm_dev->io_lpcpd))
+		gpio_set_value(tpm_dev->io_lpcpd, 0);
+	else
+		ret = tpm_pm_suspend(dev);
+
+	return ret;
+} /* st33zp24_pm_suspend() */
+EXPORT_SYMBOL(st33zp24_pm_suspend);
+
+/*
+ * st33zp24_pm_resume resumes the TPM device.
+ * @param: dev, the device to resume.
+ * @return: 0 in case of success.
+ */
+int st33zp24_pm_resume(struct device *dev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+	int ret = 0;
+
+	if (gpio_is_valid(tpm_dev->io_lpcpd)) {
+		gpio_set_value(tpm_dev->io_lpcpd, 1);
+		ret = wait_for_stat(chip,
+				TPM_STS_VALID, chip->timeout_b,
+				&tpm_dev->read_queue, false);
+	} else {
+		ret = tpm_pm_resume(dev);
+		if (!ret)
+			tpm_do_selftest(chip);
+	}
+	return ret;
+} /* st33zp24_pm_resume() */
+EXPORT_SYMBOL(st33zp24_pm_resume);
+#endif
+
+MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
+MODULE_DESCRIPTION("ST33ZP24 TPM 1.2 driver");
+MODULE_VERSION("1.3.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h
new file mode 100644
index 0000000..6f4a419
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/st33zp24.h
@@ -0,0 +1,49 @@
+/*
+ * STMicroelectronics TPM Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2016  STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_ST33ZP24_H__
+#define __LOCAL_ST33ZP24_H__
+
+#define TPM_WRITE_DIRECTION             0x80
+#define TPM_BUFSIZE                     2048
+
+struct st33zp24_dev {
+	struct tpm_chip *chip;
+	void *phy_id;
+	const struct st33zp24_phy_ops *ops;
+	int locality;
+	int irq;
+	u32 intrs;
+	int io_lpcpd;
+	wait_queue_head_t read_queue;
+};
+
+
+struct st33zp24_phy_ops {
+	int (*send)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size);
+	int (*recv)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size);
+};
+
+#ifdef CONFIG_PM_SLEEP
+int st33zp24_pm_suspend(struct device *dev);
+int st33zp24_pm_resume(struct device *dev);
+#endif
+
+int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
+		   struct device *dev, int irq, int io_lpcpd);
+int st33zp24_remove(struct tpm_chip *chip);
+#endif /* __LOCAL_ST33ZP24_H__ */
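
The two phy_ops callbacks above are the entire contract between this core and
a bus backend: each moves tpm_size bytes to or from an 8-bit TPM register. A
minimal sketch of how a hypothetical backend could plug in, in the spirit of
the in-tree i2c/spi phys (the myphy_* names are illustrative only, not part of
the driver):

    /* Sketch: a hypothetical bus backend for the st33zp24 core. */
    static int myphy_send(void *phy_id, u8 tpm_register, u8 *tpm_data,
                          int tpm_size)
    {
            /* write tpm_size bytes from tpm_data to tpm_register on the bus */
            return 0;               /* 0 or a negative errno */
    }

    static int myphy_recv(void *phy_id, u8 tpm_register, u8 *tpm_data,
                          int tpm_size)
    {
            /* read tpm_size bytes from tpm_register into tpm_data */
            return tpm_size;        /* bytes read or a negative errno */
    }

    static const struct st33zp24_phy_ops myphy_ops = {
            .send = myphy_send,
            .recv = myphy_recv,
    };

    /* From the bus probe path:
     *      return st33zp24_probe(my_phy, &myphy_ops, dev, irq, io_lpcpd);
     */
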
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
new file mode 100644
index 0000000..46caadc
--- /dev/null
+++ b/drivers/char/tpm/tpm-chip.c
@@ -0,0 +1,520 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * Authors:
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * TPM chip management routines.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/freezer.h>
+#include <linux/major.h>
+#include <linux/tpm_eventlog.h>
+#include <linux/hw_random.h>
+#include "tpm.h"
+
+DEFINE_IDR(dev_nums_idr);
+static DEFINE_MUTEX(idr_lock);
+
+struct class *tpm_class;
+struct class *tpmrm_class;
+dev_t tpm_devt;
+
+/**
+ * tpm_try_get_ops() - Get a ref to the tpm_chip
+ * @chip: Chip to ref
+ *
+ * The caller must already have some kind of locking to ensure that chip is
+ * valid. This function will lock the chip so that the ops member can be
+ * accessed safely. The locking prevents tpm_chip_unregister from
+ * completing, so it should not be held for long periods.
+ *
+ * Returns 0 on success, or a negative errno if the chip could not be acquired.
+ */
+int tpm_try_get_ops(struct tpm_chip *chip)
+{
+	int rc = -EIO;
+
+	get_device(&chip->dev);
+
+	down_read(&chip->ops_sem);
+	if (!chip->ops)
+		goto out_lock;
+
+	return 0;
+out_lock:
+	up_read(&chip->ops_sem);
+	put_device(&chip->dev);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_try_get_ops);
+
+/**
+ * tpm_put_ops() - Release a ref to the tpm_chip
+ * @chip: Chip to put
+ *
+ * This is the opposite pair to tpm_try_get_ops(). After this returns chip may
+ * be kfree'd.
+ */
+void tpm_put_ops(struct tpm_chip *chip)
+{
+	up_read(&chip->ops_sem);
+	put_device(&chip->dev);
+}
+EXPORT_SYMBOL_GPL(tpm_put_ops);
+
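
tpm_try_get_ops() and tpm_put_ops() are meant to bracket every use of
chip->ops. A minimal sketch of the expected calling pattern (example_use_chip
is hypothetical):

    /* Sketch: the canonical guard around chip->ops users. */
    static int example_use_chip(struct tpm_chip *chip)
    {
            int rc;

            rc = tpm_try_get_ops(chip);     /* fails once unregistered */
            if (rc)
                    return rc;

            /* chip->ops is stable and non-NULL until the matching put */
            dev_dbg(&chip->dev, "status 0x%x\n", chip->ops->status(chip));

            tpm_put_ops(chip);
            return 0;
    }
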
+/**
+ * tpm_default_chip() - find a TPM chip and get a reference to it
+ */
+struct tpm_chip *tpm_default_chip(void)
+{
+	struct tpm_chip *chip, *res = NULL;
+	int chip_num = 0;
+	int chip_prev;
+
+	mutex_lock(&idr_lock);
+
+	do {
+		chip_prev = chip_num;
+		chip = idr_get_next(&dev_nums_idr, &chip_num);
+		if (chip) {
+			get_device(&chip->dev);
+			res = chip;
+			break;
+		}
+	} while (chip_prev != chip_num);
+
+	mutex_unlock(&idr_lock);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(tpm_default_chip);
+
+/**
+ * tpm_find_get_ops() - find and reserve a TPM chip
+ * @chip:	a &struct tpm_chip instance, %NULL for the default chip
+ *
+ * Finds a TPM chip and reserves its class device and operations. The chip must
+ * be released with tpm_put_ops() after use.
+ * This function is for internal use only. It supports existing TPM callers
+ * by accepting NULL, but those callers should be converted to pass in a chip
+ * directly.
+ *
+ * Return:
+ * A reserved &struct tpm_chip instance.
+ * %NULL if a chip is not found.
+ * %NULL if the chip is not available.
+ */
+struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip)
+{
+	int rc;
+
+	if (chip) {
+		if (!tpm_try_get_ops(chip))
+			return chip;
+		return NULL;
+	}
+
+	chip = tpm_default_chip();
+	if (!chip)
+		return NULL;
+	rc = tpm_try_get_ops(chip);
+	/* release additional reference we got from tpm_default_chip() */
+	put_device(&chip->dev);
+	if (rc)
+		return NULL;
+	return chip;
+}
+
+/**
+ * tpm_dev_release() - free chip memory and the device number
+ * @dev: the character device for the TPM chip
+ *
+ * This is used as the release function for the character device.
+ */
+static void tpm_dev_release(struct device *dev)
+{
+	struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+
+	mutex_lock(&idr_lock);
+	idr_remove(&dev_nums_idr, chip->dev_num);
+	mutex_unlock(&idr_lock);
+
+	kfree(chip->log.bios_event_log);
+	kfree(chip->work_space.context_buf);
+	kfree(chip->work_space.session_buf);
+	kfree(chip);
+}
+
+static void tpm_devs_release(struct device *dev)
+{
+	struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs);
+
+	/* release the master device reference */
+	put_device(&chip->dev);
+}
+
+/**
+ * tpm_class_shutdown() - prepare the TPM device for loss of power.
+ * @dev: device to which the chip is associated.
+ *
+ * Issues a TPM2_Shutdown command prior to loss of power, as required by the
+ * TPM 2.0 spec.
+ * Then, calls bus- and device- specific shutdown code.
+ *
+ * XXX: This codepath relies on the fact that sysfs is not enabled for
+ * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2
+ * has sysfs support enabled before TPM sysfs's implicit locking is fixed.
+ */
+static int tpm_class_shutdown(struct device *dev)
+{
+	struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+		down_write(&chip->ops_sem);
+		tpm2_shutdown(chip, TPM2_SU_CLEAR);
+		chip->ops = NULL;
+		up_write(&chip->ops_sem);
+	}
+
+	return 0;
+}
+
+/**
+ * tpm_chip_alloc() - allocate a new struct tpm_chip instance
+ * @pdev: device to which the chip is associated
+ *        At this point pdev must be initialized, but does not have to
+ *        be registered
+ * @ops: struct tpm_class_ops instance
+ *
+ * Allocates a new struct tpm_chip instance and assigns a free
+ * device number for it. Must be paired with put_device(&chip->dev).
+ */
+struct tpm_chip *tpm_chip_alloc(struct device *pdev,
+				const struct tpm_class_ops *ops)
+{
+	struct tpm_chip *chip;
+	int rc;
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (chip == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&chip->tpm_mutex);
+	init_rwsem(&chip->ops_sem);
+
+	chip->ops = ops;
+
+	mutex_lock(&idr_lock);
+	rc = idr_alloc(&dev_nums_idr, NULL, 0, TPM_NUM_DEVICES, GFP_KERNEL);
+	mutex_unlock(&idr_lock);
+	if (rc < 0) {
+		dev_err(pdev, "No available tpm device numbers\n");
+		kfree(chip);
+		return ERR_PTR(rc);
+	}
+	chip->dev_num = rc;
+
+	device_initialize(&chip->dev);
+	device_initialize(&chip->devs);
+
+	chip->dev.class = tpm_class;
+	chip->dev.class->shutdown_pre = tpm_class_shutdown;
+	chip->dev.release = tpm_dev_release;
+	chip->dev.parent = pdev;
+	chip->dev.groups = chip->groups;
+
+	chip->devs.parent = pdev;
+	chip->devs.class = tpmrm_class;
+	chip->devs.release = tpm_devs_release;
+	/* get extra reference on main device to hold on
+	 * behalf of devs.  This holds the chip structure
+	 * while cdevs is in use.  The corresponding put
+	 * is in the tpm_devs_release (TPM2 only)
+	 */
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		get_device(&chip->dev);
+
+	if (chip->dev_num == 0)
+		chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR);
+	else
+		chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num);
+
+	chip->devs.devt =
+		MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES);
+
+	rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num);
+	if (rc)
+		goto out;
+	rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num);
+	if (rc)
+		goto out;
+
+	if (!pdev)
+		chip->flags |= TPM_CHIP_FLAG_VIRTUAL;
+
+	cdev_init(&chip->cdev, &tpm_fops);
+	cdev_init(&chip->cdevs, &tpmrm_fops);
+	chip->cdev.owner = THIS_MODULE;
+	chip->cdevs.owner = THIS_MODULE;
+
+	chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!chip->work_space.context_buf) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!chip->work_space.session_buf) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	chip->locality = -1;
+	return chip;
+
+out:
+	put_device(&chip->devs);
+	put_device(&chip->dev);
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tpm_chip_alloc);
+
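
A sketch of the manual pairing the comment above requires when the devm
variant below is not used (my_ops is a placeholder tpm_class_ops instance;
error handling elided):

    /* Sketch: non-devm allocation paired with put_device(). */
    chip = tpm_chip_alloc(pdev, &my_ops);
    if (IS_ERR(chip))
            return PTR_ERR(chip);
    /* ... register and use the chip ... */
    put_device(&chip->dev);         /* drops the allocation reference */
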
+/**
+ * tpmm_chip_alloc() - allocate a new struct tpm_chip instance
+ * @pdev: parent device to which the chip is associated
+ * @ops: struct tpm_class_ops instance
+ *
+ * Same as tpm_chip_alloc except devm is used to do the put_device
+ */
+struct tpm_chip *tpmm_chip_alloc(struct device *pdev,
+				 const struct tpm_class_ops *ops)
+{
+	struct tpm_chip *chip;
+	int rc;
+
+	chip = tpm_chip_alloc(pdev, ops);
+	if (IS_ERR(chip))
+		return chip;
+
+	rc = devm_add_action_or_reset(pdev,
+				      (void (*)(void *)) put_device,
+				      &chip->dev);
+	if (rc)
+		return ERR_PTR(rc);
+
+	dev_set_drvdata(pdev, chip);
+
+	return chip;
+}
+EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
+
+static int tpm_add_char_device(struct tpm_chip *chip)
+{
+	int rc;
+
+	rc = cdev_device_add(&chip->cdev, &chip->dev);
+	if (rc) {
+		dev_err(&chip->dev,
+			"unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
+			dev_name(&chip->dev), MAJOR(chip->dev.devt),
+			MINOR(chip->dev.devt), rc);
+		return rc;
+	}
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+		rc = cdev_device_add(&chip->cdevs, &chip->devs);
+		if (rc) {
+			dev_err(&chip->devs,
+				"unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
+				dev_name(&chip->devs), MAJOR(chip->devs.devt),
+				MINOR(chip->devs.devt), rc);
+			return rc;
+		}
+	}
+
+	/* Make the chip available. */
+	mutex_lock(&idr_lock);
+	idr_replace(&dev_nums_idr, chip, chip->dev_num);
+	mutex_unlock(&idr_lock);
+
+	return rc;
+}
+
+static void tpm_del_char_device(struct tpm_chip *chip)
+{
+	cdev_device_del(&chip->cdev, &chip->dev);
+
+	/* Make the chip unavailable. */
+	mutex_lock(&idr_lock);
+	idr_replace(&dev_nums_idr, NULL, chip->dev_num);
+	mutex_unlock(&idr_lock);
+
+	/* Make the driver uncallable. */
+	down_write(&chip->ops_sem);
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		tpm2_shutdown(chip, TPM2_SU_CLEAR);
+	chip->ops = NULL;
+	up_write(&chip->ops_sem);
+}
+
+static void tpm_del_legacy_sysfs(struct tpm_chip *chip)
+{
+	struct attribute **i;
+
+	if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL))
+		return;
+
+	sysfs_remove_link(&chip->dev.parent->kobj, "ppi");
+
+	for (i = chip->groups[0]->attrs; *i != NULL; ++i)
+		sysfs_remove_link(&chip->dev.parent->kobj, (*i)->name);
+}
+
+/* For compatibility with legacy sysfs paths we provide symlinks from the
+ * parent dev directory to selected names within the tpm chip directory. Old
+ * kernel versions created these files directly under the parent.
+ */
+static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
+{
+	struct attribute **i;
+	int rc;
+
+	if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL))
+		return 0;
+
+	rc = __compat_only_sysfs_link_entry_to_kobj(
+		    &chip->dev.parent->kobj, &chip->dev.kobj, "ppi");
+	if (rc && rc != -ENOENT)
+		return rc;
+
+	/* All the names from tpm-sysfs */
+	for (i = chip->groups[0]->attrs; *i != NULL; ++i) {
+		rc = __compat_only_sysfs_link_entry_to_kobj(
+		    &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name);
+		if (rc) {
+			tpm_del_legacy_sysfs(chip);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
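
As an illustration, for a TPM 1.2 chip registered as tpm0 this recreates links
such as <parent>/pcrs and <parent>/ppi in the parent device's sysfs directory,
each pointing into the tpm0 directory, so tools written against the
pre-symlink layout keep working.
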
+static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
+
+	return tpm_get_random(chip, data, max);
+}
+
+static int tpm_add_hwrng(struct tpm_chip *chip)
+{
+	if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+		return 0;
+
+	snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
+		 "tpm-rng-%d", chip->dev_num);
+	chip->hwrng.name = chip->hwrng_name;
+	chip->hwrng.read = tpm_hwrng_read;
+	return hwrng_register(&chip->hwrng);
+}
+
+/*
+ * tpm_chip_register() - create a character device for the TPM chip
+ * @chip: TPM chip to use.
+ *
+ * Creates a character device for the TPM chip and adds sysfs attributes for
+ * the device. As the last step this function adds the chip to the list of TPM
+ * chips available for in-kernel use.
+ *
+ * This function should be only called after the chip initialization is
+ * complete.
+ */
+int tpm_chip_register(struct tpm_chip *chip)
+{
+	int rc;
+
+	if (chip->ops->flags & TPM_OPS_AUTO_STARTUP) {
+		if (chip->flags & TPM_CHIP_FLAG_TPM2)
+			rc = tpm2_auto_startup(chip);
+		else
+			rc = tpm1_auto_startup(chip);
+		if (rc)
+			return rc;
+	}
+
+	tpm_sysfs_add_device(chip);
+
+	rc = tpm_bios_log_setup(chip);
+	if (rc != 0 && rc != -ENODEV)
+		return rc;
+
+	tpm_add_ppi(chip);
+
+	rc = tpm_add_hwrng(chip);
+	if (rc)
+		goto out_ppi;
+
+	rc = tpm_add_char_device(chip);
+	if (rc)
+		goto out_hwrng;
+
+	rc = tpm_add_legacy_sysfs(chip);
+	if (rc) {
+		tpm_chip_unregister(chip);
+		return rc;
+	}
+
+	return 0;
+
+out_hwrng:
+	if (IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+		hwrng_unregister(&chip->hwrng);
+out_ppi:
+	tpm_bios_log_teardown(chip);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_chip_register);
+
+/*
+ * tpm_chip_unregister() - release the TPM driver
+ * @chip: TPM chip to use.
+ *
+ * Takes the chip first away from the list of available TPM chips and then
+ * cleans up all the resources reserved by tpm_chip_register().
+ *
+ * Once this function returns, the driver callbacks in 'ops' will not be
+ * running and will no longer start.
+ *
+ * NOTE: This function should be only called before deinitializing chip
+ * resources.
+ */
+void tpm_chip_unregister(struct tpm_chip *chip)
+{
+	tpm_del_legacy_sysfs(chip);
+	if (IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+		hwrng_unregister(&chip->hwrng);
+	tpm_bios_log_teardown(chip);
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		cdev_device_del(&chip->cdevs, &chip->devs);
+	tpm_del_char_device(chip);
+}
+EXPORT_SYMBOL_GPL(tpm_chip_unregister);
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
new file mode 100644
index 0000000..e4a04b2
--- /dev/null
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Copyright (C) 2013 Obsidian Research Corp
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * Device file system interface to the TPM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "tpm.h"
+#include "tpm-dev.h"
+
+static void user_reader_timeout(struct timer_list *t)
+{
+	struct file_priv *priv = from_timer(priv, t, user_read_timer);
+
+	pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
+		task_tgid_nr(current));
+
+	schedule_work(&priv->work);
+}
+
+static void timeout_work(struct work_struct *work)
+{
+	struct file_priv *priv = container_of(work, struct file_priv, work);
+
+	mutex_lock(&priv->buffer_mutex);
+	priv->data_pending = 0;
+	memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
+	mutex_unlock(&priv->buffer_mutex);
+}
+
+void tpm_common_open(struct file *file, struct tpm_chip *chip,
+		     struct file_priv *priv)
+{
+	priv->chip = chip;
+	mutex_init(&priv->buffer_mutex);
+	timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
+	INIT_WORK(&priv->work, timeout_work);
+
+	file->private_data = priv;
+}
+
+ssize_t tpm_common_read(struct file *file, char __user *buf,
+			size_t size, loff_t *off)
+{
+	struct file_priv *priv = file->private_data;
+	ssize_t ret_size = 0;
+	int rc;
+
+	del_singleshot_timer_sync(&priv->user_read_timer);
+	flush_work(&priv->work);
+	mutex_lock(&priv->buffer_mutex);
+
+	if (priv->data_pending) {
+		ret_size = min_t(ssize_t, size, priv->data_pending);
+		rc = copy_to_user(buf, priv->data_buffer, ret_size);
+		memset(priv->data_buffer, 0, priv->data_pending);
+		if (rc)
+			ret_size = -EFAULT;
+
+		priv->data_pending = 0;
+	}
+
+	mutex_unlock(&priv->buffer_mutex);
+	return ret_size;
+}
+
+ssize_t tpm_common_write(struct file *file, const char __user *buf,
+			 size_t size, loff_t *off, struct tpm_space *space)
+{
+	struct file_priv *priv = file->private_data;
+	size_t in_size = size;
+	ssize_t out_size;
+
+	if (in_size > TPM_BUFSIZE)
+		return -E2BIG;
+
+	mutex_lock(&priv->buffer_mutex);
+
+	/* Cannot perform a write until the read has cleared either via
+	 * tpm_read or a user_read_timer timeout. This also prevents split
+	 * buffered writes from blocking here.
+	 */
+	if (priv->data_pending != 0) {
+		mutex_unlock(&priv->buffer_mutex);
+		return -EBUSY;
+	}
+
+	if (copy_from_user(priv->data_buffer, (void __user *)buf, in_size)) {
+		mutex_unlock(&priv->buffer_mutex);
+		return -EFAULT;
+	}
+
+	if (in_size < 6 ||
+	    in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
+		mutex_unlock(&priv->buffer_mutex);
+		return -EINVAL;
+	}
+
+	/* atomic tpm command send and result receive. We only hold the ops
+	 * lock during this period so that the tpm can be unregistered even if
+	 * the char dev is held open.
+	 */
+	if (tpm_try_get_ops(priv->chip)) {
+		mutex_unlock(&priv->buffer_mutex);
+		return -EPIPE;
+	}
+	out_size = tpm_transmit(priv->chip, space, priv->data_buffer,
+				sizeof(priv->data_buffer), 0);
+
+	tpm_put_ops(priv->chip);
+	if (out_size < 0) {
+		mutex_unlock(&priv->buffer_mutex);
+		return out_size;
+	}
+
+	priv->data_pending = out_size;
+	mutex_unlock(&priv->buffer_mutex);
+
+	/* Set a timeout by which the reader must come claim the result */
+	mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+
+	return in_size;
+}
+
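
These two entry points define the user-space protocol for the TPM character
devices: each write() must carry one complete command (header plus body), the
response must be read() back before the next write (which otherwise fails with
-EBUSY), and an unclaimed result is discarded once the 120 second timer above
fires. A hedged user-space sketch, assuming cmd[] already holds a well-formed
command:

    /* Sketch: one command/response round trip against /dev/tpm0. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    static int tpm_round_trip(const uint8_t *cmd, size_t cmd_len,
                              uint8_t *rsp, size_t rsp_max)
    {
            int fd = open("/dev/tpm0", O_RDWR);
            ssize_t n;

            if (fd < 0)
                    return -1;
            /* the whole command must arrive in a single write() */
            if (write(fd, cmd, cmd_len) != (ssize_t)cmd_len) {
                    close(fd);
                    return -1;
            }
            /* claim the result before issuing the next command */
            n = read(fd, rsp, rsp_max);
            close(fd);
            return (int)n;
    }
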
+/*
+ * Called on file close
+ */
+void tpm_common_release(struct file *file, struct file_priv *priv)
+{
+	del_singleshot_timer_sync(&priv->user_read_timer);
+	flush_work(&priv->work);
+	file->private_data = NULL;
+	priv->data_pending = 0;
+}
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
new file mode 100644
index 0000000..ebd74ab
--- /dev/null
+++ b/drivers/char/tpm/tpm-dev.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Copyright (C) 2013 Obsidian Research Corp
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * Device file system interface to the TPM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+#include <linux/slab.h>
+#include "tpm-dev.h"
+
+static int tpm_open(struct inode *inode, struct file *file)
+{
+	struct tpm_chip *chip;
+	struct file_priv *priv;
+
+	chip = container_of(inode->i_cdev, struct tpm_chip, cdev);
+
+	/* Only one process may hold the chip open at a time; this
+	 * is enforced atomically through the is_open bit, so no
+	 * extra locking is needed here. */
+	if (test_and_set_bit(0, &chip->is_open)) {
+		dev_dbg(&chip->dev, "Another process owns this TPM\n");
+		return -EBUSY;
+	}
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (priv == NULL)
+		goto out;
+
+	tpm_common_open(file, chip, priv);
+
+	return 0;
+
+ out:
+	clear_bit(0, &chip->is_open);
+	return -ENOMEM;
+}
+
+static ssize_t tpm_write(struct file *file, const char __user *buf,
+			 size_t size, loff_t *off)
+{
+	return tpm_common_write(file, buf, size, off, NULL);
+}
+
+/*
+ * Called on file close
+ */
+static int tpm_release(struct inode *inode, struct file *file)
+{
+	struct file_priv *priv = file->private_data;
+
+	tpm_common_release(file, priv);
+	clear_bit(0, &priv->chip->is_open);
+	kfree(priv);
+
+	return 0;
+}
+
+const struct file_operations tpm_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.open = tpm_open,
+	.read = tpm_common_read,
+	.write = tpm_write,
+	.release = tpm_release,
+};
diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h
new file mode 100644
index 0000000..b24cfb4
--- /dev/null
+++ b/drivers/char/tpm/tpm-dev.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TPM_DEV_H
+#define _TPM_DEV_H
+
+#include "tpm.h"
+
+struct file_priv {
+	struct tpm_chip *chip;
+
+	/* Data passed to and from the tpm via the read/write calls */
+	size_t data_pending;
+	struct mutex buffer_mutex;
+
+	struct timer_list user_read_timer;      /* user needs to claim result */
+	struct work_struct work;
+
+	u8 data_buffer[TPM_BUFSIZE];
+};
+
+void tpm_common_open(struct file *file, struct tpm_chip *chip,
+		     struct file_priv *priv);
+ssize_t tpm_common_read(struct file *file, char __user *buf,
+			size_t size, loff_t *off);
+ssize_t tpm_common_write(struct file *file, const char __user *buf,
+			 size_t size, loff_t *off, struct tpm_space *space);
+void tpm_common_release(struct file *file, struct file_priv *priv);
+
+#endif
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
new file mode 100644
index 0000000..7d958ff
--- /dev/null
+++ b/drivers/char/tpm/tpm-interface.c
@@ -0,0 +1,1441 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * Note, the TPM chip is not interrupt driven (only polling)
+ * and can have very long timeouts (minutes!). Hence the unusual
+ * calls to msleep.
+ *
+ */
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/freezer.h>
+#include <linux/tpm_eventlog.h>
+
+#include "tpm.h"
+
+#define TPM_MAX_ORDINAL 243
+#define TSC_MAX_ORDINAL 12
+#define TPM_PROTECTED_COMMAND 0x00
+#define TPM_CONNECTION_COMMAND 0x40
+
+/*
+ * Bug workaround - some TPM's don't flush the most
+ * recently changed pcr on suspend, so force the flush
+ * with an extend to the selected _unused_ non-volatile pcr.
+ */
+static int tpm_suspend_pcr;
+module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644);
+MODULE_PARM_DESC(suspend_pcr,
+		 "PCR to use for dummy writes to facilitate flush on suspend.");
+
+/*
+ * Array with one entry per ordinal defining the maximum amount
+ * of time the chip could take to return the result.  The ordinal
+ * designation of short, medium or long is defined in a table in
+ * TCG Specification TPM Main Part 2 TPM Structures Section 17. The
+ * values of the SHORT, MEDIUM, and LONG durations are retrieved
+ * from the chip during initialization with a call to tpm_get_timeouts.
+ */
+static const u8 tpm_ordinal_duration[TPM_MAX_ORDINAL] = {
+	TPM_UNDEFINED,		/* 0 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,		/* 5 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_SHORT,		/* 10 */
+	TPM_SHORT,
+	TPM_MEDIUM,
+	TPM_LONG,
+	TPM_LONG,
+	TPM_MEDIUM,		/* 15 */
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_MEDIUM,
+	TPM_LONG,
+	TPM_SHORT,		/* 20 */
+	TPM_SHORT,
+	TPM_MEDIUM,
+	TPM_MEDIUM,
+	TPM_MEDIUM,
+	TPM_SHORT,		/* 25 */
+	TPM_SHORT,
+	TPM_MEDIUM,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_MEDIUM,		/* 30 */
+	TPM_LONG,
+	TPM_MEDIUM,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_SHORT,		/* 35 */
+	TPM_MEDIUM,
+	TPM_MEDIUM,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_MEDIUM,		/* 40 */
+	TPM_LONG,
+	TPM_MEDIUM,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_SHORT,		/* 45 */
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_LONG,
+	TPM_MEDIUM,		/* 50 */
+	TPM_MEDIUM,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,		/* 55 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_MEDIUM,		/* 60 */
+	TPM_MEDIUM,
+	TPM_MEDIUM,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_MEDIUM,		/* 65 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_SHORT,		/* 70 */
+	TPM_SHORT,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,		/* 75 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_LONG,		/* 80 */
+	TPM_UNDEFINED,
+	TPM_MEDIUM,
+	TPM_LONG,
+	TPM_SHORT,
+	TPM_UNDEFINED,		/* 85 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_SHORT,		/* 90 */
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_UNDEFINED,		/* 95 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_MEDIUM,		/* 100 */
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,		/* 105 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_SHORT,		/* 110 */
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_SHORT,		/* 115 */
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_LONG,		/* 120 */
+	TPM_LONG,
+	TPM_MEDIUM,
+	TPM_UNDEFINED,
+	TPM_SHORT,
+	TPM_SHORT,		/* 125 */
+	TPM_SHORT,
+	TPM_LONG,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_SHORT,		/* 130 */
+	TPM_MEDIUM,
+	TPM_UNDEFINED,
+	TPM_SHORT,
+	TPM_MEDIUM,
+	TPM_UNDEFINED,		/* 135 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_SHORT,		/* 140 */
+	TPM_SHORT,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,		/* 145 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_SHORT,		/* 150 */
+	TPM_MEDIUM,
+	TPM_MEDIUM,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_UNDEFINED,		/* 155 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_SHORT,		/* 160 */
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,		/* 165 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_LONG,		/* 170 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,		/* 175 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_MEDIUM,		/* 180 */
+	TPM_SHORT,
+	TPM_MEDIUM,
+	TPM_MEDIUM,
+	TPM_MEDIUM,
+	TPM_MEDIUM,		/* 185 */
+	TPM_SHORT,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,		/* 190 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,		/* 195 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_SHORT,		/* 200 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_SHORT,
+	TPM_SHORT,		/* 205 */
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_MEDIUM,		/* 210 */
+	TPM_UNDEFINED,
+	TPM_MEDIUM,
+	TPM_MEDIUM,
+	TPM_MEDIUM,
+	TPM_UNDEFINED,		/* 215 */
+	TPM_MEDIUM,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_SHORT,
+	TPM_SHORT,		/* 220 */
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_SHORT,
+	TPM_UNDEFINED,		/* 225 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_SHORT,		/* 230 */
+	TPM_LONG,
+	TPM_MEDIUM,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,		/* 235 */
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_UNDEFINED,
+	TPM_SHORT,		/* 240 */
+	TPM_UNDEFINED,
+	TPM_MEDIUM,
+};
+
+/*
+ * Returns max number of jiffies to wait
+ */
+unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
+					   u32 ordinal)
+{
+	int duration_idx = TPM_UNDEFINED;
+	int duration = 0;
+
+	/*
+	 * We only have a duration table for protected commands, where the upper
+	 * 16 bits are 0. For the few other ordinals the fallback will be used.
+	 */
+	if (ordinal < TPM_MAX_ORDINAL)
+		duration_idx = tpm_ordinal_duration[ordinal];
+
+	if (duration_idx != TPM_UNDEFINED)
+		duration = chip->duration[duration_idx];
+	if (duration <= 0)
+		return 2 * 60 * HZ;
+	else
+		return duration;
+}
+EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
+
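
As a concrete reading of the table, assuming chip->duration[] has already been
populated by tpm_get_timeouts():

    /* Sketch: two lookups through tpm_calc_ordinal_duration(). */
    static unsigned long example_durations(struct tpm_chip *chip)
    {
            /* ordinal 21 (TPM_ORDINAL_PCRREAD) is TPM_SHORT in the table */
            unsigned long t_short = tpm_calc_ordinal_duration(chip, 21);
            /* past TPM_MAX_ORDINAL there is no entry: 2 * 60 * HZ fallback */
            unsigned long t_fallback = tpm_calc_ordinal_duration(chip, 999);

            return t_short + t_fallback;
    }
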
+static int tpm_validate_command(struct tpm_chip *chip,
+				 struct tpm_space *space,
+				 const u8 *cmd,
+				 size_t len)
+{
+	const struct tpm_input_header *header = (const void *)cmd;
+	int i;
+	u32 cc;
+	u32 attrs;
+	unsigned int nr_handles;
+
+	if (len < TPM_HEADER_SIZE)
+		return -EINVAL;
+
+	if (!space)
+		return 0;
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2 && chip->nr_commands) {
+		cc = be32_to_cpu(header->ordinal);
+
+		i = tpm2_find_cc(chip, cc);
+		if (i < 0) {
+			dev_dbg(&chip->dev, "0x%04X is an invalid command\n",
+				cc);
+			return -EOPNOTSUPP;
+		}
+
+		attrs = chip->cc_attrs_tbl[i];
+		nr_handles =
+			4 * ((attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0));
+		if (len < TPM_HEADER_SIZE + 4 * nr_handles)
+			goto err_len;
+	}
+
+	return 0;
+err_len:
+	dev_dbg(&chip->dev,
+		"%s: insufficient command length %zu", __func__, len);
+	return -EINVAL;
+}
+
+static int tpm_request_locality(struct tpm_chip *chip, unsigned int flags)
+{
+	int rc;
+
+	if (flags & TPM_TRANSMIT_NESTED)
+		return 0;
+
+	if (!chip->ops->request_locality)
+		return 0;
+
+	rc = chip->ops->request_locality(chip, 0);
+	if (rc < 0)
+		return rc;
+
+	chip->locality = rc;
+
+	return 0;
+}
+
+static void tpm_relinquish_locality(struct tpm_chip *chip, unsigned int flags)
+{
+	int rc;
+
+	if (flags & TPM_TRANSMIT_NESTED)
+		return;
+
+	if (!chip->ops->relinquish_locality)
+		return;
+
+	rc = chip->ops->relinquish_locality(chip, chip->locality);
+	if (rc)
+		dev_err(&chip->dev, "%s: : error %d\n", __func__, rc);
+
+	chip->locality = -1;
+}
+
+static int tpm_cmd_ready(struct tpm_chip *chip, unsigned int flags)
+{
+	if (flags & TPM_TRANSMIT_NESTED)
+		return 0;
+
+	if (!chip->ops->cmd_ready)
+		return 0;
+
+	return chip->ops->cmd_ready(chip);
+}
+
+static int tpm_go_idle(struct tpm_chip *chip, unsigned int flags)
+{
+	if (flags & TPM_TRANSMIT_NESTED)
+		return 0;
+
+	if (!chip->ops->go_idle)
+		return 0;
+
+	return chip->ops->go_idle(chip);
+}
+
+static ssize_t tpm_try_transmit(struct tpm_chip *chip,
+				struct tpm_space *space,
+				u8 *buf, size_t bufsiz,
+				unsigned int flags)
+{
+	struct tpm_output_header *header = (void *)buf;
+	int rc;
+	ssize_t len = 0;
+	u32 count, ordinal;
+	unsigned long stop;
+	bool need_locality;
+
+	rc = tpm_validate_command(chip, space, buf, bufsiz);
+	if (rc == -EINVAL)
+		return rc;
+	/*
+	 * If the command is not implemented by the TPM, synthesize a
+	 * response with a TPM2_RC_COMMAND_CODE return for user-space.
+	 */
+	if (rc == -EOPNOTSUPP) {
+		header->length = cpu_to_be32(sizeof(*header));
+		header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+		header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
+						  TSS2_RESMGR_TPM_RC_LAYER);
+		return sizeof(*header);
+	}
+
+	if (bufsiz > TPM_BUFSIZE)
+		bufsiz = TPM_BUFSIZE;
+
+	count = be32_to_cpu(*((__be32 *) (buf + 2)));
+	ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+	if (count == 0)
+		return -ENODATA;
+	if (count > bufsiz) {
+		dev_err(&chip->dev,
+			"invalid count value %x %zx\n", count, bufsiz);
+		return -E2BIG;
+	}
+
+	if (!(flags & TPM_TRANSMIT_UNLOCKED) && !(flags & TPM_TRANSMIT_NESTED))
+		mutex_lock(&chip->tpm_mutex);
+
+	if (chip->ops->clk_enable != NULL)
+		chip->ops->clk_enable(chip, true);
+
+	/* Store the decision as chip->locality will be changed. */
+	need_locality = chip->locality == -1;
+
+	if (need_locality) {
+		rc = tpm_request_locality(chip, flags);
+		if (rc < 0)
+			goto out_no_locality;
+	}
+
+	rc = tpm_cmd_ready(chip, flags);
+	if (rc)
+		goto out;
+
+	rc = tpm2_prepare_space(chip, space, ordinal, buf);
+	if (rc)
+		goto out;
+
+	rc = chip->ops->send(chip, buf, count);
+	if (rc < 0) {
+		if (rc != -EPIPE)
+			dev_err(&chip->dev,
+				"%s: tpm_send: error %d\n", __func__, rc);
+		goto out;
+	}
+
+	if (chip->flags & TPM_CHIP_FLAG_IRQ)
+		goto out_recv;
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		stop = jiffies + tpm2_calc_ordinal_duration(chip, ordinal);
+	else
+		stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
+	do {
+		u8 status = chip->ops->status(chip);
+		if ((status & chip->ops->req_complete_mask) ==
+		    chip->ops->req_complete_val)
+			goto out_recv;
+
+		if (chip->ops->req_canceled(chip, status)) {
+			dev_err(&chip->dev, "Operation Canceled\n");
+			rc = -ECANCELED;
+			goto out;
+		}
+
+		tpm_msleep(TPM_TIMEOUT_POLL);
+		rmb();
+	} while (time_before(jiffies, stop));
+
+	chip->ops->cancel(chip);
+	dev_err(&chip->dev, "Operation Timed out\n");
+	rc = -ETIME;
+	goto out;
+
+out_recv:
+	len = chip->ops->recv(chip, buf, bufsiz);
+	if (len < 0) {
+		rc = len;
+		dev_err(&chip->dev,
+			"tpm_transmit: tpm_recv: error %d\n", rc);
+		goto out;
+	} else if (len < TPM_HEADER_SIZE) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	if (len != be32_to_cpu(header->length)) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	rc = tpm2_commit_space(chip, space, ordinal, buf, &len);
+	if (rc)
+		dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
+
+out:
+	rc = tpm_go_idle(chip, flags);
+	if (rc)
+		goto out;
+
+	if (need_locality)
+		tpm_relinquish_locality(chip, flags);
+
+out_no_locality:
+	if (chip->ops->clk_enable != NULL)
+		chip->ops->clk_enable(chip, false);
+
+	if (!(flags & TPM_TRANSMIT_UNLOCKED) && !(flags & TPM_TRANSMIT_NESTED))
+		mutex_unlock(&chip->tpm_mutex);
+	return rc ? rc : len;
+}
+
+/**
+ * tpm_transmit - Internal kernel interface to transmit TPM commands.
+ *
+ * @chip: TPM chip to use
+ * @space: tpm space
+ * @buf: TPM command buffer
+ * @bufsiz: length of the TPM command buffer
+ * @flags: tpm transmit flags - bitmap
+ *
+ * A wrapper around tpm_try_transmit that handles TPM2_RC_RETRY
+ * returns from the TPM and retransmits the command after a delay up
+ * to a maximum wait of TPM2_DURATION_LONG.
+ *
+ * Note: TPM1 never returns TPM2_RC_RETRY so the retry logic is TPM2
+ * only
+ *
+ * Return:
+ *     the length of the return when the operation is successful.
+ *     A negative number for system errors (errno).
+ */
+ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
+		     u8 *buf, size_t bufsiz, unsigned int flags)
+{
+	struct tpm_output_header *header = (struct tpm_output_header *)buf;
+	/* space for header and handles */
+	u8 save[TPM_HEADER_SIZE + 3*sizeof(u32)];
+	unsigned int delay_msec = TPM2_DURATION_SHORT;
+	u32 rc = 0;
+	ssize_t ret;
+	const size_t save_size = min(space ? sizeof(save) : TPM_HEADER_SIZE,
+				     bufsiz);
+	/* the command code is where the return code will be */
+	u32 cc = be32_to_cpu(header->return_code);
+
+	/*
+	 * Subtlety here: if we have a space, the handles will be
+	 * transformed, so when we restore the header we also have to
+	 * restore the handles.
+	 */
+	memcpy(save, buf, save_size);
+
+	for (;;) {
+		ret = tpm_try_transmit(chip, space, buf, bufsiz, flags);
+		if (ret < 0)
+			break;
+		rc = be32_to_cpu(header->return_code);
+		if (rc != TPM2_RC_RETRY && rc != TPM2_RC_TESTING)
+			break;
+		/*
+		 * return immediately if self test returns test
+		 * still running to shorten boot time.
+		 */
+		if (rc == TPM2_RC_TESTING && cc == TPM2_CC_SELF_TEST)
+			break;
+
+		if (delay_msec > TPM2_DURATION_LONG) {
+			if (rc == TPM2_RC_RETRY)
+				dev_err(&chip->dev, "in retry loop\n");
+			else
+				dev_err(&chip->dev,
+					"self test is still running\n");
+			break;
+		}
+		tpm_msleep(delay_msec);
+		delay_msec *= 2;
+		memcpy(buf, save, save_size);
+	}
+	return ret;
+}
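
As a worked example of the backoff, assuming the v4.19 values of
TPM2_DURATION_SHORT = 20 ms and TPM2_DURATION_LONG = 2000 ms (defined in
tpm.h, outside this hunk): the sleeps run 20, 40, 80, ..., 1280 ms, and the
next doubling to 2560 ms exceeds TPM2_DURATION_LONG, so a command that keeps
answering TPM2_RC_RETRY is given up on after roughly 2.5 seconds of
accumulated delay.
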
+/**
+ * tpm_transmit_cmd - send a tpm command to the device
+ *    The function extracts the return code from the TPM output header.
+ *
+ * @chip: TPM chip to use
+ * @space: tpm space
+ * @buf: TPM command buffer
+ * @bufsiz: length of the buffer
+ * @min_rsp_body_length: minimum expected length of response body
+ * @flags: tpm transmit flags - bitmap
+ * @desc: command description used in the error message
+ *
+ * Return:
+ *     0 when the operation is successful.
+ *     A negative number for system errors (errno).
+ *     A positive number for a TPM error.
+ */
+ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space,
+			 void *buf, size_t bufsiz,
+			 size_t min_rsp_body_length, unsigned int flags,
+			 const char *desc)
+{
+	const struct tpm_output_header *header = buf;
+	int err;
+	ssize_t len;
+
+	len = tpm_transmit(chip, space, buf, bufsiz, flags);
+	if (len < 0)
+		return len;
+
+	err = be32_to_cpu(header->return_code);
+	if (err != 0 && err != TPM_ERR_DISABLED && err != TPM_ERR_DEACTIVATED
+	    && desc)
+		dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
+			desc);
+	if (err)
+		return err;
+
+	if (len < min_rsp_body_length + TPM_HEADER_SIZE)
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_transmit_cmd);
+
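
Callers therefore have three outcomes to distinguish; a brief sketch
(fragment only, with buf a previously initialized tpm_buf):

    /* Sketch: interpreting tpm_transmit_cmd() return values. */
    rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, "example");
    if (rc < 0)
            return rc;      /* transport/system error (-errno) */
    if (rc > 0)
            return -EIO;    /* TPM error code from the response header */
    /* rc == 0: header validated, body >= min_rsp_body_length bytes */
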
+#define TPM_ORD_STARTUP 153
+#define TPM_ST_CLEAR 1
+
+/**
+ * tpm_startup - turn on the TPM
+ * @chip: TPM chip to use
+ *
+ * Normally the firmware should start the TPM. This function is provided as a
+ * workaround if this does not happen. A legitimate case for this is, for
+ * example, when a TPM emulator is used.
+ *
+ * Return: same as tpm_transmit_cmd()
+ */
+int tpm_startup(struct tpm_chip *chip)
+{
+	struct tpm_buf buf;
+	int rc;
+
+	dev_info(&chip->dev, "starting up the TPM manually\n");
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+		rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_STARTUP);
+		if (rc < 0)
+			return rc;
+
+		tpm_buf_append_u16(&buf, TPM2_SU_CLEAR);
+	} else {
+		rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_STARTUP);
+		if (rc < 0)
+			return rc;
+
+		tpm_buf_append_u16(&buf, TPM_ST_CLEAR);
+	}
+
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
+			      "attempting to start the TPM");
+
+	tpm_buf_destroy(&buf);
+	return rc;
+}
+
+#define TPM_DIGEST_SIZE 20
+#define TPM_RET_CODE_IDX 6
+#define TPM_INTERNAL_RESULT_SIZE 200
+#define TPM_ORD_GET_CAP 101
+#define TPM_ORD_GET_RANDOM 70
+
+static const struct tpm_input_header tpm_getcap_header = {
+	.tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
+	.length = cpu_to_be32(22),
+	.ordinal = cpu_to_be32(TPM_ORD_GET_CAP)
+};
+
+ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
+		   const char *desc, size_t min_cap_length)
+{
+	struct tpm_buf buf;
+	int rc;
+
+	rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_CAP);
+	if (rc)
+		return rc;
+
+	if (subcap_id == TPM_CAP_VERSION_1_1 ||
+	    subcap_id == TPM_CAP_VERSION_1_2) {
+		tpm_buf_append_u32(&buf, subcap_id);
+		tpm_buf_append_u32(&buf, 0);
+	} else {
+		if (subcap_id == TPM_CAP_FLAG_PERM ||
+		    subcap_id == TPM_CAP_FLAG_VOL)
+			tpm_buf_append_u32(&buf, TPM_CAP_FLAG);
+		else
+			tpm_buf_append_u32(&buf, TPM_CAP_PROP);
+
+		tpm_buf_append_u32(&buf, 4);
+		tpm_buf_append_u32(&buf, subcap_id);
+	}
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
+			      min_cap_length, 0, desc);
+	if (!rc)
+		*cap = *(cap_t *)&buf.data[TPM_HEADER_SIZE + 4];
+
+	tpm_buf_destroy(&buf);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_getcap);
+
+int tpm_get_timeouts(struct tpm_chip *chip)
+{
+	cap_t cap;
+	unsigned long timeout_old[4], timeout_chip[4], timeout_eff[4];
+	ssize_t rc;
+
+	if (chip->flags & TPM_CHIP_FLAG_HAVE_TIMEOUTS)
+		return 0;
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+		/* Fixed timeouts for TPM2 */
+		chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
+		chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
+		chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
+		chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
+		chip->duration[TPM_SHORT] =
+		    msecs_to_jiffies(TPM2_DURATION_SHORT);
+		chip->duration[TPM_MEDIUM] =
+		    msecs_to_jiffies(TPM2_DURATION_MEDIUM);
+		chip->duration[TPM_LONG] =
+		    msecs_to_jiffies(TPM2_DURATION_LONG);
+		chip->duration[TPM_LONG_LONG] =
+		    msecs_to_jiffies(TPM2_DURATION_LONG_LONG);
+
+		chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
+		return 0;
+	}
+
+	rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, NULL,
+			sizeof(cap.timeout));
+	if (rc == TPM_ERR_INVALID_POSTINIT) {
+		if (tpm_startup(chip))
+			return rc;
+
+		rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
+				"attempting to determine the timeouts",
+				sizeof(cap.timeout));
+	}
+
+	if (rc) {
+		dev_err(&chip->dev,
+			"A TPM error (%zd) occurred attempting to determine the timeouts\n",
+			rc);
+		return rc;
+	}
+
+	timeout_old[0] = jiffies_to_usecs(chip->timeout_a);
+	timeout_old[1] = jiffies_to_usecs(chip->timeout_b);
+	timeout_old[2] = jiffies_to_usecs(chip->timeout_c);
+	timeout_old[3] = jiffies_to_usecs(chip->timeout_d);
+	timeout_chip[0] = be32_to_cpu(cap.timeout.a);
+	timeout_chip[1] = be32_to_cpu(cap.timeout.b);
+	timeout_chip[2] = be32_to_cpu(cap.timeout.c);
+	timeout_chip[3] = be32_to_cpu(cap.timeout.d);
+	memcpy(timeout_eff, timeout_chip, sizeof(timeout_eff));
+
+	/*
+	 * Provide ability for vendor overrides of timeout values in case
+	 * of misreporting.
+	 */
+	if (chip->ops->update_timeouts != NULL)
+		chip->timeout_adjusted =
+			chip->ops->update_timeouts(chip, timeout_eff);
+
+	if (!chip->timeout_adjusted) {
+		/* Restore default if chip reported 0 */
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(timeout_eff); i++) {
+			if (timeout_eff[i])
+				continue;
+
+			timeout_eff[i] = timeout_old[i];
+			chip->timeout_adjusted = true;
+		}
+
+		if (timeout_eff[0] != 0 && timeout_eff[0] < 1000) {
+			/* timeouts in msec rather than usec */
+			for (i = 0; i != ARRAY_SIZE(timeout_eff); i++)
+				timeout_eff[i] *= 1000;
+			chip->timeout_adjusted = true;
+		}
+	}
+
+	/* Report adjusted timeouts */
+	if (chip->timeout_adjusted) {
+		dev_info(&chip->dev,
+			 HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
+			 timeout_chip[0], timeout_eff[0],
+			 timeout_chip[1], timeout_eff[1],
+			 timeout_chip[2], timeout_eff[2],
+			 timeout_chip[3], timeout_eff[3]);
+	}
+
+	chip->timeout_a = usecs_to_jiffies(timeout_eff[0]);
+	chip->timeout_b = usecs_to_jiffies(timeout_eff[1]);
+	chip->timeout_c = usecs_to_jiffies(timeout_eff[2]);
+	chip->timeout_d = usecs_to_jiffies(timeout_eff[3]);
+
+	rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_DURATION, &cap,
+			"attempting to determine the durations",
+			sizeof(cap.duration));
+	if (rc)
+		return rc;
+
+	chip->duration[TPM_SHORT] =
+		usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_short));
+	chip->duration[TPM_MEDIUM] =
+		usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_medium));
+	chip->duration[TPM_LONG] =
+		usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_long));
+	chip->duration[TPM_LONG_LONG] = 0; /* not used under 1.2 */
+
+	/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
+	 * value wrong and apparently reports msecs rather than usecs. So we
+	 * fix up the resulting too-small TPM_SHORT value to make things work.
+	 * We also scale the TPM_MEDIUM and TPM_LONG values by 1000.
+	 */
+	if (chip->duration[TPM_SHORT] < (HZ / 100)) {
+		chip->duration[TPM_SHORT] = HZ;
+		chip->duration[TPM_MEDIUM] *= 1000;
+		chip->duration[TPM_LONG] *= 1000;
+		chip->duration_adjusted = true;
+		dev_info(&chip->dev, "Adjusting TPM timeout parameters.");
+	}
+
+	chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_get_timeouts);
+
+#define TPM_ORD_CONTINUE_SELFTEST 83
+#define CONTINUE_SELFTEST_RESULT_SIZE 10
+
+static const struct tpm_input_header continue_selftest_header = {
+	.tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
+	.length = cpu_to_be32(10),
+	.ordinal = cpu_to_be32(TPM_ORD_CONTINUE_SELFTEST),
+};
+
+/**
+ * tpm_continue_selftest - run the TPM's selftest
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
+ * a TPM error code.
+ */
+static int tpm_continue_selftest(struct tpm_chip *chip)
+{
+	int rc;
+	struct tpm_cmd_t cmd;
+
+	cmd.header.in = continue_selftest_header;
+	rc = tpm_transmit_cmd(chip, NULL, &cmd, CONTINUE_SELFTEST_RESULT_SIZE,
+			      0, 0, "continue selftest");
+	return rc;
+}
+
+#define TPM_ORDINAL_PCRREAD 21
+#define READ_PCR_RESULT_SIZE 30
+#define READ_PCR_RESULT_BODY_SIZE 20
+static const struct tpm_input_header pcrread_header = {
+	.tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
+	.length = cpu_to_be32(14),
+	.ordinal = cpu_to_be32(TPM_ORDINAL_PCRREAD)
+};
+
+int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
+{
+	int rc;
+	struct tpm_cmd_t cmd;
+
+	cmd.header.in = pcrread_header;
+	cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx);
+	rc = tpm_transmit_cmd(chip, NULL, &cmd, READ_PCR_RESULT_SIZE,
+			      READ_PCR_RESULT_BODY_SIZE, 0,
+			      "attempting to read a pcr value");
+
+	if (rc == 0)
+		memcpy(res_buf, cmd.params.pcrread_out.pcr_result,
+		       TPM_DIGEST_SIZE);
+	return rc;
+}
+
+/**
+ * tpm_is_tpm2 - do we have a TPM2 chip?
+ * @chip:	a &struct tpm_chip instance, %NULL for the default chip
+ *
+ * Return:
+ * 1 if we have a TPM2 chip.
+ * 0 if we don't have a TPM2 chip.
+ * A negative number for system errors (errno).
+ */
+int tpm_is_tpm2(struct tpm_chip *chip)
+{
+	int rc;
+
+	chip = tpm_find_get_ops(chip);
+	if (!chip)
+		return -ENODEV;
+
+	rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
+
+	tpm_put_ops(chip);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_is_tpm2);
+
+/**
+ * tpm_pcr_read - read a PCR value from SHA1 bank
+ * @chip:	a &struct tpm_chip instance, %NULL for the default chip
+ * @pcr_idx:	the PCR to be retrieved
+ * @res_buf:	the value of the PCR
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
+{
+	int rc;
+
+	chip = tpm_find_get_ops(chip);
+	if (!chip)
+		return -ENODEV;
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		rc = tpm2_pcr_read(chip, pcr_idx, res_buf);
+	else
+		rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf);
+	tpm_put_ops(chip);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_pcr_read);
+
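
An in-kernel usage sketch for tpm_pcr_read(); res_buf must hold at least
TPM_DIGEST_SIZE (20) bytes, since the call reads the SHA1 bank:

    /* Sketch: read PCR 0 from the default chip. */
    static int example_read_pcr0(void)
    {
            u8 digest[20];  /* TPM_DIGEST_SIZE */
            int rc;

            rc = tpm_pcr_read(NULL, 0, digest);     /* NULL: default chip */
            if (rc)
                    pr_err("PCR read failed: %d\n", rc);
            return rc;
    }
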
+#define TPM_ORD_PCR_EXTEND 20
+#define EXTEND_PCR_RESULT_SIZE 34
+#define EXTEND_PCR_RESULT_BODY_SIZE 20
+static const struct tpm_input_header pcrextend_header = {
+	.tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
+	.length = cpu_to_be32(34),
+	.ordinal = cpu_to_be32(TPM_ORD_PCR_EXTEND)
+};
+
+static int tpm1_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash,
+			   char *log_msg)
+{
+	struct tpm_buf buf;
+	int rc;
+
+	rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_PCR_EXTEND);
+	if (rc)
+		return rc;
+
+	tpm_buf_append_u32(&buf, pcr_idx);
+	tpm_buf_append(&buf, hash, TPM_DIGEST_SIZE);
+
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, EXTEND_PCR_RESULT_SIZE,
+			      EXTEND_PCR_RESULT_BODY_SIZE, 0, log_msg);
+	tpm_buf_destroy(&buf);
+	return rc;
+}
+
+/**
+ * tpm_pcr_extend - extend a PCR value in SHA1 bank.
+ * @chip:	a &struct tpm_chip instance, %NULL for the default chip
+ * @pcr_idx:	the PCR to be retrieved
+ * @hash:	the hash value used to extend the PCR value
+ *
+ * Note: with TPM 2.0 this also extends those banks whose digest size is known
+ * to the crypto subsystem, in order to prevent malicious use of those PCR
+ * banks. In the future we should determine digest sizes dynamically.
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash)
+{
+	int rc;
+	struct tpm2_digest digest_list[ARRAY_SIZE(chip->active_banks)];
+	u32 count = 0;
+	int i;
+
+	chip = tpm_find_get_ops(chip);
+	if (!chip)
+		return -ENODEV;
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+		memset(digest_list, 0, sizeof(digest_list));
+
+		for (i = 0; i < ARRAY_SIZE(chip->active_banks) &&
+			    chip->active_banks[i] != TPM2_ALG_ERROR; i++) {
+			digest_list[i].alg_id = chip->active_banks[i];
+			memcpy(digest_list[i].digest, hash, TPM_DIGEST_SIZE);
+			count++;
+		}
+
+		rc = tpm2_pcr_extend(chip, pcr_idx, count, digest_list);
+		tpm_put_ops(chip);
+		return rc;
+	}
+
+	rc = tpm1_pcr_extend(chip, pcr_idx, hash,
+			     "attempting extend a PCR value");
+	tpm_put_ops(chip);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_pcr_extend);
+
+/**
+ * tpm_do_selftest - have the TPM continue its selftest and wait until it
+ *                   can receive further commands
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
+ * a TPM error code.
+ */
+int tpm_do_selftest(struct tpm_chip *chip)
+{
+	int rc;
+	unsigned int loops;
+	unsigned int delay_msec = 100;
+	unsigned long duration;
+	u8 dummy[TPM_DIGEST_SIZE];
+
+	duration = tpm_calc_ordinal_duration(chip, TPM_ORD_CONTINUE_SELFTEST);
+
+	loops = jiffies_to_msecs(duration) / delay_msec;
+
+	rc = tpm_continue_selftest(chip);
+	if (rc == TPM_ERR_INVALID_POSTINIT) {
+		chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED;
+		dev_info(&chip->dev, "TPM not ready (%d)\n", rc);
+	}
+	/* This may fail if there was no TPM driver during a suspend/resume
+	 * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST)
+	 */
+	if (rc)
+		return rc;
+
+	do {
+		/* Attempt to read a PCR value */
+		rc = tpm_pcr_read_dev(chip, 0, dummy);
+
+		/* Some buggy TPMs will not respond to tpm_tis_ready() for
+		 * around 300ms while the self test is ongoing; keep trying
+		 * until the self test duration expires. */
+		if (rc == -ETIME) {
+			dev_info(
+			    &chip->dev, HW_ERR
+			    "TPM command timed out during continue self test");
+			tpm_msleep(delay_msec);
+			continue;
+		}
+
+		if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
+			dev_info(&chip->dev,
+				 "TPM is disabled/deactivated (0x%X)\n", rc);
+			/* TPM is disabled and/or deactivated; driver can
+			 * proceed and TPM does handle commands for
+			 * suspend/resume correctly
+			 */
+			return 0;
+		}
+		if (rc != TPM_WARN_DOING_SELFTEST)
+			return rc;
+		tpm_msleep(delay_msec);
+	} while (--loops > 0);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_do_selftest);
+
+/**
+ * tpm1_auto_startup - Perform the standard automatic TPM initialization
+ *                     sequence
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error.
+ */
+int tpm1_auto_startup(struct tpm_chip *chip)
+{
+	int rc;
+
+	rc = tpm_get_timeouts(chip);
+	if (rc)
+		goto out;
+	rc = tpm_do_selftest(chip);
+	if (rc) {
+		dev_err(&chip->dev, "TPM self test failed\n");
+		goto out;
+	}
+
+	return rc;
+out:
+	if (rc > 0)
+		rc = -ENODEV;
+	return rc;
+}
+
+/**
+ * tpm_send - send a TPM command
+ * @chip:	a &struct tpm_chip instance, %NULL for the default chip
+ * @cmd:	a TPM command buffer
+ * @buflen:	the length of the TPM command buffer
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
+{
+	int rc;
+
+	chip = tpm_find_get_ops(chip);
+	if (!chip)
+		return -ENODEV;
+
+	rc = tpm_transmit_cmd(chip, NULL, cmd, buflen, 0, 0,
+			      "attempting to a send a command");
+	tpm_put_ops(chip);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_send);
+
+#define TPM_ORD_SAVESTATE 152
+#define SAVESTATE_RESULT_SIZE 10
+
+static const struct tpm_input_header savestate_header = {
+	.tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
+	.length = cpu_to_be32(10),
+	.ordinal = cpu_to_be32(TPM_ORD_SAVESTATE)
+};
+
+/*
+ * We are about to suspend. Save the TPM state
+ * so that it can be restored.
+ */
+int tpm_pm_suspend(struct device *dev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+	struct tpm_cmd_t cmd;
+	int rc, try;
+
+	u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
+
+	if (chip == NULL)
+		return -ENODEV;
+
+	if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
+		return 0;
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+		tpm2_shutdown(chip, TPM2_SU_STATE);
+		return 0;
+	}
+
+	/* for buggy tpm, flush pcrs with extend to selected dummy */
+	if (tpm_suspend_pcr)
+		rc = tpm1_pcr_extend(chip, tpm_suspend_pcr, dummy_hash,
+				     "extending dummy pcr before suspend");
+
+	/* now do the actual savestate */
+	for (try = 0; try < TPM_RETRY; try++) {
+		cmd.header.in = savestate_header;
+		rc = tpm_transmit_cmd(chip, NULL, &cmd, SAVESTATE_RESULT_SIZE,
+				      0, 0, NULL);
+
+		/*
+		 * If the TPM indicates that it is too busy to respond to
+		 * this command then retry before giving up.  It can take
+		 * several seconds for this TPM to be ready.
+		 *
+		 * This can happen if the TPM has already been sent the
+		 * SaveState command before the driver has loaded.  TCG 1.2
+		 * specification states that any communication after SaveState
+		 * may cause the TPM to invalidate previously saved state.
+		 */
+		if (rc != TPM_WARN_RETRY)
+			break;
+		tpm_msleep(TPM_TIMEOUT_RETRY);
+	}
+
+	if (rc)
+		dev_err(&chip->dev,
+			"Error (%d) sending savestate before suspend\n", rc);
+	else if (try > 0)
+		dev_warn(&chip->dev, "TPM savestate took %dms\n",
+			 try * TPM_TIMEOUT_RETRY);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_pm_suspend);
+
+/*
+ * Resume from a power save. The BIOS has already restored
+ * the TPM state.
+ */
+int tpm_pm_resume(struct device *dev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+
+	if (chip == NULL)
+		return -ENODEV;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_pm_resume);
+
+#define TPM_GETRANDOM_RESULT_SIZE	18
+static const struct tpm_input_header tpm_getrandom_header = {
+	.tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
+	.length = cpu_to_be32(14),
+	.ordinal = cpu_to_be32(TPM_ORD_GET_RANDOM)
+};
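+/* 14 = the 10-byte header plus the 4-byte (be32) num_bytes parameter. */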
+
+/**
+ * tpm_get_random() - get random bytes from the TPM's RNG
+ * @chip:	a &struct tpm_chip instance, %NULL for the default chip
+ * @out:	destination buffer for the random bytes
+ * @max:	the max number of bytes to write to @out
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
+{
+	struct tpm_cmd_t tpm_cmd;
+	u32 recd, num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA), rlength;
+	int err, total = 0, retries = 5;
+	u8 *dest = out;
+
+	if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
+		return -EINVAL;
+
+	chip = tpm_find_get_ops(chip);
+	if (!chip)
+		return -ENODEV;
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+		err = tpm2_get_random(chip, out, max);
+		tpm_put_ops(chip);
+		return err;
+	}
+
+	do {
+		tpm_cmd.header.in = tpm_getrandom_header;
+		tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);
+
+		err = tpm_transmit_cmd(chip, NULL, &tpm_cmd,
+				       TPM_GETRANDOM_RESULT_SIZE + num_bytes,
+				       offsetof(struct tpm_getrandom_out,
+						rng_data),
+				       0, "attempting get random");
+		if (err)
+			break;
+
+		recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
+		if (recd > num_bytes) {
+			total = -EFAULT;
+			break;
+		}
+
+		rlength = be32_to_cpu(tpm_cmd.header.out.length);
+		if (rlength < TPM_HEADER_SIZE +
+			      offsetof(struct tpm_getrandom_out, rng_data) +
+			      recd) {
+			total = -EFAULT;
+			break;
+		}
+		memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd);
+
+		dest += recd;
+		total += recd;
+		num_bytes -= recd;
+	} while (retries-- && total < max);
+
+	tpm_put_ops(chip);
+	return total ? total : -EIO;
+}
+EXPORT_SYMBOL_GPL(tpm_get_random);
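+
+/*
+ * Illustrative caller sketch (not part of this file): pull up to 32
+ * bytes of entropy from the default chip. A negative return means no
+ * chip was found or the command failed; a short read is possible, so
+ * callers should check the returned count.
+ *
+ *	u8 rng[32];
+ *	int n = tpm_get_random(NULL, rng, sizeof(rng));
+ *	if (n < 0)
+ *		return n;
+ */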
+
+/**
+ * tpm_seal_trusted() - seal a trusted key payload
+ * @chip:	a &struct tpm_chip instance, %NULL for the default chip
+ * @options:	authentication values and other options
+ * @payload:	the key data in clear and encrypted form
+ *
+ * Note: only TPM 2.0 chips are supported. TPM 1.x implementation is located in
+ * the keyring subsystem.
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_seal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload,
+		     struct trusted_key_options *options)
+{
+	int rc;
+
+	chip = tpm_find_get_ops(chip);
+	if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
+		return -ENODEV;
+
+	rc = tpm2_seal_trusted(chip, payload, options);
+
+	tpm_put_ops(chip);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_seal_trusted);
+
+/**
+ * tpm_unseal_trusted() - unseal a trusted key
+ * @chip:	a &struct tpm_chip instance, %NULL for the default chip
+ * @options:	authentication values and other options
+ * @payload:	the key data in clear and encrypted form
+ *
+ * Note: only TPM 2.0 chips are supported. TPM 1.x implementation is located in
+ * the keyring subsystem.
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_unseal_trusted(struct tpm_chip *chip,
+		       struct trusted_key_payload *payload,
+		       struct trusted_key_options *options)
+{
+	int rc;
+
+	chip = tpm_find_get_ops(chip);
+	if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
+		return -ENODEV;
+
+	rc = tpm2_unseal_trusted(chip, payload, options);
+
+	tpm_put_ops(chip);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_unseal_trusted);
+
+static int __init tpm_init(void)
+{
+	int rc;
+
+	tpm_class = class_create(THIS_MODULE, "tpm");
+	if (IS_ERR(tpm_class)) {
+		pr_err("couldn't create tpm class\n");
+		return PTR_ERR(tpm_class);
+	}
+
+	tpmrm_class = class_create(THIS_MODULE, "tpmrm");
+	if (IS_ERR(tpmrm_class)) {
+		pr_err("couldn't create tpmrm class\n");
+		class_destroy(tpm_class);
+		return PTR_ERR(tpmrm_class);
+	}
+
+	rc = alloc_chrdev_region(&tpm_devt, 0, 2*TPM_NUM_DEVICES, "tpm");
+	if (rc < 0) {
+		pr_err("tpm: failed to allocate char dev region\n");
+		class_destroy(tpmrm_class);
+		class_destroy(tpm_class);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void __exit tpm_exit(void)
+{
+	idr_destroy(&dev_nums_idr);
+	class_destroy(tpm_class);
+	class_destroy(tpmrm_class);
+	unregister_chrdev_region(tpm_devt, 2*TPM_NUM_DEVICES);
+}
+
+subsys_initcall(tpm_init);
+module_exit(tpm_exit);
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
new file mode 100644
index 0000000..83a77a4
--- /dev/null
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Copyright (C) 2013 Obsidian Research Corp
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * sysfs filesystem inspection interface to the TPM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+#include <linux/device.h>
+#include "tpm.h"
+
+struct tpm_readpubek_out {
+	u8 algorithm[4];
+	u8 encscheme[2];
+	u8 sigscheme[2];
+	__be32 paramsize;
+	u8 parameters[12];
+	__be32 keysize;
+	u8 modulus[256];
+	u8 checksum[20];
+} __packed;
+
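+/* Minimum body: algorithm(4) + encscheme(2) + sigscheme(2) + paramsize(4) +
+ * parameters(12) + keysize(4) = 28 bytes, plus the 256-byte modulus; the
+ * trailing 20-byte checksum is not counted in the minimum.
+ */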
+#define READ_PUBEK_RESULT_MIN_BODY_SIZE (28 + 256)
+#define TPM_ORD_READPUBEK 124
+
+static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct tpm_buf tpm_buf;
+	struct tpm_readpubek_out *out;
+	ssize_t rc;
+	int i;
+	char *str = buf;
+	struct tpm_chip *chip = to_tpm_chip(dev);
+	char anti_replay[20];
+
+	memset(&anti_replay, 0, sizeof(anti_replay));
+
+	rc = tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK);
+	if (rc)
+		return rc;
+
+	tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay));
+
+	rc = tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
+			      READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
+			      "attempting to read the PUBEK");
+	if (rc) {
+		tpm_buf_destroy(&tpm_buf);
+		return 0;
+	}
+
+	out = (struct tpm_readpubek_out *)&tpm_buf.data[10];
+	str +=
+	    sprintf(str,
+		    "Algorithm: %02X %02X %02X %02X\n"
+		    "Encscheme: %02X %02X\n"
+		    "Sigscheme: %02X %02X\n"
+		    "Parameters: %02X %02X %02X %02X "
+		    "%02X %02X %02X %02X "
+		    "%02X %02X %02X %02X\n"
+		    "Modulus length: %d\n"
+		    "Modulus:\n",
+		    out->algorithm[0], out->algorithm[1], out->algorithm[2],
+		    out->algorithm[3],
+		    out->encscheme[0], out->encscheme[1],
+		    out->sigscheme[0], out->sigscheme[1],
+		    out->parameters[0], out->parameters[1],
+		    out->parameters[2], out->parameters[3],
+		    out->parameters[4], out->parameters[5],
+		    out->parameters[6], out->parameters[7],
+		    out->parameters[8], out->parameters[9],
+		    out->parameters[10], out->parameters[11],
+		    be32_to_cpu(out->keysize));
+
+	for (i = 0; i < 256; i++) {
+		str += sprintf(str, "%02X ", out->modulus[i]);
+		if ((i + 1) % 16 == 0)
+			str += sprintf(str, "\n");
+	}
+
+	rc = str - buf;
+	tpm_buf_destroy(&tpm_buf);
+	return rc;
+}
+static DEVICE_ATTR_RO(pubek);
+
+static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	cap_t cap;
+	u8 digest[TPM_DIGEST_SIZE];
+	ssize_t rc;
+	int i, j, num_pcrs;
+	char *str = buf;
+	struct tpm_chip *chip = to_tpm_chip(dev);
+
+	rc = tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap,
+			"attempting to determine the number of PCRS",
+			sizeof(cap.num_pcrs));
+	if (rc)
+		return 0;
+
+	num_pcrs = be32_to_cpu(cap.num_pcrs);
+	for (i = 0; i < num_pcrs; i++) {
+		rc = tpm_pcr_read_dev(chip, i, digest);
+		if (rc)
+			break;
+		str += sprintf(str, "PCR-%02d: ", i);
+		for (j = 0; j < TPM_DIGEST_SIZE; j++)
+			str += sprintf(str, "%02X ", digest[j]);
+		str += sprintf(str, "\n");
+	}
+	return str - buf;
+}
+static DEVICE_ATTR_RO(pcrs);
+
+static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
+{
+	cap_t cap;
+	ssize_t rc;
+
+	rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
+			"attempting to determine the permanent enabled state",
+			sizeof(cap.perm_flags));
+	if (rc)
+		return 0;
+
+	rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
+	return rc;
+}
+static DEVICE_ATTR_RO(enabled);
+
+static ssize_t active_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	cap_t cap;
+	ssize_t rc;
+
+	rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
+			"attempting to determine the permanent active state",
+			sizeof(cap.perm_flags));
+	if (rc)
+		return 0;
+
+	rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
+	return rc;
+}
+static DEVICE_ATTR_RO(active);
+
+static ssize_t owned_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	cap_t cap;
+	ssize_t rc;
+
+	rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
+			"attempting to determine the owner state",
+			sizeof(cap.owned));
+	if (rc)
+		return 0;
+
+	rc = sprintf(buf, "%d\n", cap.owned);
+	return rc;
+}
+static DEVICE_ATTR_RO(owned);
+
+static ssize_t temp_deactivated_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	cap_t cap;
+	ssize_t rc;
+
+	rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
+			"attempting to determine the temporary state",
+			sizeof(cap.stclear_flags));
+	if (rc)
+		return 0;
+
+	rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
+	return rc;
+}
+static DEVICE_ATTR_RO(temp_deactivated);
+
+static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct tpm_chip *chip = to_tpm_chip(dev);
+	cap_t cap;
+	ssize_t rc;
+	char *str = buf;
+
+	rc = tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
+			"attempting to determine the manufacturer",
+			sizeof(cap.manufacturer_id));
+	if (rc)
+		return 0;
+	str += sprintf(str, "Manufacturer: 0x%x\n",
+		       be32_to_cpu(cap.manufacturer_id));
+
+	/* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
+	rc = tpm_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
+			"attempting to determine the 1.2 version",
+			sizeof(cap.tpm_version_1_2));
+	if (!rc) {
+		str += sprintf(str,
+			       "TCG version: %d.%d\nFirmware version: %d.%d\n",
+			       cap.tpm_version_1_2.Major,
+			       cap.tpm_version_1_2.Minor,
+			       cap.tpm_version_1_2.revMajor,
+			       cap.tpm_version_1_2.revMinor);
+	} else {
+		/* Otherwise just use TPM_STRUCT_VER */
+		rc = tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+				"attempting to determine the 1.1 version",
+				sizeof(cap.tpm_version));
+		if (rc)
+			return 0;
+		str += sprintf(str,
+			       "TCG version: %d.%d\nFirmware version: %d.%d\n",
+			       cap.tpm_version.Major,
+			       cap.tpm_version.Minor,
+			       cap.tpm_version.revMajor,
+			       cap.tpm_version.revMinor);
+	}
+
+	return str - buf;
+}
+static DEVICE_ATTR_RO(caps);
+
+static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct tpm_chip *chip = to_tpm_chip(dev);
+	if (chip == NULL)
+		return 0;
+
+	chip->ops->cancel(chip);
+	return count;
+}
+static DEVICE_ATTR_WO(cancel);
+
+static ssize_t durations_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct tpm_chip *chip = to_tpm_chip(dev);
+
+	if (chip->duration[TPM_LONG] == 0)
+		return 0;
+
+	return sprintf(buf, "%d %d %d [%s]\n",
+		       jiffies_to_usecs(chip->duration[TPM_SHORT]),
+		       jiffies_to_usecs(chip->duration[TPM_MEDIUM]),
+		       jiffies_to_usecs(chip->duration[TPM_LONG]),
+		       chip->duration_adjusted
+		       ? "adjusted" : "original");
+}
+static DEVICE_ATTR_RO(durations);
+
+static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct tpm_chip *chip = to_tpm_chip(dev);
+
+	return sprintf(buf, "%d %d %d %d [%s]\n",
+		       jiffies_to_usecs(chip->timeout_a),
+		       jiffies_to_usecs(chip->timeout_b),
+		       jiffies_to_usecs(chip->timeout_c),
+		       jiffies_to_usecs(chip->timeout_d),
+		       chip->timeout_adjusted
+		       ? "adjusted" : "original");
+}
+static DEVICE_ATTR_RO(timeouts);
+
+static struct attribute *tpm_dev_attrs[] = {
+	&dev_attr_pubek.attr,
+	&dev_attr_pcrs.attr,
+	&dev_attr_enabled.attr,
+	&dev_attr_active.attr,
+	&dev_attr_owned.attr,
+	&dev_attr_temp_deactivated.attr,
+	&dev_attr_caps.attr,
+	&dev_attr_cancel.attr,
+	&dev_attr_durations.attr,
+	&dev_attr_timeouts.attr,
+	NULL,
+};
+
+static const struct attribute_group tpm_dev_group = {
+	.attrs = tpm_dev_attrs,
+};
+
+void tpm_sysfs_add_device(struct tpm_chip *chip)
+{
+	/* XXX: If you wish to remove this restriction, you must first update
+	 * tpm_sysfs to explicitly lock chip->ops.
+	 */
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		return;
+
+	/* The sysfs routines rely on an implicit tpm_try_get_ops, device_del
+	 * is called before ops is null'd and the sysfs core synchronizes this
+	 * removal so that no callbacks are running or can run again
+	 */
+	WARN_ON(chip->groups_cnt != 0);
+	chip->groups[chip->groups_cnt++] = &tpm_dev_group;
+}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
new file mode 100644
index 0000000..f3501d0
--- /dev/null
+++ b/drivers/char/tpm/tpm.h
@@ -0,0 +1,607 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#ifndef __TPM_H__
+#define __TPM_H__
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/hw_random.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/tpm.h>
+#include <linux/acpi.h>
+#include <linux/cdev.h>
+#include <linux/highmem.h>
+#include <linux/tpm_eventlog.h>
+#include <crypto/hash_info.h>
+
+#ifdef CONFIG_X86
+#include <asm/intel-family.h>
+#endif
+
+enum tpm_const {
+	TPM_MINOR = 224,	/* officially assigned */
+	TPM_BUFSIZE = 4096,
+	TPM_NUM_DEVICES = 65536,
+	TPM_RETRY = 50,		/* 5 seconds */
+	TPM_NUM_EVENT_LOG_FILES = 3,
+};
+
+enum tpm_timeout {
+	TPM_TIMEOUT = 5,	/* msecs */
+	TPM_TIMEOUT_RETRY = 100, /* msecs */
+	TPM_TIMEOUT_RANGE_US = 300,	/* usecs */
+	TPM_TIMEOUT_POLL = 1,	/* msecs */
+	TPM_TIMEOUT_USECS_MIN = 100,      /* usecs */
+	TPM_TIMEOUT_USECS_MAX = 500      /* usecs */
+};
+
+/* TPM addresses */
+enum tpm_addr {
+	TPM_SUPERIO_ADDR = 0x2E,
+	TPM_ADDR = 0x4E,
+};
+
+/* Indexes the duration array */
+enum tpm_duration {
+	TPM_SHORT = 0,
+	TPM_MEDIUM = 1,
+	TPM_LONG = 2,
+	TPM_LONG_LONG = 3,
+	TPM_UNDEFINED,
+	TPM_NUM_DURATIONS = TPM_UNDEFINED,
+};
+
+#define TPM_WARN_RETRY          0x800
+#define TPM_WARN_DOING_SELFTEST 0x802
+#define TPM_ERR_DEACTIVATED     0x6
+#define TPM_ERR_DISABLED        0x7
+#define TPM_ERR_INVALID_POSTINIT 38
+
+#define TPM_HEADER_SIZE		10
+
+enum tpm2_const {
+	TPM2_PLATFORM_PCR       =     24,
+	TPM2_PCR_SELECT_MIN     = ((TPM2_PLATFORM_PCR + 7) / 8),
+};
+
+enum tpm2_timeouts {
+	TPM2_TIMEOUT_A          =    750,
+	TPM2_TIMEOUT_B          =   2000,
+	TPM2_TIMEOUT_C          =    200,
+	TPM2_TIMEOUT_D          =     30,
+	TPM2_DURATION_SHORT     =     20,
+	TPM2_DURATION_MEDIUM    =    750,
+	TPM2_DURATION_LONG      =   2000,
+	TPM2_DURATION_LONG_LONG = 300000,
+	TPM2_DURATION_DEFAULT   = 120000,
+};
+
+enum tpm2_structures {
+	TPM2_ST_NO_SESSIONS	= 0x8001,
+	TPM2_ST_SESSIONS	= 0x8002,
+};
+
+/* Indicates from what layer of the software stack the error comes from */
+#define TSS2_RC_LAYER_SHIFT	 16
+#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
+
+enum tpm2_return_codes {
+	TPM2_RC_SUCCESS		= 0x0000,
+	TPM2_RC_HASH		= 0x0083, /* RC_FMT1 */
+	TPM2_RC_HANDLE		= 0x008B,
+	TPM2_RC_INITIALIZE	= 0x0100, /* RC_VER1 */
+	TPM2_RC_FAILURE		= 0x0101,
+	TPM2_RC_DISABLED	= 0x0120,
+	TPM2_RC_COMMAND_CODE    = 0x0143,
+	TPM2_RC_TESTING		= 0x090A, /* RC_WARN */
+	TPM2_RC_REFERENCE_H0	= 0x0910,
+	TPM2_RC_RETRY		= 0x0922,
+};
+
+enum tpm2_algorithms {
+	TPM2_ALG_ERROR		= 0x0000,
+	TPM2_ALG_SHA1		= 0x0004,
+	TPM2_ALG_KEYEDHASH	= 0x0008,
+	TPM2_ALG_SHA256		= 0x000B,
+	TPM2_ALG_SHA384		= 0x000C,
+	TPM2_ALG_SHA512		= 0x000D,
+	TPM2_ALG_NULL		= 0x0010,
+	TPM2_ALG_SM3_256	= 0x0012,
+};
+
+enum tpm2_command_codes {
+	TPM2_CC_FIRST		= 0x011F,
+	TPM2_CC_CREATE_PRIMARY  = 0x0131,
+	TPM2_CC_SELF_TEST	= 0x0143,
+	TPM2_CC_STARTUP		= 0x0144,
+	TPM2_CC_SHUTDOWN	= 0x0145,
+	TPM2_CC_CREATE		= 0x0153,
+	TPM2_CC_LOAD		= 0x0157,
+	TPM2_CC_UNSEAL		= 0x015E,
+	TPM2_CC_CONTEXT_LOAD	= 0x0161,
+	TPM2_CC_CONTEXT_SAVE	= 0x0162,
+	TPM2_CC_FLUSH_CONTEXT	= 0x0165,
+	TPM2_CC_GET_CAPABILITY	= 0x017A,
+	TPM2_CC_GET_RANDOM	= 0x017B,
+	TPM2_CC_PCR_READ	= 0x017E,
+	TPM2_CC_PCR_EXTEND	= 0x0182,
+	TPM2_CC_LAST		= 0x018F,
+};
+
+enum tpm2_permanent_handles {
+	TPM2_RS_PW		= 0x40000009,
+};
+
+enum tpm2_capabilities {
+	TPM2_CAP_HANDLES	= 1,
+	TPM2_CAP_COMMANDS	= 2,
+	TPM2_CAP_PCRS		= 5,
+	TPM2_CAP_TPM_PROPERTIES = 6,
+};
+
+enum tpm2_properties {
+	TPM_PT_TOTAL_COMMANDS	= 0x0129,
+};
+
+enum tpm2_startup_types {
+	TPM2_SU_CLEAR	= 0x0000,
+	TPM2_SU_STATE	= 0x0001,
+};
+
+enum tpm2_cc_attrs {
+	TPM2_CC_ATTR_CHANDLES	= 25,
+	TPM2_CC_ATTR_RHANDLE	= 28,
+};
+
+#define TPM_VID_INTEL    0x8086
+#define TPM_VID_WINBOND  0x1050
+#define TPM_VID_STM      0x104A
+
+#define TPM_PPI_VERSION_LEN		3
+
+struct tpm_space {
+	u32 context_tbl[3];
+	u8 *context_buf;
+	u32 session_tbl[3];
+	u8 *session_buf;
+};
+
+enum tpm_chip_flags {
+	TPM_CHIP_FLAG_TPM2		= BIT(1),
+	TPM_CHIP_FLAG_IRQ		= BIT(2),
+	TPM_CHIP_FLAG_VIRTUAL		= BIT(3),
+	TPM_CHIP_FLAG_HAVE_TIMEOUTS	= BIT(4),
+	TPM_CHIP_FLAG_ALWAYS_POWERED	= BIT(5),
+};
+
+struct tpm_bios_log {
+	void *bios_event_log;
+	void *bios_event_log_end;
+};
+
+struct tpm_chip_seqops {
+	struct tpm_chip *chip;
+	const struct seq_operations *seqops;
+};
+
+struct tpm_chip {
+	struct device dev;
+	struct device devs;
+	struct cdev cdev;
+	struct cdev cdevs;
+
+	/* A driver callback under ops cannot be run unless ops_sem is held
+	 * (sometimes implicitly, eg for the sysfs code). ops becomes null
+	 * when the driver is unregistered, see tpm_try_get_ops.
+	 */
+	struct rw_semaphore ops_sem;
+	const struct tpm_class_ops *ops;
+
+	struct tpm_bios_log log;
+	struct tpm_chip_seqops bin_log_seqops;
+	struct tpm_chip_seqops ascii_log_seqops;
+
+	unsigned int flags;
+
+	int dev_num;		/* /dev/tpm# */
+	unsigned long is_open;	/* only one allowed */
+
+	char hwrng_name[64];
+	struct hwrng hwrng;
+
+	struct mutex tpm_mutex;	/* tpm is processing */
+
+	unsigned long timeout_a; /* jiffies */
+	unsigned long timeout_b; /* jiffies */
+	unsigned long timeout_c; /* jiffies */
+	unsigned long timeout_d; /* jiffies */
+	bool timeout_adjusted;
+	unsigned long duration[TPM_NUM_DURATIONS]; /* jiffies */
+	bool duration_adjusted;
+
+	struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES];
+
+	const struct attribute_group *groups[3];
+	unsigned int groups_cnt;
+
+	u16 active_banks[7];
+#ifdef CONFIG_ACPI
+	acpi_handle acpi_dev_handle;
+	char ppi_version[TPM_PPI_VERSION_LEN + 1];
+#endif /* CONFIG_ACPI */
+
+	struct tpm_space work_space;
+	u32 nr_commands;
+	u32 *cc_attrs_tbl;
+
+	/* active locality */
+	int locality;
+};
+
+#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
+
+struct tpm_input_header {
+	__be16	tag;
+	__be32	length;
+	__be32	ordinal;
+} __packed;
+
+struct tpm_output_header {
+	__be16	tag;
+	__be32	length;
+	__be32	return_code;
+} __packed;
+
+#define TPM_TAG_RQU_COMMAND 193
+
+struct	stclear_flags_t {
+	__be16	tag;
+	u8	deactivated;
+	u8	disableForceClear;
+	u8	physicalPresence;
+	u8	physicalPresenceLock;
+	u8	bGlobalLock;
+} __packed;
+
+struct	tpm_version_t {
+	u8	Major;
+	u8	Minor;
+	u8	revMajor;
+	u8	revMinor;
+} __packed;
+
+struct	tpm_version_1_2_t {
+	__be16	tag;
+	u8	Major;
+	u8	Minor;
+	u8	revMajor;
+	u8	revMinor;
+} __packed;
+
+struct	timeout_t {
+	__be32	a;
+	__be32	b;
+	__be32	c;
+	__be32	d;
+} __packed;
+
+struct duration_t {
+	__be32	tpm_short;
+	__be32	tpm_medium;
+	__be32	tpm_long;
+} __packed;
+
+struct permanent_flags_t {
+	__be16	tag;
+	u8	disable;
+	u8	ownership;
+	u8	deactivated;
+	u8	readPubek;
+	u8	disableOwnerClear;
+	u8	allowMaintenance;
+	u8	physicalPresenceLifetimeLock;
+	u8	physicalPresenceHWEnable;
+	u8	physicalPresenceCMDEnable;
+	u8	CEKPUsed;
+	u8	TPMpost;
+	u8	TPMpostLock;
+	u8	FIPS;
+	u8	operator;
+	u8	enableRevokeEK;
+	u8	nvLocked;
+	u8	readSRKPub;
+	u8	tpmEstablished;
+	u8	maintenanceDone;
+	u8	disableFullDALogicInfo;
+} __packed;
+
+typedef union {
+	struct	permanent_flags_t perm_flags;
+	struct	stclear_flags_t	stclear_flags;
+	__u8	owned;
+	__be32	num_pcrs;
+	struct	tpm_version_t	tpm_version;
+	struct	tpm_version_1_2_t tpm_version_1_2;
+	__be32	manufacturer_id;
+	struct timeout_t  timeout;
+	struct duration_t duration;
+} cap_t;
+
+enum tpm_capabilities {
+	TPM_CAP_FLAG = 4,
+	TPM_CAP_PROP = 5,
+	TPM_CAP_VERSION_1_1 = 0x06,
+	TPM_CAP_VERSION_1_2 = 0x1A,
+};
+
+enum tpm_sub_capabilities {
+	TPM_CAP_PROP_PCR = 0x101,
+	TPM_CAP_PROP_MANUFACTURER = 0x103,
+	TPM_CAP_FLAG_PERM = 0x108,
+	TPM_CAP_FLAG_VOL = 0x109,
+	TPM_CAP_PROP_OWNER = 0x111,
+	TPM_CAP_PROP_TIS_TIMEOUT = 0x115,
+	TPM_CAP_PROP_TIS_DURATION = 0x120,
+};
+
+typedef union {
+	struct	tpm_input_header in;
+	struct	tpm_output_header out;
+} tpm_cmd_header;
+
+struct tpm_pcrread_out {
+	u8	pcr_result[TPM_DIGEST_SIZE];
+} __packed;
+
+struct tpm_pcrread_in {
+	__be32	pcr_idx;
+} __packed;
+
+/* 128 bytes is an arbitrary cap. This could be as large as TPM_BUFSIZE - 18
+ * bytes, but 128 is still a relatively large number of random bytes and
+ * anything much bigger causes users of struct tpm_cmd_t to start getting
+ * compiler warnings about stack frame size. */
+#define TPM_MAX_RNG_DATA	128
+
+struct tpm_getrandom_out {
+	__be32 rng_data_len;
+	u8     rng_data[TPM_MAX_RNG_DATA];
+} __packed;
+
+struct tpm_getrandom_in {
+	__be32 num_bytes;
+} __packed;
+
+typedef union {
+	struct	tpm_pcrread_in	pcrread_in;
+	struct	tpm_pcrread_out	pcrread_out;
+	struct	tpm_getrandom_in getrandom_in;
+	struct	tpm_getrandom_out getrandom_out;
+} tpm_cmd_params;
+
+struct tpm_cmd_t {
+	tpm_cmd_header	header;
+	tpm_cmd_params	params;
+} __packed;
+
+
+/* A string buffer type for constructing TPM commands. This is based on the
+ * ideas of string buffer code in security/keys/trusted.h but is heap based
+ * in order to keep the stack usage minimal.
+ */
+
+enum tpm_buf_flags {
+	TPM_BUF_OVERFLOW	= BIT(0),
+};
+
+struct tpm_buf {
+	struct page *data_page;
+	unsigned int flags;
+	u8 *data;
+};
+
+static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
+	struct tpm_input_header *head;
+	head = (struct tpm_input_header *)buf->data;
+	head->tag = cpu_to_be16(tag);
+	head->length = cpu_to_be32(sizeof(*head));
+	head->ordinal = cpu_to_be32(ordinal);
+}
+
+static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
+	buf->data_page = alloc_page(GFP_HIGHUSER);
+	if (!buf->data_page)
+		return -ENOMEM;
+
+	buf->flags = 0;
+	buf->data = kmap(buf->data_page);
+	tpm_buf_reset(buf, tag, ordinal);
+	return 0;
+}
+
+static inline void tpm_buf_destroy(struct tpm_buf *buf)
+{
+	kunmap(buf->data_page);
+	__free_page(buf->data_page);
+}
+
+static inline u32 tpm_buf_length(struct tpm_buf *buf)
+{
+	struct tpm_input_header *head = (struct tpm_input_header *) buf->data;
+
+	return be32_to_cpu(head->length);
+}
+
+static inline u16 tpm_buf_tag(struct tpm_buf *buf)
+{
+	struct tpm_input_header *head = (struct tpm_input_header *) buf->data;
+
+	return be16_to_cpu(head->tag);
+}
+
+static inline void tpm_buf_append(struct tpm_buf *buf,
+				  const unsigned char *new_data,
+				  unsigned int new_len)
+{
+	struct tpm_input_header *head = (struct tpm_input_header *) buf->data;
+	u32 len = tpm_buf_length(buf);
+
+	/* Return silently if overflow has already happened. */
+	if (buf->flags & TPM_BUF_OVERFLOW)
+		return;
+
+	if ((len + new_len) > PAGE_SIZE) {
+		WARN(1, "tpm_buf: overflow\n");
+		buf->flags |= TPM_BUF_OVERFLOW;
+		return;
+	}
+
+	memcpy(&buf->data[len], new_data, new_len);
+	head->length = cpu_to_be32(len + new_len);
+}
+
+static inline void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value)
+{
+	tpm_buf_append(buf, &value, 1);
+}
+
+static inline void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value)
+{
+	__be16 value2 = cpu_to_be16(value);
+
+	tpm_buf_append(buf, (u8 *) &value2, 2);
+}
+
+static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
+{
+	__be32 value2 = cpu_to_be32(value);
+
+	tpm_buf_append(buf, (u8 *) &value2, 4);
+}
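+
+/*
+ * Usage sketch (illustrative; "ordinal" and "param" are placeholders):
+ * every append keeps the big-endian length field in the header current,
+ * so a command can be built incrementally and handed straight to
+ * tpm_transmit_cmd():
+ *
+ *	struct tpm_buf buf;
+ *	int rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, ordinal);
+ *	if (rc)
+ *		return rc;
+ *	tpm_buf_append_u32(&buf, param);
+ *	rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, NULL);
+ *	tpm_buf_destroy(&buf);
+ */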
+
+extern struct class *tpm_class;
+extern struct class *tpmrm_class;
+extern dev_t tpm_devt;
+extern const struct file_operations tpm_fops;
+extern const struct file_operations tpmrm_fops;
+extern struct idr dev_nums_idr;
+
+/**
+ * enum tpm_transmit_flags - flags for tpm_transmit()
+ *
+ * @TPM_TRANSMIT_UNLOCKED:	do not lock the chip
+ * @TPM_TRANSMIT_NESTED:	discard setup steps (power management,
+ *				locality) including locking (i.e. implicit
+ *				UNLOCKED)
+ */
+enum tpm_transmit_flags {
+	TPM_TRANSMIT_UNLOCKED	= BIT(0),
+	TPM_TRANSMIT_NESTED      = BIT(1),
+};
+
+ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
+		     u8 *buf, size_t bufsiz, unsigned int flags);
+ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space,
+			 void *buf, size_t bufsiz,
+			 size_t min_rsp_body_length, unsigned int flags,
+			 const char *desc);
+int tpm_startup(struct tpm_chip *chip);
+ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
+		   const char *desc, size_t min_cap_length);
+int tpm_get_timeouts(struct tpm_chip *);
+int tpm1_auto_startup(struct tpm_chip *chip);
+int tpm_do_selftest(struct tpm_chip *chip);
+unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+int tpm_pm_suspend(struct device *dev);
+int tpm_pm_resume(struct device *dev);
+
+static inline void tpm_msleep(unsigned int delay_msec)
+{
+	usleep_range((delay_msec * 1000) - TPM_TIMEOUT_RANGE_US,
+		     delay_msec * 1000);
+}
+
+struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
+__must_check int tpm_try_get_ops(struct tpm_chip *chip);
+void tpm_put_ops(struct tpm_chip *chip);
+
+struct tpm_chip *tpm_chip_alloc(struct device *dev,
+				const struct tpm_class_ops *ops);
+struct tpm_chip *tpmm_chip_alloc(struct device *pdev,
+				 const struct tpm_class_ops *ops);
+int tpm_chip_register(struct tpm_chip *chip);
+void tpm_chip_unregister(struct tpm_chip *chip);
+
+void tpm_sysfs_add_device(struct tpm_chip *chip);
+
+int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
+
+#ifdef CONFIG_ACPI
+extern void tpm_add_ppi(struct tpm_chip *chip);
+#else
+static inline void tpm_add_ppi(struct tpm_chip *chip)
+{
+}
+#endif
+
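+/* Format-one ("FMT1") TPM2 return codes have bit 7 set; their low byte
+ * identifies the error while the upper bits encode which handle, session
+ * or parameter caused it. Mask those bits off so the result can be
+ * compared against the TPM2_RC_* constants above.
+ */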
+static inline u32 tpm2_rc_value(u32 rc)
+{
+	return (rc & BIT(7)) ? rc & 0xff : rc;
+}
+
+int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
+int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, u32 count,
+		    struct tpm2_digest *digests);
+int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max);
+void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
+			    unsigned int flags);
+int tpm2_seal_trusted(struct tpm_chip *chip,
+		      struct trusted_key_payload *payload,
+		      struct trusted_key_options *options);
+int tpm2_unseal_trusted(struct tpm_chip *chip,
+			struct trusted_key_payload *payload,
+			struct trusted_key_options *options);
+ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
+			u32 *value, const char *desc);
+
+int tpm2_auto_startup(struct tpm_chip *chip);
+void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
+unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+int tpm2_probe(struct tpm_chip *chip);
+int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
+int tpm2_init_space(struct tpm_space *space);
+void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space);
+int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc,
+		       u8 *cmd);
+int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
+		      u32 cc, u8 *buf, size_t *bufsiz);
+
+int tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_teardown(struct tpm_chip *chip);
+#endif
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
new file mode 100644
index 0000000..3acf4fd
--- /dev/null
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -0,0 +1,1035 @@
+/*
+ * Copyright (C) 2014, 2015 Intel Corporation
+ *
+ * Authors:
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * This file contains TPM2 protocol implementations of the commands
+ * used by the kernel internally.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include "tpm.h"
+#include <crypto/hash_info.h>
+#include <keys/trusted-type.h>
+
+enum tpm2_object_attributes {
+	TPM2_OA_USER_WITH_AUTH		= BIT(6),
+};
+
+enum tpm2_session_attributes {
+	TPM2_SA_CONTINUE_SESSION	= BIT(0),
+};
+
+struct tpm2_hash {
+	unsigned int crypto_id;
+	unsigned int tpm_id;
+};
+
+static struct tpm2_hash tpm2_hash_map[] = {
+	{HASH_ALGO_SHA1, TPM2_ALG_SHA1},
+	{HASH_ALGO_SHA256, TPM2_ALG_SHA256},
+	{HASH_ALGO_SHA384, TPM2_ALG_SHA384},
+	{HASH_ALGO_SHA512, TPM2_ALG_SHA512},
+	{HASH_ALGO_SM3_256, TPM2_ALG_SM3_256},
+};
+
+/*
+ * Array with one entry per ordinal defining the maximum amount
+ * of time the chip could take to return the result. The values
+ * of the SHORT, MEDIUM, and LONG durations are taken from the
+ * PC Client Profile (PTP) specification.
+ * LONG_LONG is for commands that generate keys, which empirically
+ * take longer on some systems.
+ */
+static const u8 tpm2_ordinal_duration[TPM2_CC_LAST - TPM2_CC_FIRST + 1] = {
+	TPM_UNDEFINED,		/* 11F */
+	TPM_UNDEFINED,		/* 120 */
+	TPM_LONG,		/* 121 */
+	TPM_UNDEFINED,		/* 122 */
+	TPM_UNDEFINED,		/* 123 */
+	TPM_UNDEFINED,		/* 124 */
+	TPM_UNDEFINED,		/* 125 */
+	TPM_UNDEFINED,		/* 126 */
+	TPM_UNDEFINED,		/* 127 */
+	TPM_UNDEFINED,		/* 128 */
+	TPM_LONG,		/* 129 */
+	TPM_UNDEFINED,		/* 12a */
+	TPM_UNDEFINED,		/* 12b */
+	TPM_UNDEFINED,		/* 12c */
+	TPM_UNDEFINED,		/* 12d */
+	TPM_UNDEFINED,		/* 12e */
+	TPM_UNDEFINED,		/* 12f */
+	TPM_UNDEFINED,		/* 130 */
+	TPM_LONG_LONG,		/* 131 */
+	TPM_UNDEFINED,		/* 132 */
+	TPM_UNDEFINED,		/* 133 */
+	TPM_UNDEFINED,		/* 134 */
+	TPM_UNDEFINED,		/* 135 */
+	TPM_UNDEFINED,		/* 136 */
+	TPM_UNDEFINED,		/* 137 */
+	TPM_UNDEFINED,		/* 138 */
+	TPM_UNDEFINED,		/* 139 */
+	TPM_UNDEFINED,		/* 13a */
+	TPM_UNDEFINED,		/* 13b */
+	TPM_UNDEFINED,		/* 13c */
+	TPM_UNDEFINED,		/* 13d */
+	TPM_MEDIUM,		/* 13e */
+	TPM_UNDEFINED,		/* 13f */
+	TPM_UNDEFINED,		/* 140 */
+	TPM_UNDEFINED,		/* 141 */
+	TPM_UNDEFINED,		/* 142 */
+	TPM_LONG,		/* 143 */
+	TPM_MEDIUM,		/* 144 */
+	TPM_UNDEFINED,		/* 145 */
+	TPM_UNDEFINED,		/* 146 */
+	TPM_UNDEFINED,		/* 147 */
+	TPM_UNDEFINED,		/* 148 */
+	TPM_UNDEFINED,		/* 149 */
+	TPM_UNDEFINED,		/* 14a */
+	TPM_UNDEFINED,		/* 14b */
+	TPM_UNDEFINED,		/* 14c */
+	TPM_UNDEFINED,		/* 14d */
+	TPM_LONG,		/* 14e */
+	TPM_UNDEFINED,		/* 14f */
+	TPM_UNDEFINED,		/* 150 */
+	TPM_UNDEFINED,		/* 151 */
+	TPM_UNDEFINED,		/* 152 */
+	TPM_LONG_LONG,		/* 153 */
+	TPM_UNDEFINED,		/* 154 */
+	TPM_UNDEFINED,		/* 155 */
+	TPM_UNDEFINED,		/* 156 */
+	TPM_UNDEFINED,		/* 157 */
+	TPM_UNDEFINED,		/* 158 */
+	TPM_UNDEFINED,		/* 159 */
+	TPM_UNDEFINED,		/* 15a */
+	TPM_UNDEFINED,		/* 15b */
+	TPM_MEDIUM,		/* 15c */
+	TPM_UNDEFINED,		/* 15d */
+	TPM_UNDEFINED,		/* 15e */
+	TPM_UNDEFINED,		/* 15f */
+	TPM_UNDEFINED,		/* 160 */
+	TPM_UNDEFINED,		/* 161 */
+	TPM_UNDEFINED,		/* 162 */
+	TPM_UNDEFINED,		/* 163 */
+	TPM_UNDEFINED,		/* 164 */
+	TPM_UNDEFINED,		/* 165 */
+	TPM_UNDEFINED,		/* 166 */
+	TPM_UNDEFINED,		/* 167 */
+	TPM_UNDEFINED,		/* 168 */
+	TPM_UNDEFINED,		/* 169 */
+	TPM_UNDEFINED,		/* 16a */
+	TPM_UNDEFINED,		/* 16b */
+	TPM_UNDEFINED,		/* 16c */
+	TPM_UNDEFINED,		/* 16d */
+	TPM_UNDEFINED,		/* 16e */
+	TPM_UNDEFINED,		/* 16f */
+	TPM_UNDEFINED,		/* 170 */
+	TPM_UNDEFINED,		/* 171 */
+	TPM_UNDEFINED,		/* 172 */
+	TPM_UNDEFINED,		/* 173 */
+	TPM_UNDEFINED,		/* 174 */
+	TPM_UNDEFINED,		/* 175 */
+	TPM_UNDEFINED,		/* 176 */
+	TPM_LONG,		/* 177 */
+	TPM_UNDEFINED,		/* 178 */
+	TPM_UNDEFINED,		/* 179 */
+	TPM_MEDIUM,		/* 17a */
+	TPM_LONG,		/* 17b */
+	TPM_UNDEFINED,		/* 17c */
+	TPM_UNDEFINED,		/* 17d */
+	TPM_UNDEFINED,		/* 17e */
+	TPM_UNDEFINED,		/* 17f */
+	TPM_UNDEFINED,		/* 180 */
+	TPM_UNDEFINED,		/* 181 */
+	TPM_MEDIUM,		/* 182 */
+	TPM_UNDEFINED,		/* 183 */
+	TPM_UNDEFINED,		/* 184 */
+	TPM_MEDIUM,		/* 185 */
+	TPM_MEDIUM,		/* 186 */
+	TPM_UNDEFINED,		/* 187 */
+	TPM_UNDEFINED,		/* 188 */
+	TPM_UNDEFINED,		/* 189 */
+	TPM_UNDEFINED,		/* 18a */
+	TPM_UNDEFINED,		/* 18b */
+	TPM_UNDEFINED,		/* 18c */
+	TPM_UNDEFINED,		/* 18d */
+	TPM_UNDEFINED,		/* 18e */
+	TPM_UNDEFINED		/* 18f */
+};
+
+struct tpm2_pcr_read_out {
+	__be32	update_cnt;
+	__be32	pcr_selects_cnt;
+	__be16	hash_alg;
+	u8	pcr_select_size;
+	u8	pcr_select[TPM2_PCR_SELECT_MIN];
+	__be32	digests_cnt;
+	__be16	digest_size;
+	u8	digest[];
+} __packed;
+
+/**
+ * tpm2_pcr_read() - read a PCR value
+ * @chip:	TPM chip to use.
+ * @pcr_idx:	index of the PCR to read.
+ * @res_buf:	buffer to store the resulting hash.
+ *
+ * Return: Same as with tpm_transmit_cmd.
+ */
+int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
+{
+	int rc;
+	struct tpm_buf buf;
+	struct tpm2_pcr_read_out *out;
+	u8 pcr_select[TPM2_PCR_SELECT_MIN] = {0};
+
+	if (pcr_idx >= TPM2_PLATFORM_PCR)
+		return -EINVAL;
+
+	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_PCR_READ);
+	if (rc)
+		return rc;
+
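+	/* One bit per PCR: e.g. pcr_idx 10 sets bit 2 of select byte 1. */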
+	pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7);
+
+	tpm_buf_append_u32(&buf, 1);
+	tpm_buf_append_u16(&buf, TPM2_ALG_SHA1);
+	tpm_buf_append_u8(&buf, TPM2_PCR_SELECT_MIN);
+	tpm_buf_append(&buf, (const unsigned char *)pcr_select,
+		       sizeof(pcr_select));
+
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
+			res_buf ? "attempting to read a pcr value" : NULL);
+	if (rc == 0 && res_buf) {
+		out = (struct tpm2_pcr_read_out *)&buf.data[TPM_HEADER_SIZE];
+		memcpy(res_buf, out->digest, SHA1_DIGEST_SIZE);
+	}
+
+	tpm_buf_destroy(&buf);
+	return rc;
+}
+
+struct tpm2_null_auth_area {
+	__be32  handle;
+	__be16  nonce_size;
+	u8  attributes;
+	__be16  auth_size;
+} __packed;
+
+/**
+ * tpm2_pcr_extend() - extend a PCR value
+ *
+ * @chip:	TPM chip to use.
+ * @pcr_idx:	index of the PCR.
+ * @count:	number of digests passed.
+ * @digests:	list of pcr banks and corresponding digest values to extend.
+ *
+ * Return: Same as with tpm_transmit_cmd.
+ */
+int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, u32 count,
+		    struct tpm2_digest *digests)
+{
+	struct tpm_buf buf;
+	struct tpm2_null_auth_area auth_area;
+	int rc;
+	int i;
+	int j;
+
+	if (count > ARRAY_SIZE(chip->active_banks))
+		return -EINVAL;
+
+	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
+	if (rc)
+		return rc;
+
+	tpm_buf_append_u32(&buf, pcr_idx);
+
+	auth_area.handle = cpu_to_be32(TPM2_RS_PW);
+	auth_area.nonce_size = 0;
+	auth_area.attributes = 0;
+	auth_area.auth_size = 0;
+
+	tpm_buf_append_u32(&buf, sizeof(struct tpm2_null_auth_area));
+	tpm_buf_append(&buf, (const unsigned char *)&auth_area,
+		       sizeof(auth_area));
+	tpm_buf_append_u32(&buf, count);
+
+	for (i = 0; i < count; i++) {
+		for (j = 0; j < ARRAY_SIZE(tpm2_hash_map); j++) {
+			if (digests[i].alg_id != tpm2_hash_map[j].tpm_id)
+				continue;
+			tpm_buf_append_u16(&buf, digests[i].alg_id);
+			tpm_buf_append(&buf, (const unsigned char
+					      *)&digests[i].digest,
+			       hash_digest_size[tpm2_hash_map[j].crypto_id]);
+		}
+	}
+
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
+			      "attempting to extend a PCR value");
+
+	tpm_buf_destroy(&buf);
+
+	return rc;
+}
+
+
+struct tpm2_get_random_out {
+	__be16 size;
+	u8 buffer[TPM_MAX_RNG_DATA];
+} __packed;
+
+/**
+ * tpm2_get_random() - get random bytes from the TPM RNG
+ *
+ * @chip:	a &tpm_chip instance
+ * @dest:	destination buffer
+ * @max:	the max number of random bytes to pull
+ *
+ * Return:
+ *   size of the buffer on success,
+ *   -errno otherwise
+ */
+int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
+{
+	struct tpm2_get_random_out *out;
+	struct tpm_buf buf;
+	u32 recd;
+	u32 num_bytes = max;
+	int err;
+	int total = 0;
+	int retries = 5;
+	u8 *dest_ptr = dest;
+
+	if (!num_bytes || max > TPM_MAX_RNG_DATA)
+		return -EINVAL;
+
+	err = tpm_buf_init(&buf, 0, 0);
+	if (err)
+		return err;
+
+	do {
+		tpm_buf_reset(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
+		tpm_buf_append_u16(&buf, num_bytes);
+		err = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
+				       offsetof(struct tpm2_get_random_out,
+						buffer),
+				       0, "attempting get random");
+		if (err)
+			goto out;
+
+		out = (struct tpm2_get_random_out *)
+			&buf.data[TPM_HEADER_SIZE];
+		recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
+		if (tpm_buf_length(&buf) <
+		    TPM_HEADER_SIZE +
+		    offsetof(struct tpm2_get_random_out, buffer) +
+		    recd) {
+			err = -EFAULT;
+			goto out;
+		}
+		memcpy(dest_ptr, out->buffer, recd);
+
+		dest_ptr += recd;
+		total += recd;
+		num_bytes -= recd;
+	} while (retries-- && total < max);
+
+	tpm_buf_destroy(&buf);
+	return total ? total : -EIO;
+out:
+	tpm_buf_destroy(&buf);
+	return err;
+}
+
+/**
+ * tpm2_flush_context_cmd() - execute a TPM2_FlushContext command
+ * @chip: TPM chip to use
+ * @handle: context handle to flush
+ * @flags: tpm transmit flags
+ *
+ * A failure to flush is logged but cannot be acted on; the function is void.
+ */
+void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
+			    unsigned int flags)
+{
+	struct tpm_buf buf;
+	int rc;
+
+	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT);
+	if (rc) {
+		dev_warn(&chip->dev, "0x%08x was not flushed, out of memory\n",
+			 handle);
+		return;
+	}
+
+	tpm_buf_append_u32(&buf, handle);
+
+	(void) tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, flags,
+				"flushing context");
+
+	tpm_buf_destroy(&buf);
+}
+
+/**
+ * tpm2_buf_append_auth() - append TPMS_AUTH_COMMAND to the buffer.
+ *
+ * @buf: an allocated tpm_buf instance
+ * @session_handle: session handle
+ * @nonce: the session nonce, may be NULL if not used
+ * @nonce_len: the session nonce length, may be 0 if not used
+ * @attributes: the session attributes
+ * @hmac: the session HMAC or password, may be NULL if not used
+ * @hmac_len: the session HMAC or password length, may be 0 if not used
+ */
+static void tpm2_buf_append_auth(struct tpm_buf *buf, u32 session_handle,
+				 const u8 *nonce, u16 nonce_len,
+				 u8 attributes,
+				 const u8 *hmac, u16 hmac_len)
+{
+	tpm_buf_append_u32(buf, 9 + nonce_len + hmac_len);
+	tpm_buf_append_u32(buf, session_handle);
+	tpm_buf_append_u16(buf, nonce_len);
+
+	if (nonce && nonce_len)
+		tpm_buf_append(buf, nonce, nonce_len);
+
+	tpm_buf_append_u8(buf, attributes);
+	tpm_buf_append_u16(buf, hmac_len);
+
+	if (hmac && hmac_len)
+		tpm_buf_append(buf, hmac, hmac_len);
+}
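+
+/*
+ * Illustrative use (matching the call in tpm2_seal_trusted() below): a
+ * password-only authorization uses the permanent TPM2_RS_PW handle, no
+ * nonce, and the key's auth value in the hmac field:
+ *
+ *	tpm2_buf_append_auth(&buf, TPM2_RS_PW, NULL, 0, 0,
+ *			     options->keyauth, TPM_DIGEST_SIZE);
+ */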
+
+/**
+ * tpm2_seal_trusted() - seal the payload of a trusted key
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ *
+ * Return: < 0 on error and 0 on success.
+ */
+int tpm2_seal_trusted(struct tpm_chip *chip,
+		      struct trusted_key_payload *payload,
+		      struct trusted_key_options *options)
+{
+	unsigned int blob_len;
+	struct tpm_buf buf;
+	u32 hash;
+	int i;
+	int rc;
+
+	for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) {
+		if (options->hash == tpm2_hash_map[i].crypto_id) {
+			hash = tpm2_hash_map[i].tpm_id;
+			break;
+		}
+	}
+
+	if (i == ARRAY_SIZE(tpm2_hash_map))
+		return -EINVAL;
+
+	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
+	if (rc)
+		return rc;
+
+	tpm_buf_append_u32(&buf, options->keyhandle);
+	tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+			     NULL /* nonce */, 0,
+			     0 /* session_attributes */,
+			     options->keyauth /* hmac */,
+			     TPM_DIGEST_SIZE);
+
+	/* sensitive */
+	tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
+
+	tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
+	tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
+	tpm_buf_append_u16(&buf, payload->key_len + 1);
+	tpm_buf_append(&buf, payload->key, payload->key_len);
+	tpm_buf_append_u8(&buf, payload->migratable);
+
+	/* public */
+	tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
+	tpm_buf_append_u16(&buf, TPM2_ALG_KEYEDHASH);
+	tpm_buf_append_u16(&buf, hash);
+
+	/* policy */
+	if (options->policydigest_len) {
+		tpm_buf_append_u32(&buf, 0);
+		tpm_buf_append_u16(&buf, options->policydigest_len);
+		tpm_buf_append(&buf, options->policydigest,
+			       options->policydigest_len);
+	} else {
+		tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
+		tpm_buf_append_u16(&buf, 0);
+	}
+
+	/* public parameters */
+	tpm_buf_append_u16(&buf, TPM2_ALG_NULL);
+	tpm_buf_append_u16(&buf, 0);
+
+	/* outside info */
+	tpm_buf_append_u16(&buf, 0);
+
+	/* creation PCR */
+	tpm_buf_append_u32(&buf, 0);
+
+	if (buf.flags & TPM_BUF_OVERFLOW) {
+		rc = -E2BIG;
+		goto out;
+	}
+
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 4, 0,
+			      "sealing data");
+	if (rc)
+		goto out;
+
+	blob_len = be32_to_cpup((__be32 *) &buf.data[TPM_HEADER_SIZE]);
+	if (blob_len > MAX_BLOB_SIZE) {
+		rc = -E2BIG;
+		goto out;
+	}
+	if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 4 + blob_len) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	memcpy(payload->blob, &buf.data[TPM_HEADER_SIZE + 4], blob_len);
+	payload->blob_len = blob_len;
+
+out:
+	tpm_buf_destroy(&buf);
+
+	if (rc > 0) {
+		if (tpm2_rc_value(rc) == TPM2_RC_HASH)
+			rc = -EINVAL;
+		else
+			rc = -EPERM;
+	}
+
+	return rc;
+}
+
+/**
+ * tpm2_load_cmd() - execute a TPM2_Load command
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ * @blob_handle: returned blob handle
+ * @flags: tpm transmit flags
+ *
+ * Return: 0 on success.
+ *        -E2BIG on wrong payload size.
+ *        -EPERM on tpm error status.
+ *        < 0 error from tpm_transmit_cmd.
+ */
+static int tpm2_load_cmd(struct tpm_chip *chip,
+			 struct trusted_key_payload *payload,
+			 struct trusted_key_options *options,
+			 u32 *blob_handle, unsigned int flags)
+{
+	struct tpm_buf buf;
+	unsigned int private_len;
+	unsigned int public_len;
+	unsigned int blob_len;
+	int rc;
+
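+	/* The blob is effectively a TPM2B_PRIVATE followed by a TPM2B_PUBLIC:
+	 * a be16 length and then that many bytes, twice, so the total size is
+	 * private_len + public_len + 4 bytes of length fields.
+	 */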
+	private_len = be16_to_cpup((__be16 *) &payload->blob[0]);
+	if (private_len > (payload->blob_len - 2))
+		return -E2BIG;
+
+	public_len = be16_to_cpup((__be16 *) &payload->blob[2 + private_len]);
+	blob_len = private_len + public_len + 4;
+	if (blob_len > payload->blob_len)
+		return -E2BIG;
+
+	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_LOAD);
+	if (rc)
+		return rc;
+
+	tpm_buf_append_u32(&buf, options->keyhandle);
+	tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+			     NULL /* nonce */, 0,
+			     0 /* session_attributes */,
+			     options->keyauth /* hmac */,
+			     TPM_DIGEST_SIZE);
+
+	tpm_buf_append(&buf, payload->blob, blob_len);
+
+	if (buf.flags & TPM_BUF_OVERFLOW) {
+		rc = -E2BIG;
+		goto out;
+	}
+
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 4, flags,
+			      "loading blob");
+	if (!rc)
+		*blob_handle = be32_to_cpup(
+			(__be32 *) &buf.data[TPM_HEADER_SIZE]);
+
+out:
+	tpm_buf_destroy(&buf);
+
+	if (rc > 0)
+		rc = -EPERM;
+
+	return rc;
+}
+
+/**
+ * tpm2_unseal_cmd() - execute a TPM2_Unseal command
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ * @blob_handle: blob handle
+ * @flags: tpm_transmit_cmd flags
+ *
+ * Return: 0 on success
+ *         -EPERM on tpm error status
+ *         < 0 error from tpm_transmit_cmd
+ */
+static int tpm2_unseal_cmd(struct tpm_chip *chip,
+			   struct trusted_key_payload *payload,
+			   struct trusted_key_options *options,
+			   u32 blob_handle, unsigned int flags)
+{
+	struct tpm_buf buf;
+	u16 data_len;
+	u8 *data;
+	int rc;
+
+	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
+	if (rc)
+		return rc;
+
+	tpm_buf_append_u32(&buf, blob_handle);
+	tpm2_buf_append_auth(&buf,
+			     options->policyhandle ?
+			     options->policyhandle : TPM2_RS_PW,
+			     NULL /* nonce */, 0,
+			     TPM2_SA_CONTINUE_SESSION,
+			     options->blobauth /* hmac */,
+			     TPM_DIGEST_SIZE);
+
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 6, flags,
+			      "unsealing");
+	if (rc > 0)
+		rc = -EPERM;
+
+	if (!rc) {
+		data_len = be16_to_cpup(
+			(__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
+		if (data_len < MIN_KEY_SIZE ||  data_len > MAX_KEY_SIZE + 1) {
+			rc = -EFAULT;
+			goto out;
+		}
+
+		if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 6 + data_len) {
+			rc = -EFAULT;
+			goto out;
+		}
+		data = &buf.data[TPM_HEADER_SIZE + 6];
+
+		memcpy(payload->key, data, data_len - 1);
+		payload->key_len = data_len - 1;
+		payload->migratable = data[data_len - 1];
+	}
+
+out:
+	tpm_buf_destroy(&buf);
+	return rc;
+}
+
+/**
+ * tpm2_unseal_trusted() - unseal the payload of a trusted key
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ *
+ * Return: Same as with tpm_transmit_cmd.
+ */
+int tpm2_unseal_trusted(struct tpm_chip *chip,
+			struct trusted_key_payload *payload,
+			struct trusted_key_options *options)
+{
+	u32 blob_handle;
+	int rc;
+
+	mutex_lock(&chip->tpm_mutex);
+	rc = tpm2_load_cmd(chip, payload, options, &blob_handle,
+			   TPM_TRANSMIT_UNLOCKED);
+	if (rc)
+		goto out;
+
+	rc = tpm2_unseal_cmd(chip, payload, options, blob_handle,
+			     TPM_TRANSMIT_UNLOCKED);
+	tpm2_flush_context_cmd(chip, blob_handle, TPM_TRANSMIT_UNLOCKED);
+out:
+	mutex_unlock(&chip->tpm_mutex);
+	return rc;
+}
+
+struct tpm2_get_cap_out {
+	u8 more_data;
+	__be32 subcap_id;
+	__be32 property_cnt;
+	__be32 property_id;
+	__be32 value;
+} __packed;
+
+/**
+ * tpm2_get_tpm_pt() - get value of a TPM_CAP_TPM_PROPERTIES type property
+ * @chip:		a &tpm_chip instance
+ * @property_id:	property ID.
+ * @value:		output variable.
+ * @desc:		passed to tpm_transmit_cmd()
+ *
+ * Return:
+ *   0 on success,
+ *   -errno or a TPM return code otherwise
+ */
+ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,  u32 *value,
+			const char *desc)
+{
+	struct tpm2_get_cap_out *out;
+	struct tpm_buf buf;
+	int rc;
+
+	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+	if (rc)
+		return rc;
+	tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
+	tpm_buf_append_u32(&buf, property_id);
+	tpm_buf_append_u32(&buf, 1);
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, NULL);
+	if (!rc) {
+		out = (struct tpm2_get_cap_out *)
+			&buf.data[TPM_HEADER_SIZE];
+		*value = be32_to_cpu(out->value);
+	}
+	tpm_buf_destroy(&buf);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tpm2_get_tpm_pt);
+
+/**
+ * tpm2_shutdown() - send a TPM shutdown command
+ *
+ * Sends a TPM shutdown command. The shutdown command is used in call
+ * sites where the system is going down. If it fails, there is not much
+ * that can be done except print an error message.
+ *
+ * @chip:		a &tpm_chip instance
+ * @shutdown_type:	TPM_SU_CLEAR or TPM_SU_STATE.
+ */
+void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
+{
+	struct tpm_buf buf;
+	int rc;
+
+	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_SHUTDOWN);
+	if (rc)
+		return;
+	tpm_buf_append_u16(&buf, shutdown_type);
+	tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
+			 "stopping the TPM");
+	tpm_buf_destroy(&buf);
+}
+
+/*
+ * tpm2_calc_ordinal_duration() - maximum duration for a command
+ *
+ * @chip:	TPM chip to use.
+ * @ordinal:	command code number.
+ *
+ * Return: maximum duration for a command
+ */
+unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
+{
+	int index = TPM_UNDEFINED;
+	int duration = 0;
+
+	if (ordinal >= TPM2_CC_FIRST && ordinal <= TPM2_CC_LAST)
+		index = tpm2_ordinal_duration[ordinal - TPM2_CC_FIRST];
+
+	if (index != TPM_UNDEFINED)
+		duration = chip->duration[index];
+
+	if (duration <= 0)
+		duration = msecs_to_jiffies(TPM2_DURATION_DEFAULT);
+
+	return duration;
+}
+EXPORT_SYMBOL_GPL(tpm2_calc_ordinal_duration);
+
+/**
+ * tpm2_do_selftest() - ensure that all self tests have passed
+ *
+ * @chip: TPM chip to use
+ *
+ * Return: Same as with tpm_transmit_cmd.
+ *
+ * The TPM can either run all self tests synchronously and then return
+ * RC_SUCCESS once all tests were successful, or it can choose to run the
+ * tests asynchronously and return RC_TESTING immediately while the self
+ * tests still execute in the background. This function handles both cases
+ * and waits until all tests have completed.
+ */
+static int tpm2_do_selftest(struct tpm_chip *chip)
+{
+	struct tpm_buf buf;
+	int full;
+	int rc;
+
+	for (full = 0; full < 2; full++) {
+		rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_SELF_TEST);
+		if (rc)
+			return rc;
+
+		tpm_buf_append_u8(&buf, full);
+		rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
+				      "attempting the self test");
+		tpm_buf_destroy(&buf);
+
+		if (rc == TPM2_RC_TESTING)
+			rc = TPM2_RC_SUCCESS;
+		if (rc == TPM2_RC_INITIALIZE || rc == TPM2_RC_SUCCESS)
+			return rc;
+	}
+
+	return rc;
+}
+
+/**
+ * tpm2_probe() - probe for the TPM 2.0 protocol
+ * @chip:	a &tpm_chip instance
+ *
+ * Send an idempotent TPM 2.0 command and see whether there is a TPM2 chip on
+ * the other end based on the response tag. The flag TPM_CHIP_FLAG_TPM2 is set
+ * by this function if this is the case.
+ *
+ * Return:
+ *   0 on success,
+ *   -errno otherwise
+ */
+int tpm2_probe(struct tpm_chip *chip)
+{
+	struct tpm_output_header *out;
+	struct tpm_buf buf;
+	int rc;
+
+	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+	if (rc)
+		return rc;
+	tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
+	tpm_buf_append_u32(&buf, TPM_PT_TOTAL_COMMANDS);
+	tpm_buf_append_u32(&buf, 1);
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, NULL);
+	/* We ignore TPM return codes on purpose. */
+	if (rc >=  0) {
+		out = (struct tpm_output_header *)buf.data;
+		if (be16_to_cpu(out->tag) == TPM2_ST_NO_SESSIONS)
+			chip->flags |= TPM_CHIP_FLAG_TPM2;
+	}
+	tpm_buf_destroy(&buf);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tpm2_probe);
+
+struct tpm2_pcr_selection {
+	__be16  hash_alg;
+	u8  size_of_select;
+	u8  pcr_select[3];
+} __packed;
+
+static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
+{
+	struct tpm2_pcr_selection pcr_selection;
+	struct tpm_buf buf;
+	void *marker;
+	void *end;
+	void *pcr_select_offset;
+	unsigned int count;
+	u32 sizeof_pcr_selection;
+	u32 rsp_len;
+	int rc;
+	int i = 0;
+
+	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+	if (rc)
+		return rc;
+
+	tpm_buf_append_u32(&buf, TPM2_CAP_PCRS);
+	tpm_buf_append_u32(&buf, 0);
+	tpm_buf_append_u32(&buf, 1);
+
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 9, 0,
+			      "get tpm pcr allocation");
+	if (rc)
+		goto out;
+
+	count = be32_to_cpup(
+		(__be32 *)&buf.data[TPM_HEADER_SIZE + 5]);
+
+	if (count > ARRAY_SIZE(chip->active_banks)) {
+		rc = -ENODEV;
+		goto out;
+	}
+
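+	/* Response body after the header: u8 more_data, be32 capability,
+	 * be32 count (read at offset +5 above), then 'count' variable-size
+	 * TPMS_PCR_SELECTION entries starting at offset +9.
+	 */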
+	marker = &buf.data[TPM_HEADER_SIZE + 9];
+
+	rsp_len = be32_to_cpup((__be32 *)&buf.data[2]);
+	end = &buf.data[rsp_len];
+
+	for (i = 0; i < count; i++) {
+		pcr_select_offset = marker +
+			offsetof(struct tpm2_pcr_selection, size_of_select);
+		if (pcr_select_offset >= end) {
+			rc = -EFAULT;
+			break;
+		}
+
+		memcpy(&pcr_selection, marker, sizeof(pcr_selection));
+		chip->active_banks[i] = be16_to_cpu(pcr_selection.hash_alg);
+		sizeof_pcr_selection = sizeof(pcr_selection.hash_alg) +
+			sizeof(pcr_selection.size_of_select) +
+			pcr_selection.size_of_select;
+		marker = marker + sizeof_pcr_selection;
+	}
+
+out:
+	if (i < ARRAY_SIZE(chip->active_banks))
+		chip->active_banks[i] = TPM2_ALG_ERROR;
+
+	tpm_buf_destroy(&buf);
+
+	return rc;
+}
+
+static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
+{
+	struct tpm_buf buf;
+	u32 nr_commands;
+	__be32 *attrs;
+	u32 cc;
+	int i;
+	int rc;
+
+	rc = tpm2_get_tpm_pt(chip, TPM_PT_TOTAL_COMMANDS, &nr_commands, NULL);
+	if (rc)
+		goto out;
+
+	if (nr_commands > 0xFFFFF) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	chip->cc_attrs_tbl = devm_kcalloc(&chip->dev, 4, nr_commands,
+					  GFP_KERNEL);
+	if (!chip->cc_attrs_tbl) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+	if (rc)
+		goto out;
+
+	tpm_buf_append_u32(&buf, TPM2_CAP_COMMANDS);
+	tpm_buf_append_u32(&buf, TPM2_CC_FIRST);
+	tpm_buf_append_u32(&buf, nr_commands);
+
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
+			      9 + 4 * nr_commands, 0, NULL);
+	if (rc) {
+		tpm_buf_destroy(&buf);
+		goto out;
+	}
+
+	if (nr_commands !=
+	    be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
+		rc = -EFAULT;
+		tpm_buf_destroy(&buf);
+		goto out;
+	}
+
+	chip->nr_commands = nr_commands;
+
+	attrs = (__be32 *)&buf.data[TPM_HEADER_SIZE + 9];
+	for (i = 0; i < nr_commands; i++, attrs++) {
+		chip->cc_attrs_tbl[i] = be32_to_cpup(attrs);
+		cc = chip->cc_attrs_tbl[i] & 0xFFFF;
+
+		if (cc == TPM2_CC_CONTEXT_SAVE || cc == TPM2_CC_FLUSH_CONTEXT) {
+			chip->cc_attrs_tbl[i] &=
+				~(GENMASK(2, 0) << TPM2_CC_ATTR_CHANDLES);
+			chip->cc_attrs_tbl[i] |= 1 << TPM2_CC_ATTR_CHANDLES;
+		}
+	}
+
+	tpm_buf_destroy(&buf);
+
+out:
+	if (rc > 0)
+		rc = -ENODEV;
+	return rc;
+}
+
+/**
+ * tpm2_auto_startup - Perform the standard automatic TPM initialization
+ *                     sequence
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error.
+ */
+int tpm2_auto_startup(struct tpm_chip *chip)
+{
+	int rc;
+
+	rc = tpm_get_timeouts(chip);
+	if (rc)
+		goto out;
+
+	rc = tpm2_do_selftest(chip);
+	if (rc && rc != TPM2_RC_INITIALIZE)
+		goto out;
+
+	if (rc == TPM2_RC_INITIALIZE) {
+		rc = tpm_startup(chip);
+		if (rc)
+			goto out;
+
+		rc = tpm2_do_selftest(chip);
+		if (rc)
+			goto out;
+	}
+
+	rc = tpm2_get_pcr_allocation(chip);
+	if (rc)
+		goto out;
+
+	rc = tpm2_get_cc_attrs_tbl(chip);
+
+out:
+	if (rc > 0)
+		rc = -ENODEV;
+	return rc;
+}
+
+int tpm2_find_cc(struct tpm_chip *chip, u32 cc)
+{
+	int i;
+
+	for (i = 0; i < chip->nr_commands; i++)
+		if (cc == (chip->cc_attrs_tbl[i] & GENMASK(15, 0)))
+			return i;
+
+	return -1;
+}
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
new file mode 100644
index 0000000..d2e101b
--- /dev/null
+++ b/drivers/char/tpm/tpm2-space.c
@@ -0,0 +1,529 @@
+/*
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Authors:
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * This file contains TPM2 protocol implementations of the commands
+ * used by the kernel internally.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gfp.h>
+#include <asm/unaligned.h>
+#include "tpm.h"
+
+enum tpm2_handle_types {
+	TPM2_HT_HMAC_SESSION	= 0x02000000,
+	TPM2_HT_POLICY_SESSION	= 0x03000000,
+	TPM2_HT_TRANSIENT	= 0x80000000,
+};
+
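+/*
+ * Wire layout of the fixed part of a saved context, as produced by
+ * TPM2_ContextSave and consumed by TPM2_ContextLoad.  blob_size bytes
+ * of opaque, TPM-encrypted state follow these fields, so one record
+ * occupies sizeof(struct tpm2_context) + blob_size bytes when streamed
+ * into a space's backing buffer.
+ */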
+struct tpm2_context {
+	__be64 sequence;
+	__be32 saved_handle;
+	__be32 hierarchy;
+	__be16 blob_size;
+} __packed;
+
+static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
+		if (space->session_tbl[i])
+			tpm2_flush_context_cmd(chip, space->session_tbl[i],
+					       TPM_TRANSMIT_NESTED);
+	}
+}
+
+int tpm2_init_space(struct tpm_space *space)
+{
+	space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!space->context_buf)
+		return -ENOMEM;
+
+	space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (space->session_buf == NULL) {
+		kfree(space->context_buf);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space)
+{
+	mutex_lock(&chip->tpm_mutex);
+	tpm2_flush_sessions(chip, space);
+	mutex_unlock(&chip->tpm_mutex);
+	kfree(space->context_buf);
+	kfree(space->session_buf);
+}
+
+static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
+			     unsigned int *offset, u32 *handle)
+{
+	struct tpm_buf tbuf;
+	struct tpm2_context *ctx;
+	unsigned int body_size;
+	int rc;
+
+	rc = tpm_buf_init(&tbuf, TPM2_ST_NO_SESSIONS, TPM2_CC_CONTEXT_LOAD);
+	if (rc)
+		return rc;
+
+	ctx = (struct tpm2_context *)&buf[*offset];
+	body_size = sizeof(*ctx) + be16_to_cpu(ctx->blob_size);
+	tpm_buf_append(&tbuf, &buf[*offset], body_size);
+
+	rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 4,
+			      TPM_TRANSMIT_NESTED, NULL);
+	if (rc < 0) {
+		dev_warn(&chip->dev, "%s: failed with a system error %d\n",
+			 __func__, rc);
+		tpm_buf_destroy(&tbuf);
+		return -EFAULT;
+	} else if (tpm2_rc_value(rc) == TPM2_RC_HANDLE ||
+		   rc == TPM2_RC_REFERENCE_H0) {
+		/*
+		 * TPM_RC_HANDLE means that the session context can't
+		 * be loaded because of an internal counter mismatch
+		 * that makes the TPM think there might have been a
+		 * replay.  This might happen if the context was saved
+		 * and loaded outside the space.
+		 *
+		 * TPM_RC_REFERENCE_H0 means the session has been
+		 * flushed outside the space.
+		 */
+		*handle = 0;
+		tpm_buf_destroy(&tbuf);
+		return -ENOENT;
+	} else if (rc > 0) {
+		dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
+			 __func__, rc);
+		tpm_buf_destroy(&tbuf);
+		return -EFAULT;
+	}
+
+	*handle = be32_to_cpup((__be32 *)&tbuf.data[TPM_HEADER_SIZE]);
+	*offset += body_size;
+
+	tpm_buf_destroy(&tbuf);
+	return 0;
+}
+
+static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
+			     unsigned int buf_size, unsigned int *offset)
+{
+	struct tpm_buf tbuf;
+	unsigned int body_size;
+	int rc;
+
+	rc = tpm_buf_init(&tbuf, TPM2_ST_NO_SESSIONS, TPM2_CC_CONTEXT_SAVE);
+	if (rc)
+		return rc;
+
+	tpm_buf_append_u32(&tbuf, handle);
+
+	rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 0,
+			      TPM_TRANSMIT_NESTED, NULL);
+	if (rc < 0) {
+		dev_warn(&chip->dev, "%s: failed with a system error %d\n",
+			 __func__, rc);
+		tpm_buf_destroy(&tbuf);
+		return -EFAULT;
+	} else if (tpm2_rc_value(rc) == TPM2_RC_REFERENCE_H0) {
+		tpm_buf_destroy(&tbuf);
+		return -ENOENT;
+	} else if (rc) {
+		dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
+			 __func__, rc);
+		tpm_buf_destroy(&tbuf);
+		return -EFAULT;
+	}
+
+	body_size = tpm_buf_length(&tbuf) - TPM_HEADER_SIZE;
+	if ((*offset + body_size) > buf_size) {
+		dev_warn(&chip->dev, "%s: out of backing storage\n", __func__);
+		tpm_buf_destroy(&tbuf);
+		return -ENOMEM;
+	}
+
+	memcpy(&buf[*offset], &tbuf.data[TPM_HEADER_SIZE], body_size);
+	*offset += body_size;
+	tpm_buf_destroy(&tbuf);
+	return 0;
+}
+
+static void tpm2_flush_space(struct tpm_chip *chip)
+{
+	struct tpm_space *space = &chip->work_space;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
+		if (space->context_tbl[i] && ~space->context_tbl[i])
+			tpm2_flush_context_cmd(chip, space->context_tbl[i],
+					       TPM_TRANSMIT_NESTED);
+
+	tpm2_flush_sessions(chip, space);
+}
+
+static int tpm2_load_space(struct tpm_chip *chip)
+{
+	struct tpm_space *space = &chip->work_space;
+	unsigned int offset;
+	int i;
+	int rc;
+
+	for (i = 0, offset = 0; i < ARRAY_SIZE(space->context_tbl); i++) {
+		if (!space->context_tbl[i])
+			continue;
+
+		/* sanity check, should never happen */
+		if (~space->context_tbl[i]) {
+			dev_err(&chip->dev, "context table is inconsistent");
+			return -EFAULT;
+		}
+
+		rc = tpm2_load_context(chip, space->context_buf, &offset,
+				       &space->context_tbl[i]);
+		if (rc)
+			return rc;
+	}
+
+	for (i = 0, offset = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
+		u32 handle;
+
+		if (!space->session_tbl[i])
+			continue;
+
+		rc = tpm2_load_context(chip, space->session_buf,
+				       &offset, &handle);
+		if (rc == -ENOENT) {
+			/* load failed, just forget session */
+			space->session_tbl[i] = 0;
+		} else if (rc) {
+			tpm2_flush_space(chip);
+			return rc;
+		}
+		if (handle != space->session_tbl[i]) {
+			dev_warn(&chip->dev, "session restored to wrong handle\n");
+			tpm2_flush_space(chip);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
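+/*
+ * Transient object handles are virtualized per space: slot i of
+ * context_tbl is exposed as TPM2_HT_TRANSIENT | (0xFFFFFF - i), so
+ * slot 0 maps to 0x80FFFFFF, slot 1 to 0x80FFFFFE, and so on.
+ * tpm2_map_to_phandle() inverts that mapping and rewrites the handle
+ * in the command to the physical handle currently known to the TPM.
+ */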
+static bool tpm2_map_to_phandle(struct tpm_space *space, void *handle)
+{
+	u32 vhandle = be32_to_cpup((__be32 *)handle);
+	u32 phandle;
+	int i;
+
+	i = 0xFFFFFF - (vhandle & 0xFFFFFF);
+	if (i >= ARRAY_SIZE(space->context_tbl) || !space->context_tbl[i])
+		return false;
+
+	phandle = space->context_tbl[i];
+	*((__be32 *)handle) = cpu_to_be32(phandle);
+	return true;
+}
+
+static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd)
+{
+	struct tpm_space *space = &chip->work_space;
+	unsigned int nr_handles;
+	u32 attrs;
+	__be32 *handle;
+	int i;
+
+	i = tpm2_find_cc(chip, cc);
+	if (i < 0)
+		return -EINVAL;
+
+	attrs = chip->cc_attrs_tbl[i];
+	nr_handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);
+
+	handle = (__be32 *)&cmd[TPM_HEADER_SIZE];
+	for (i = 0; i < nr_handles; i++, handle++) {
+		if ((be32_to_cpu(*handle) & 0xFF000000) == TPM2_HT_TRANSIENT) {
+			if (!tpm2_map_to_phandle(space, handle))
+				return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc,
+		       u8 *cmd)
+{
+	int rc;
+
+	if (!space)
+		return 0;
+
+	memcpy(&chip->work_space.context_tbl, &space->context_tbl,
+	       sizeof(space->context_tbl));
+	memcpy(&chip->work_space.session_tbl, &space->session_tbl,
+	       sizeof(space->session_tbl));
+	memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE);
+	memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE);
+
+	rc = tpm2_load_space(chip);
+	if (rc) {
+		tpm2_flush_space(chip);
+		return rc;
+	}
+
+	rc = tpm2_map_command(chip, cc, cmd);
+	if (rc) {
+		tpm2_flush_space(chip);
+		return rc;
+	}
+
+	return 0;
+}
+
+static bool tpm2_add_session(struct tpm_chip *chip, u32 handle)
+{
+	struct tpm_space *space = &chip->work_space;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++)
+		if (space->session_tbl[i] == 0)
+			break;
+
+	if (i == ARRAY_SIZE(space->session_tbl))
+		return false;
+
+	space->session_tbl[i] = handle;
+	return true;
+}
+
+static u32 tpm2_map_to_vhandle(struct tpm_space *space, u32 phandle, bool alloc)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) {
+		if (alloc) {
+			if (!space->context_tbl[i]) {
+				space->context_tbl[i] = phandle;
+				break;
+			}
+		} else if (space->context_tbl[i] == phandle)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(space->context_tbl))
+		return 0;
+
+	return TPM2_HT_TRANSIENT | (0xFFFFFF - i);
+}
+
+static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp,
+				    size_t len)
+{
+	struct tpm_space *space = &chip->work_space;
+	struct tpm_output_header *header = (void *)rsp;
+	u32 phandle;
+	u32 phandle_type;
+	u32 vhandle;
+	u32 attrs;
+	int i;
+
+	if (be32_to_cpu(header->return_code) != TPM2_RC_SUCCESS)
+		return 0;
+
+	i = tpm2_find_cc(chip, cc);
+	/* sanity check, should never happen */
+	if (i < 0)
+		return -EFAULT;
+
+	attrs = chip->cc_attrs_tbl[i];
+	if (!((attrs >> TPM2_CC_ATTR_RHANDLE) & 1))
+		return 0;
+
+	phandle = be32_to_cpup((__be32 *)&rsp[TPM_HEADER_SIZE]);
+	phandle_type = phandle & 0xFF000000;
+
+	switch (phandle_type) {
+	case TPM2_HT_TRANSIENT:
+		vhandle = tpm2_map_to_vhandle(space, phandle, true);
+		if (!vhandle)
+			goto out_no_slots;
+
+		*(__be32 *)&rsp[TPM_HEADER_SIZE] = cpu_to_be32(vhandle);
+		break;
+	case TPM2_HT_HMAC_SESSION:
+	case TPM2_HT_POLICY_SESSION:
+		if (!tpm2_add_session(chip, phandle))
+			goto out_no_slots;
+		break;
+	default:
+		dev_err(&chip->dev, "%s: unknown handle 0x%08X\n",
+			__func__, phandle);
+		break;
+	}
+
+	return 0;
+out_no_slots:
+	tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_NESTED);
+	dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__,
+		 phandle);
+	return -ENOMEM;
+}
+
+struct tpm2_cap_handles {
+	u8 more_data;
+	__be32 capability;
+	__be32 count;
+	__be32 handles[];
+} __packed;
+
+static int tpm2_map_response_body(struct tpm_chip *chip, u32 cc, u8 *rsp,
+				  size_t len)
+{
+	struct tpm_space *space = &chip->work_space;
+	struct tpm_output_header *header = (void *)rsp;
+	struct tpm2_cap_handles *data;
+	u32 phandle;
+	u32 phandle_type;
+	u32 vhandle;
+	int i;
+	int j;
+
+	if (cc != TPM2_CC_GET_CAPABILITY ||
+	    be32_to_cpu(header->return_code) != TPM2_RC_SUCCESS) {
+		return 0;
+	}
+
+	if (len < TPM_HEADER_SIZE + 9)
+		return -EFAULT;
+
+	data = (void *)&rsp[TPM_HEADER_SIZE];
+	if (be32_to_cpu(data->capability) != TPM2_CAP_HANDLES)
+		return 0;
+
+	if (len != TPM_HEADER_SIZE + 9 + 4 * be32_to_cpu(data->count))
+		return -EFAULT;
+
+	for (i = 0, j = 0; i < be32_to_cpu(data->count); i++) {
+		phandle = be32_to_cpup((__be32 *)&data->handles[i]);
+		phandle_type = phandle & 0xFF000000;
+
+		switch (phandle_type) {
+		case TPM2_HT_TRANSIENT:
+			vhandle = tpm2_map_to_vhandle(space, phandle, false);
+			if (!vhandle)
+				break;
+
+			data->handles[j] = cpu_to_be32(vhandle);
+			j++;
+			break;
+
+		default:
+			data->handles[j] = cpu_to_be32(phandle);
+			j++;
+			break;
+		}
+
+	}
+
+	header->length = cpu_to_be32(TPM_HEADER_SIZE + 9 + 4 * j);
+	data->count = cpu_to_be32(j);
+	return 0;
+}
+
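+/*
+ * A context_tbl entry has two sentinel values: 0 marks a free slot and
+ * ~0 marks an object whose context has been saved into context_buf and
+ * flushed from the TPM; anything else is a live physical handle.  The
+ * "tbl[i] && ~tbl[i]" test below therefore selects only live handles.
+ */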
+static int tpm2_save_space(struct tpm_chip *chip)
+{
+	struct tpm_space *space = &chip->work_space;
+	unsigned int offset;
+	int i;
+	int rc;
+
+	for (i = 0, offset = 0; i < ARRAY_SIZE(space->context_tbl); i++) {
+		if (!(space->context_tbl[i] && ~space->context_tbl[i]))
+			continue;
+
+		rc = tpm2_save_context(chip, space->context_tbl[i],
+				       space->context_buf, PAGE_SIZE,
+				       &offset);
+		if (rc == -ENOENT) {
+			space->context_tbl[i] = 0;
+			continue;
+		} else if (rc)
+			return rc;
+
+		tpm2_flush_context_cmd(chip, space->context_tbl[i],
+				       TPM_TRANSMIT_NESTED);
+		space->context_tbl[i] = ~0;
+	}
+
+	for (i = 0, offset = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
+		if (!space->session_tbl[i])
+			continue;
+
+		rc = tpm2_save_context(chip, space->session_tbl[i],
+				       space->session_buf, PAGE_SIZE,
+				       &offset);
+
+		if (rc == -ENOENT) {
+			/* handle error saving session, just forget it */
+			space->session_tbl[i] = 0;
+		} else if (rc < 0) {
+			tpm2_flush_space(chip);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
+		      u32 cc, u8 *buf, size_t *bufsiz)
+{
+	struct tpm_output_header *header = (void *)buf;
+	int rc;
+
+	if (!space)
+		return 0;
+
+	rc = tpm2_map_response_header(chip, cc, buf, *bufsiz);
+	if (rc) {
+		tpm2_flush_space(chip);
+		return rc;
+	}
+
+	rc = tpm2_map_response_body(chip, cc, buf, *bufsiz);
+	if (rc) {
+		tpm2_flush_space(chip);
+		return rc;
+	}
+
+	rc = tpm2_save_space(chip);
+	if (rc) {
+		tpm2_flush_space(chip);
+		return rc;
+	}
+
+	*bufsiz = be32_to_cpu(header->length);
+
+	memcpy(&space->context_tbl, &chip->work_space.context_tbl,
+	       sizeof(space->context_tbl));
+	memcpy(&space->session_tbl, &chip->work_space.session_tbl,
+	       sizeof(space->session_tbl));
+	memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE);
+	memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE);
+
+	return 0;
+}
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
new file mode 100644
index 0000000..66a1452
--- /dev/null
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ * 
+ */
+
+#include "tpm.h"
+#include "tpm_atmel.h"
+
+/* write status bits */
+enum tpm_atmel_write_status {
+	ATML_STATUS_ABORT = 0x01,
+	ATML_STATUS_LASTBYTE = 0x04
+};
+/* read status bits */
+enum tpm_atmel_read_status {
+	ATML_STATUS_BUSY = 0x01,
+	ATML_STATUS_DATA_AVAIL = 0x02,
+	ATML_STATUS_REWRITE = 0x04,
+	ATML_STATUS_READY = 0x08
+};
+
+static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+	u8 status, *hdr = buf;
+	u32 size;
+	int i;
+	__be32 *native_size;
+
+	/* start reading header */
+	if (count < 6)
+		return -EIO;
+
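+	/*
+	 * The first six bytes are the start of the response header: a
+	 * 2-byte tag followed by the 4-byte big-endian total length,
+	 * which is extracted below to decide how many more bytes to
+	 * drain from the FIFO.
+	 */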
+	for (i = 0; i < 6; i++) {
+		status = ioread8(priv->iobase + 1);
+		if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
+			dev_err(&chip->dev, "error reading header\n");
+			return -EIO;
+		}
+		*buf++ = ioread8(priv->iobase);
+	}
+
+	/* size of the data received */
+	native_size = (__force __be32 *) (hdr + 2);
+	size = be32_to_cpu(*native_size);
+
+	if (count < size) {
+		dev_err(&chip->dev,
+			"Recv size(%d) less than available space\n", size);
+		for (; i < size; i++) {	/* clear the waiting data anyway */
+			status = ioread8(priv->iobase + 1);
+			if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
+				dev_err(&chip->dev, "error reading data\n");
+				return -EIO;
+			}
+		}
+		return -EIO;
+	}
+
+	/* read all the data available */
+	for (; i < size; i++) {
+		status = ioread8(priv->iobase + 1);
+		if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
+			dev_err(&chip->dev, "error reading data\n");
+			return -EIO;
+		}
+		*buf++ = ioread8(priv->iobase);
+	}
+
+	/* make sure data available is gone */
+	status = ioread8(priv->iobase + 1);
+
+	if (status & ATML_STATUS_DATA_AVAIL) {
+		dev_err(&chip->dev, "data available is stuck\n");
+		return -EIO;
+	}
+
+	return size;
+}
+
+static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+	int i;
+
+	dev_dbg(&chip->dev, "tpm_atml_send:\n");
+	for (i = 0; i < count; i++) {
+		dev_dbg(&chip->dev, "%d 0x%x(%d)\n",  i, buf[i], buf[i]);
+		iowrite8(buf[i], priv->iobase);
+	}
+
+	return count;
+}
+
+static void tpm_atml_cancel(struct tpm_chip *chip)
+{
+	struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+
+	iowrite8(ATML_STATUS_ABORT, priv->iobase + 1);
+}
+
+static u8 tpm_atml_status(struct tpm_chip *chip)
+{
+	struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+
+	return ioread8(priv->iobase + 1);
+}
+
+static bool tpm_atml_req_canceled(struct tpm_chip *chip, u8 status)
+{
+	return (status == ATML_STATUS_READY);
+}
+
+static const struct tpm_class_ops tpm_atmel = {
+	.recv = tpm_atml_recv,
+	.send = tpm_atml_send,
+	.cancel = tpm_atml_cancel,
+	.status = tpm_atml_status,
+	.req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
+	.req_complete_val = ATML_STATUS_DATA_AVAIL,
+	.req_canceled = tpm_atml_req_canceled,
+};
+
+static struct platform_device *pdev;
+
+static void atml_plat_remove(void)
+{
+	struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
+	struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+
+	tpm_chip_unregister(chip);
+	if (priv->have_region)
+		atmel_release_region(priv->base, priv->region_size);
+	atmel_put_base_addr(priv->iobase);
+	platform_device_unregister(pdev);
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_atml_pm, tpm_pm_suspend, tpm_pm_resume);
+
+static struct platform_driver atml_drv = {
+	.driver = {
+		.name = "tpm_atmel",
+		.pm		= &tpm_atml_pm,
+	},
+};
+
+static int __init init_atmel(void)
+{
+	int rc = 0;
+	void __iomem *iobase = NULL;
+	int have_region, region_size;
+	unsigned long base;
+	struct tpm_chip *chip;
+	struct tpm_atmel_priv *priv;
+
+	rc = platform_driver_register(&atml_drv);
+	if (rc)
+		return rc;
+
+	iobase = atmel_get_base_addr(&base, &region_size);
+	if (iobase == NULL) {
+		rc = -ENODEV;
+		goto err_unreg_drv;
+	}
+
+	have_region =
+		atmel_request_region(base, region_size, "tpm_atmel0") != NULL;
+
+	pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0);
+	if (IS_ERR(pdev)) {
+		rc = PTR_ERR(pdev);
+		goto err_rel_reg;
+	}
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		rc = -ENOMEM;
+		goto err_unreg_dev;
+	}
+
+	priv->iobase = iobase;
+	priv->base = base;
+	priv->have_region = have_region;
+	priv->region_size = region_size;
+
+	chip = tpmm_chip_alloc(&pdev->dev, &tpm_atmel);
+	if (IS_ERR(chip)) {
+		rc = PTR_ERR(chip);
+		goto err_unreg_dev;
+	}
+
+	dev_set_drvdata(&chip->dev, priv);
+
+	rc = tpm_chip_register(chip);
+	if (rc)
+		goto err_unreg_dev;
+
+	return 0;
+
+err_unreg_dev:
+	platform_device_unregister(pdev);
+err_rel_reg:
+	atmel_put_base_addr(iobase);
+	if (have_region)
+		atmel_release_region(base, region_size);
+err_unreg_drv:
+	platform_driver_unregister(&atml_drv);
+	return rc;
+}
+
+static void __exit cleanup_atmel(void)
+{
+	platform_driver_unregister(&atml_drv);
+	atml_plat_remove();
+}
+
+module_init(init_atmel);
+module_exit(cleanup_atmel);
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h
new file mode 100644
index 0000000..5c82eb4
--- /dev/null
+++ b/drivers/char/tpm/tpm_atmel.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Authors:
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * These differences are required on PowerPC because the device must be
+ * discovered through the device tree, and iomap must be used to get
+ * around the need for holes in the io_page_mask.  This does not happen
+ * automatically because the TPM is not a normal PCI device and lives
+ * under the root node.
+ *
+ */
+
+struct tpm_atmel_priv {
+	int region_size;
+	int have_region;
+	unsigned long base;
+	void __iomem *iobase;
+};
+
+#ifdef CONFIG_PPC64
+
+#include <asm/prom.h>
+
+#define atmel_getb(priv, offset) readb(priv->iobase + offset)
+#define atmel_putb(val, priv, offset) writeb(val, priv->iobase + offset)
+#define atmel_request_region request_mem_region
+#define atmel_release_region release_mem_region
+
+static inline void atmel_put_base_addr(void __iomem *iobase)
+{
+	iounmap(iobase);
+}
+
+static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
+{
+	struct device_node *dn;
+	unsigned long address, size;
+	const unsigned int *reg;
+	int reglen;
+	int naddrc;
+	int nsizec;
+
+	dn = of_find_node_by_name(NULL, "tpm");
+
+	if (!dn)
+		return NULL;
+
+	if (!of_device_is_compatible(dn, "AT97SC3201")) {
+		of_node_put(dn);
+		return NULL;
+	}
+
+	reg = of_get_property(dn, "reg", &reglen);
+	naddrc = of_n_addr_cells(dn);
+	nsizec = of_n_size_cells(dn);
+
+	of_node_put(dn);
+
+	if (!reg)
+		return NULL;
+
+	if (naddrc == 2)
+		address = ((unsigned long) reg[0] << 32) | reg[1];
+	else
+		address = reg[0];
+
+	if (nsizec == 2)
+		size =
+		    ((unsigned long) reg[naddrc] << 32) | reg[naddrc + 1];
+	else
+		size = reg[naddrc];
+
+	*base = address;
+	*region_size = size;
+	return ioremap(*base, *region_size);
+}
+#else
+#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + offset)
+#define atmel_putb(val, chip, offset) \
+	outb(val, atmel_get_priv(chip)->base + offset)
+#define atmel_request_region request_region
+#define atmel_release_region release_region
+/* Atmel definitions */
+enum tpm_atmel_addr {
+	TPM_ATMEL_BASE_ADDR_LO = 0x08,
+	TPM_ATMEL_BASE_ADDR_HI = 0x09
+};
+
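+/*
+ * Legacy probe path: the TPM exposes an index/data register pair at
+ * TPM_ADDR.  Writing an index to the base port selects a register
+ * whose value is then read back from base + 1; the I/O base of the
+ * data/status ports is published in index registers 0x08 (low byte)
+ * and 0x09 (high byte), see atmel_get_base_addr() below.
+ */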
+static inline int tpm_read_index(int base, int index)
+{
+	outb(index, base);
+	return inb(base+1) & 0xFF;
+}
+
+/* Verify this is a 1.1 Atmel TPM */
+static int atmel_verify_tpm11(void)
+{
+
+	/* verify that it is an Atmel part */
+	if (tpm_read_index(TPM_ADDR, 4) != 'A' ||
+	    tpm_read_index(TPM_ADDR, 5) != 'T' ||
+	    tpm_read_index(TPM_ADDR, 6) != 'M' ||
+	    tpm_read_index(TPM_ADDR, 7) != 'L')
+		return 1;
+
+	/* query chip for its version number */
+	if (tpm_read_index(TPM_ADDR, 0x00) != 1 ||
+	    tpm_read_index(TPM_ADDR, 0x01) != 1)
+		return 1;
+
+	/* This is an atmel supported part */
+	return 0;
+}
+
+static inline void atmel_put_base_addr(void __iomem *iobase)
+{
+}
+
+/* Determine where to talk to device */
+static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
+{
+	int lo, hi;
+
+	if (atmel_verify_tpm11() != 0)
+		return NULL;
+
+	lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
+	hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
+
+	*base = (hi << 8) | lo;
+	*region_size = 2;
+
+	return ioport_map(*base, *region_size);
+}
+#endif
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
new file mode 100644
index 0000000..36952ef
--- /dev/null
+++ b/drivers/char/tpm/tpm_crb.c
@@ -0,0 +1,689 @@
+/*
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * Authors:
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG CRB 2.0 TPM specification.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/acpi.h>
+#include <linux/highmem.h>
+#include <linux/rculist.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#ifdef CONFIG_ARM64
+#include <linux/arm-smccc.h>
+#endif
+#include "tpm.h"
+
+#define ACPI_SIG_TPM2 "TPM2"
+
+static const guid_t crb_acpi_start_guid =
+	GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
+		  0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4);
+
+enum crb_defaults {
+	CRB_ACPI_START_REVISION_ID = 1,
+	CRB_ACPI_START_INDEX = 1,
+};
+
+enum crb_loc_ctrl {
+	CRB_LOC_CTRL_REQUEST_ACCESS	= BIT(0),
+	CRB_LOC_CTRL_RELINQUISH		= BIT(1),
+};
+
+enum crb_loc_state {
+	CRB_LOC_STATE_LOC_ASSIGNED	= BIT(1),
+	CRB_LOC_STATE_TPM_REG_VALID_STS	= BIT(7),
+};
+
+enum crb_ctrl_req {
+	CRB_CTRL_REQ_CMD_READY	= BIT(0),
+	CRB_CTRL_REQ_GO_IDLE	= BIT(1),
+};
+
+enum crb_ctrl_sts {
+	CRB_CTRL_STS_ERROR	= BIT(0),
+	CRB_CTRL_STS_TPM_IDLE	= BIT(1),
+};
+
+enum crb_start {
+	CRB_START_INVOKE	= BIT(0),
+};
+
+enum crb_cancel {
+	CRB_CANCEL_INVOKE	= BIT(0),
+};
+
+struct crb_regs_head {
+	u32 loc_state;
+	u32 reserved1;
+	u32 loc_ctrl;
+	u32 loc_sts;
+	u8 reserved2[32];
+	u64 intf_id;
+	u64 ctrl_ext;
+} __packed;
+
+struct crb_regs_tail {
+	u32 ctrl_req;
+	u32 ctrl_sts;
+	u32 ctrl_cancel;
+	u32 ctrl_start;
+	u32 ctrl_int_enable;
+	u32 ctrl_int_sts;
+	u32 ctrl_cmd_size;
+	u32 ctrl_cmd_pa_low;
+	u32 ctrl_cmd_pa_high;
+	u32 ctrl_rsp_size;
+	u64 ctrl_rsp_pa;
+} __packed;
+
+enum crb_status {
+	CRB_DRV_STS_COMPLETE	= BIT(0),
+};
+
+struct crb_priv {
+	u32 sm;
+	const char *hid;
+	void __iomem *iobase;
+	struct crb_regs_head __iomem *regs_h;
+	struct crb_regs_tail __iomem *regs_t;
+	u8 __iomem *cmd;
+	u8 __iomem *rsp;
+	u32 cmd_size;
+	u32 smc_func_id;
+};
+
+struct tpm2_crb_smc {
+	u32 interrupt;
+	u8 interrupt_flags;
+	u8 op_flags;
+	u16 reserved2;
+	u32 smc_func_id;
+};
+
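+/*
+ * Poll @reg until the bits in @mask read back as @value, or @timeout
+ * milliseconds elapse.  The register is sampled once more after the
+ * deadline so that a change landing between the last in-loop read and
+ * the timeout check is not misreported as a failure.
+ */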
+static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
+				unsigned long timeout)
+{
+	ktime_t start;
+	ktime_t stop;
+
+	start = ktime_get();
+	stop = ktime_add(start, ms_to_ktime(timeout));
+
+	do {
+		if ((ioread32(reg) & mask) == value)
+			return true;
+
+		usleep_range(50, 100);
+	} while (ktime_before(ktime_get(), stop));
+
+	return ((ioread32(reg) & mask) == value);
+}
+
+/**
+ * __crb_go_idle - request tpm crb device to go the idle state
+ *
+ * @dev:  crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ
+ * The device should respond within TIMEOUT_C by clearing the bit.
+ * Anyhow, we do not wait here as a consequent CMD_READY request
+ * will be handled correctly even if idle was not completed.
+ *
+ * The function does nothing for devices with ACPI-start method
+ * or SMC-start method.
+ *
+ * Return: 0 always
+ */
+static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
+{
+	if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+		return 0;
+
+	iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
+
+	if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
+				 CRB_CTRL_REQ_GO_IDLE/* mask */,
+				 0, /* value */
+				 TPM2_TIMEOUT_C)) {
+		dev_warn(dev, "goIdle timed out\n");
+		return -ETIME;
+	}
+
+	return 0;
+}
+
+static int crb_go_idle(struct tpm_chip *chip)
+{
+	struct device *dev = &chip->dev;
+	struct crb_priv *priv = dev_get_drvdata(dev);
+
+	return __crb_go_idle(dev, priv);
+}
+
+/**
+ * __crb_cmd_ready - request tpm crb device to enter ready state
+ *
+ * @dev:  crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ
+ * and poll till the device acknowledge it by clearing the bit.
+ * The device should respond within TIMEOUT_C.
+ *
+ * The function does nothing for devices with ACPI-start method
+ * or SMC-start method.
+ *
+ * Return: 0 on success -ETIME on timeout;
+ */
+static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
+{
+	if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+		return 0;
+
+	iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
+	if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
+				 CRB_CTRL_REQ_CMD_READY /* mask */,
+				 0, /* value */
+				 TPM2_TIMEOUT_C)) {
+		dev_warn(dev, "cmdReady timed out\n");
+		return -ETIME;
+	}
+
+	return 0;
+}
+
+static int crb_cmd_ready(struct tpm_chip *chip)
+{
+	struct device *dev = &chip->dev;
+	struct crb_priv *priv = dev_get_drvdata(dev);
+
+	return __crb_cmd_ready(dev, priv);
+}
+
+static int __crb_request_locality(struct device *dev,
+				  struct crb_priv *priv, int loc)
+{
+	u32 value = CRB_LOC_STATE_LOC_ASSIGNED |
+		    CRB_LOC_STATE_TPM_REG_VALID_STS;
+
+	if (!priv->regs_h)
+		return 0;
+
+	iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl);
+	if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value,
+				 TPM2_TIMEOUT_C)) {
+		dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
+		return -ETIME;
+	}
+
+	return 0;
+}
+
+static int crb_request_locality(struct tpm_chip *chip, int loc)
+{
+	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+	return __crb_request_locality(&chip->dev, priv, loc);
+}
+
+static int __crb_relinquish_locality(struct device *dev,
+				     struct crb_priv *priv, int loc)
+{
+	u32 mask = CRB_LOC_STATE_LOC_ASSIGNED |
+		   CRB_LOC_STATE_TPM_REG_VALID_STS;
+	u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS;
+
+	if (!priv->regs_h)
+		return 0;
+
+	iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
+	if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
+				 TPM2_TIMEOUT_C)) {
+		dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n");
+		return -ETIME;
+	}
+
+	return 0;
+}
+
+static int crb_relinquish_locality(struct tpm_chip *chip, int loc)
+{
+	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+	return __crb_relinquish_locality(&chip->dev, priv, loc);
+}
+
+static u8 crb_status(struct tpm_chip *chip)
+{
+	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+	u8 sts = 0;
+
+	if ((ioread32(&priv->regs_t->ctrl_start) & CRB_START_INVOKE) !=
+	    CRB_START_INVOKE)
+		sts |= CRB_DRV_STS_COMPLETE;
+
+	return sts;
+}
+
+static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+	unsigned int expected;
+
+	/* sanity check */
+	if (count < 6)
+		return -EIO;
+
+	if (ioread32(&priv->regs_t->ctrl_sts) & CRB_CTRL_STS_ERROR)
+		return -EIO;
+
+	memcpy_fromio(buf, priv->rsp, 6);
+	expected = be32_to_cpup((__be32 *) &buf[2]);
+	if (expected > count || expected < 6)
+		return -EIO;
+
+	memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
+
+	return expected;
+}
+
+static int crb_do_acpi_start(struct tpm_chip *chip)
+{
+	union acpi_object *obj;
+	int rc;
+
+	obj = acpi_evaluate_dsm(chip->acpi_dev_handle,
+				&crb_acpi_start_guid,
+				CRB_ACPI_START_REVISION_ID,
+				CRB_ACPI_START_INDEX,
+				NULL);
+	if (!obj)
+		return -ENXIO;
+	rc = obj->integer.value == 0 ? 0 : -ENXIO;
+	ACPI_FREE(obj);
+	return rc;
+}
+
+#ifdef CONFIG_ARM64
+/*
+ * This is a TPM Command Response Buffer start method that invokes a
+ * Secure Monitor Call to request the firmware to execute or cancel
+ * a TPM 2.0 command.
+ */
+static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_smc(func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+	if (res.a0 != 0) {
+		dev_err(dev,
+			FW_BUG "tpm_crb_smc_start() returns res.a0 = 0x%lx\n",
+			res.a0);
+		return -EIO;
+	}
+
+	return 0;
+}
+#else
+static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
+{
+	dev_err(dev, FW_BUG "tpm_crb: incorrect start method\n");
+	return -EINVAL;
+}
+#endif
+
+static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+	int rc = 0;
+
+	/* Zero the cancel register so that the next command will not get
+	 * canceled.
+	 */
+	iowrite32(0, &priv->regs_t->ctrl_cancel);
+
+	if (len > priv->cmd_size) {
+		dev_err(&chip->dev, "invalid command count value %zd %d\n",
+			len, priv->cmd_size);
+		return -E2BIG;
+	}
+
+	memcpy_toio(priv->cmd, buf, len);
+
+	/* Make sure that cmd is populated before issuing start. */
+	wmb();
+
+	/* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
+	 * reports only ACPI start but in practice seems to require both
+	 * ACPI start and CRB start, hence the CRB start method is also
+	 * invoked if hid == MSFT0101.
+	 */
+	if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+	    (priv->sm == ACPI_TPM2_MEMORY_MAPPED) ||
+	    (!strcmp(priv->hid, "MSFT0101")))
+		iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
+
+	if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD))
+		rc = crb_do_acpi_start(chip);
+
+	if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
+		iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
+		rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
+	}
+
+	return rc;
+}
+
+static void crb_cancel(struct tpm_chip *chip)
+{
+	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+	iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel);
+
+	if (((priv->sm == ACPI_TPM2_START_METHOD) ||
+	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) &&
+	     crb_do_acpi_start(chip))
+		dev_err(&chip->dev, "ACPI Start failed\n");
+}
+
+static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
+{
+	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+	u32 cancel = ioread32(&priv->regs_t->ctrl_cancel);
+
+	return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
+}
+
+static const struct tpm_class_ops tpm_crb = {
+	.flags = TPM_OPS_AUTO_STARTUP,
+	.status = crb_status,
+	.recv = crb_recv,
+	.send = crb_send,
+	.cancel = crb_cancel,
+	.req_canceled = crb_req_canceled,
+	.go_idle  = crb_go_idle,
+	.cmd_ready = crb_cmd_ready,
+	.request_locality = crb_request_locality,
+	.relinquish_locality = crb_relinquish_locality,
+	.req_complete_mask = CRB_DRV_STS_COMPLETE,
+	.req_complete_val = CRB_DRV_STS_COMPLETE,
+};
+
+static int crb_check_resource(struct acpi_resource *ares, void *data)
+{
+	struct resource *io_res = data;
+	struct resource_win win;
+	struct resource *res = &(win.res);
+
+	if (acpi_dev_resource_memory(ares, res) ||
+	    acpi_dev_resource_address_space(ares, &win)) {
+		*io_res = *res;
+		io_res->name = NULL;
+	}
+
+	return 1;
+}
+
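+/*
+ * Map a control-area sub-region.  If it lies inside the region already
+ * mapped from the ACPI resource, it is served from that mapping at the
+ * appropriate offset; otherwise a dedicated mapping is created for it.
+ */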
+static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
+				 struct resource *io_res, u64 start, u32 size)
+{
+	struct resource new_res = {
+		.start	= start,
+		.end	= start + size - 1,
+		.flags	= IORESOURCE_MEM,
+	};
+
+	/* Detect a 64 bit address on a 32 bit system */
+	if (start != new_res.start)
+		return (void __iomem *) ERR_PTR(-EINVAL);
+
+	if (!resource_contains(io_res, &new_res))
+		return devm_ioremap_resource(dev, &new_res);
+
+	return priv->iobase + (new_res.start - io_res->start);
+}
+
+/*
+ * Work around broken BIOSs that return inconsistent values from the ACPI
+ * region vs the registers. Trust the ACPI region. Such broken systems
+ * probably cannot send large TPM commands since the buffer will be truncated.
+ */
+static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
+			      u64 start, u64 size)
+{
+	if (io_res->start > start || io_res->end < start)
+		return size;
+
+	if (start + size - 1 <= io_res->end)
+		return size;
+
+	dev_err(dev,
+		FW_BUG "ACPI region does not cover the entire command/response buffer. %pr vs %llx %llx\n",
+		io_res, start, size);
+
+	return io_res->end - start + 1;
+}
+
+static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+		      struct acpi_table_tpm2 *buf)
+{
+	struct list_head resources;
+	struct resource io_res;
+	struct device *dev = &device->dev;
+	u32 pa_high, pa_low;
+	u64 cmd_pa;
+	u32 cmd_size;
+	__le64 __rsp_pa;
+	u64 rsp_pa;
+	u32 rsp_size;
+	int ret;
+
+	INIT_LIST_HEAD(&resources);
+	ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
+				     &io_res);
+	if (ret < 0)
+		return ret;
+	acpi_dev_free_resource_list(&resources);
+
+	if (resource_type(&io_res) != IORESOURCE_MEM) {
+		dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+		return -EINVAL;
+	}
+
+	priv->iobase = devm_ioremap_resource(dev, &io_res);
+	if (IS_ERR(priv->iobase))
+		return PTR_ERR(priv->iobase);
+
+	/* The ACPI IO region starts at the head area and continues to include
+	 * the control area as one contiguous region, except on some older
+	 * firmware that places the control area outside the ACPI IO region.
+	 */
+	if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+	    (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
+		if (buf->control_address == io_res.start +
+		    sizeof(*priv->regs_h))
+			priv->regs_h = priv->iobase;
+		else
+			dev_warn(dev, FW_BUG "Bad ACPI memory layout");
+	}
+
+	ret = __crb_request_locality(dev, priv, 0);
+	if (ret)
+		return ret;
+
+	priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
+				   sizeof(struct crb_regs_tail));
+	if (IS_ERR(priv->regs_t)) {
+		ret = PTR_ERR(priv->regs_t);
+		goto out_relinquish_locality;
+	}
+
+	/*
+	 * PTT HW bug w/a: wake up the device to access
+	 * possibly not retained registers.
+	 */
+	ret = __crb_cmd_ready(dev, priv);
+	if (ret)
+		goto out_relinquish_locality;
+
+	pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
+	pa_low  = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
+	cmd_pa = ((u64)pa_high << 32) | pa_low;
+	cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa,
+				      ioread32(&priv->regs_t->ctrl_cmd_size));
+
+	dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
+		pa_high, pa_low, cmd_size);
+
+	priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
+	if (IS_ERR(priv->cmd)) {
+		ret = PTR_ERR(priv->cmd);
+		goto out;
+	}
+
+	memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
+	rsp_pa = le64_to_cpu(__rsp_pa);
+	rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa,
+				      ioread32(&priv->regs_t->ctrl_rsp_size));
+
+	if (cmd_pa != rsp_pa) {
+		priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
+		ret = PTR_ERR_OR_ZERO(priv->rsp);
+		goto out;
+	}
+
+	/* According to the PTP specification, overlapping command and response
+	 * buffer sizes must be identical.
+	 */
+	if (cmd_size != rsp_size) {
+		dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	priv->rsp = priv->cmd;
+
+out:
+	if (!ret)
+		priv->cmd_size = cmd_size;
+
+	__crb_go_idle(dev, priv);
+
+out_relinquish_locality:
+
+	__crb_relinquish_locality(dev, priv, 0);
+
+	return ret;
+}
+
+static int crb_acpi_add(struct acpi_device *device)
+{
+	struct acpi_table_tpm2 *buf;
+	struct crb_priv *priv;
+	struct tpm_chip *chip;
+	struct device *dev = &device->dev;
+	struct tpm2_crb_smc *crb_smc;
+	acpi_status status;
+	u32 sm;
+	int rc;
+
+	status = acpi_get_table(ACPI_SIG_TPM2, 1,
+				(struct acpi_table_header **) &buf);
+	if (ACPI_FAILURE(status) || buf->header.length < sizeof(*buf)) {
+		dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
+		return -EINVAL;
+	}
+
+	/* Should the FIFO driver handle this? */
+	sm = buf->start_method;
+	if (sm == ACPI_TPM2_MEMORY_MAPPED)
+		return -ENODEV;
+
+	priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
+		if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
+			dev_err(dev,
+				FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+				buf->header.length,
+				ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
+			return -EINVAL;
+		}
+		crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
+		priv->smc_func_id = crb_smc->smc_func_id;
+	}
+
+	priv->sm = sm;
+	priv->hid = acpi_device_hid(device);
+
+	rc = crb_map_io(device, priv, buf);
+	if (rc)
+		return rc;
+
+	chip = tpmm_chip_alloc(dev, &tpm_crb);
+	if (IS_ERR(chip))
+		return PTR_ERR(chip);
+
+	dev_set_drvdata(&chip->dev, priv);
+	chip->acpi_dev_handle = device->handle;
+	chip->flags = TPM_CHIP_FLAG_TPM2;
+
+	return tpm_chip_register(chip);
+}
+
+static int crb_acpi_remove(struct acpi_device *device)
+{
+	struct device *dev = &device->dev;
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+
+	tpm_chip_unregister(chip);
+
+	return 0;
+}
+
+static const struct dev_pm_ops crb_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
+};
+
+static const struct acpi_device_id crb_device_ids[] = {
+	{"MSFT0101", 0},
+	{"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, crb_device_ids);
+
+static struct acpi_driver crb_acpi_driver = {
+	.name = "tpm_crb",
+	.ids = crb_device_ids,
+	.ops = {
+		.add = crb_acpi_add,
+		.remove = crb_acpi_remove,
+	},
+	.drv = {
+		.pm = &crb_pm,
+	},
+};
+
+module_acpi_driver(crb_acpi_driver);
+MODULE_AUTHOR("Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>");
+MODULE_DESCRIPTION("TPM2 Driver");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
new file mode 100644
index 0000000..95ce2e9
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -0,0 +1,226 @@
+/*
+ * ATMEL I2C TPM AT97SC3204T
+ *
+ * Copyright (C) 2012 V Lab Technologies
+ *  Teddy Reed <teddy@prosauce.org>
+ * Copyright (C) 2013, Obsidian Research Corp.
+ *  Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ * Device driver for ATMEL I2C TPMs.
+ *
+ * Teddy Reed determined the basic I2C command flow, unlike other I2C TPM
+ * devices the raw TCG formatted TPM command data is written via I2C and then
+ * raw TCG formatted TPM command data is returned via I2C.
+ *
+ * TCG status/locality/etc functions seen in the LPC implementation do not
+ * seem to be present.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include "tpm.h"
+
+#define I2C_DRIVER_NAME "tpm_i2c_atmel"
+
+#define TPM_I2C_SHORT_TIMEOUT  750     /* ms */
+#define TPM_I2C_LONG_TIMEOUT   2000    /* 2 sec */
+
+#define ATMEL_STS_OK 1
+
+struct priv_data {
+	size_t len;
+	/* This is the amount we read on the first try. 25 was chosen to fit a
+	 * fair number of read responses in the buffer so a 2nd retry can be
+	 * avoided in small message cases. */
+	u8 buffer[sizeof(struct tpm_output_header) + 25];
+};
+
+static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+	struct priv_data *priv = dev_get_drvdata(&chip->dev);
+	struct i2c_client *client = to_i2c_client(chip->dev.parent);
+	s32 status;
+
+	priv->len = 0;
+
+	if (len <= 2)
+		return -EIO;
+
+	status = i2c_master_send(client, buf, len);
+
+	dev_dbg(&chip->dev,
+		"%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
+		(int)min_t(size_t, 64, len), buf, len, status);
+	return status;
+}
+
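+/*
+ * i2c_atmel_read_status() has already buffered the first
+ * sizeof(priv->buffer) bytes of the response.  If the length field in
+ * the buffered header shows the whole message fit, it is copied out
+ * directly; otherwise the complete message is re-read from the device,
+ * which resends the full response on every I2C read.
+ */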
+static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct priv_data *priv = dev_get_drvdata(&chip->dev);
+	struct i2c_client *client = to_i2c_client(chip->dev.parent);
+	struct tpm_output_header *hdr =
+		(struct tpm_output_header *)priv->buffer;
+	u32 expected_len;
+	int rc;
+
+	if (priv->len == 0)
+		return -EIO;
+
+	/* Get the message size from the message header, if we didn't get the
+	 * whole message in read_status then we need to re-read the
+	 * message. */
+	expected_len = be32_to_cpu(hdr->length);
+	if (expected_len > count)
+		return -ENOMEM;
+
+	if (priv->len >= expected_len) {
+		dev_dbg(&chip->dev,
+			"%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
+			(int)min_t(size_t, 64, expected_len), buf, count,
+			expected_len);
+		memcpy(buf, priv->buffer, expected_len);
+		return expected_len;
+	}
+
+	rc = i2c_master_recv(client, buf, expected_len);
+	dev_dbg(&chip->dev,
+		"%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
+		(int)min_t(size_t, 64, expected_len), buf, count,
+		expected_len);
+	return rc;
+}
+
+static void i2c_atmel_cancel(struct tpm_chip *chip)
+{
+	dev_err(&chip->dev, "TPM operation cancellation was requested, but is not supported");
+}
+
+static u8 i2c_atmel_read_status(struct tpm_chip *chip)
+{
+	struct priv_data *priv = dev_get_drvdata(&chip->dev);
+	struct i2c_client *client = to_i2c_client(chip->dev.parent);
+	int rc;
+
+	/* The TPM fails the I2C read until it is ready, so we do the entire
+	 * transfer here and buffer it locally. This way the common code can
+	 * properly handle the timeouts. */
+	priv->len = 0;
+	memset(priv->buffer, 0, sizeof(priv->buffer));
+
+	/* Once the TPM has completed the command the command remains readable
+	 * until another command is issued. */
+	rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer));
+	dev_dbg(&chip->dev,
+		"%s: sts=%d", __func__, rc);
+	if (rc <= 0)
+		return 0;
+
+	priv->len = rc;
+
+	return ATMEL_STS_OK;
+}
+
+static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status)
+{
+	return false;
+}
+
+static const struct tpm_class_ops i2c_atmel = {
+	.flags = TPM_OPS_AUTO_STARTUP,
+	.status = i2c_atmel_read_status,
+	.recv = i2c_atmel_recv,
+	.send = i2c_atmel_send,
+	.cancel = i2c_atmel_cancel,
+	.req_complete_mask = ATMEL_STS_OK,
+	.req_complete_val = ATMEL_STS_OK,
+	.req_canceled = i2c_atmel_req_canceled,
+};
+
+static int i2c_atmel_probe(struct i2c_client *client,
+			   const struct i2c_device_id *id)
+{
+	struct tpm_chip *chip;
+	struct device *dev = &client->dev;
+	struct priv_data *priv;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+		return -ENODEV;
+
+	chip = tpmm_chip_alloc(dev, &i2c_atmel);
+	if (IS_ERR(chip))
+		return PTR_ERR(chip);
+
+	priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	/* Default timeouts */
+	chip->timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+	chip->timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
+	chip->timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+	chip->timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+
+	dev_set_drvdata(&chip->dev, priv);
+
+	/* There is no known way to probe for this device, and all version
+	 * information seems to be read via TPM commands. Thus we rely on the
+	 * TPM startup process in the common code to detect the device. */
+
+	return tpm_chip_register(chip);
+}
+
+static int i2c_atmel_remove(struct i2c_client *client)
+{
+	struct device *dev = &client->dev;
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+
+	tpm_chip_unregister(chip);
+	return 0;
+}
+
+static const struct i2c_device_id i2c_atmel_id[] = {
+	{I2C_DRIVER_NAME, 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, i2c_atmel_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_atmel_of_match[] = {
+	{.compatible = "atmel,at97sc3204t"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, i2c_atmel_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(i2c_atmel_pm_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver i2c_atmel_driver = {
+	.id_table = i2c_atmel_id,
+	.probe = i2c_atmel_probe,
+	.remove = i2c_atmel_remove,
+	.driver = {
+		.name = I2C_DRIVER_NAME,
+		.pm = &i2c_atmel_pm_ops,
+		.of_match_table = of_match_ptr(i2c_atmel_of_match),
+	},
+};
+
+module_i2c_driver(i2c_atmel_driver);
+
+MODULE_AUTHOR("Jason Gunthorpe <jgunthorpe@obsidianresearch.com>");
+MODULE_DESCRIPTION("Atmel TPM I2C Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
new file mode 100644
index 0000000..9086edc
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -0,0 +1,740 @@
+/*
+ * Copyright (C) 2012,2013 Infineon Technologies
+ *
+ * Authors:
+ * Peter Huewe <peter.huewe@infineon.com>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0 and the
+ * Infineon I2C Protocol Stack Specification v0.20.
+ *
+ * It is based on the original tpm_tis device driver from Leendert van
+ * Doorn and Kylene Hall.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ *
+ */
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include "tpm.h"
+
+/* max. buffer size supported by our TPM */
+#define TPM_BUFSIZE 1260
+
+/* max. number of iterations after I2C NAK */
+#define MAX_COUNT 3
+
+#define SLEEP_DURATION_LOW 55		/* us */
+#define SLEEP_DURATION_HI 65		/* us */
+
+/* max. number of iterations after I2C NAK for 'long' commands
+ * we need this especially for sending TPM_READY, since the cleanup after the
+ * transition to the ready state may take some time, but it is unpredictable
+ * how long it will take.
+ */
+#define MAX_COUNT_LONG 50
+
+#define SLEEP_DURATION_LONG_LOW 200	/* us */
+#define SLEEP_DURATION_LONG_HI 220	/* us */
+
+/* After sending TPM_READY to 'reset' the TPM we have to sleep even longer */
+#define SLEEP_DURATION_RESET_LOW 2400	/* us */
+#define SLEEP_DURATION_RESET_HI 2600	/* us */
+
+/* we want to use usleep_range instead of msleep for the 5ms TPM_TIMEOUT */
+#define TPM_TIMEOUT_US_LOW (TPM_TIMEOUT * 1000)
+#define TPM_TIMEOUT_US_HI  (TPM_TIMEOUT_US_LOW + 2000)
+
+/* expected value for DIDVID register */
+#define TPM_TIS_I2C_DID_VID_9635 0xd1150b00L
+#define TPM_TIS_I2C_DID_VID_9645 0x001a15d1L
+
+enum i2c_chip_type {
+	SLB9635,
+	SLB9645,
+	UNKNOWN,
+};
+
+/* Structure to store I2C TPM specific stuff */
+struct tpm_inf_dev {
+	struct i2c_client *client;
+	int locality;
+	u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */
+	struct tpm_chip *chip;
+	enum i2c_chip_type chip_type;
+	unsigned int adapterlimit;
+};
+
+static struct tpm_inf_dev tpm_dev;
+
+/*
+ * iic_tpm_read() - read from TPM register
+ * @addr: register address to read from
+ * @buffer: provided by caller
+ * @len: number of bytes to read
+ *
+ * Read len bytes from TPM register and put them into
+ * buffer (little-endian format, i.e. first byte is put into buffer[0]).
+ *
+ * NOTE: TPM is big-endian for multi-byte values. Multi-byte
+ * values have to be swapped.
+ *
+ * NOTE: Unfortunately we can't use the combined read/write functions
+ * provided by the i2c core, as the TPM currently does not support the
+ * repeated start condition and has other special requirements.
+ * The i2c_smbus* functions do not work for this chip.
+ *
+ * Return -EIO on error, 0 on success.
+ */
+static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
+{
+
+	struct i2c_msg msg1 = {
+		.addr = tpm_dev.client->addr,
+		.len = 1,
+		.buf = &addr
+	};
+	struct i2c_msg msg2 = {
+		.addr = tpm_dev.client->addr,
+		.flags = I2C_M_RD,
+		.len = len,
+		.buf = buffer
+	};
+	struct i2c_msg msgs[] = {msg1, msg2};
+
+	int rc = 0;
+	int count;
+	unsigned int msglen = len;
+
+	/* Lock the adapter for the duration of the whole sequence. */
+	if (!tpm_dev.client->adapter->algo->master_xfer)
+		return -EOPNOTSUPP;
+	i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
+
+	if (tpm_dev.chip_type == SLB9645) {
+		/* use a combined read for newer chips
+		 * unfortunately the smbus functions are not suitable due to
+		 * the 32 byte limit of the smbus.
+		 * retries should usually not be needed, but are kept just to
+		 * be on the safe side.
+		 */
+		for (count = 0; count < MAX_COUNT; count++) {
+			rc = __i2c_transfer(tpm_dev.client->adapter, msgs, 2);
+			if (rc > 0)
+				break;	/* break here to skip sleep */
+			usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+		}
+	} else {
+		/* Expect to send one command message and one data message, but
+		 * support looping over each or both if necessary.
+		 */
+		while (len > 0) {
+			/* slb9635 protocol should work in all cases */
+			for (count = 0; count < MAX_COUNT; count++) {
+				rc = __i2c_transfer(tpm_dev.client->adapter,
+						    &msg1, 1);
+				if (rc > 0)
+					break;	/* break here to skip sleep */
+
+				usleep_range(SLEEP_DURATION_LOW,
+					     SLEEP_DURATION_HI);
+			}
+
+			if (rc <= 0)
+				goto out;
+
+			/* After the TPM has successfully received the register
+			 * address it needs some time, thus we're sleeping here
+			 * again, before retrieving the data
+			 */
+			for (count = 0; count < MAX_COUNT; count++) {
+				if (tpm_dev.adapterlimit) {
+					msglen = min_t(unsigned int,
+						       tpm_dev.adapterlimit,
+						       len);
+					msg2.len = msglen;
+				}
+				usleep_range(SLEEP_DURATION_LOW,
+					     SLEEP_DURATION_HI);
+				rc = __i2c_transfer(tpm_dev.client->adapter,
+						    &msg2, 1);
+				if (rc > 0) {
+					/* Since len is unsigned, make doubly
+					 * sure we do not underflow it.
+					 */
+					if (msglen > len)
+						len = 0;
+					else
+						len -= msglen;
+					msg2.buf += msglen;
+					break;
+				}
+				/* If the I2C adapter rejected the request (e.g.
+				 * when the quirk read_max_len < len) fall back
+				 * to a sane minimum value and try again.
+				 */
+				if (rc == -EOPNOTSUPP)
+					tpm_dev.adapterlimit =
+							I2C_SMBUS_BLOCK_MAX;
+			}
+
+			if (rc <= 0)
+				goto out;
+		}
+	}
+
+out:
+	i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
+	/* take care of 'guard time' */
+	usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+
+	/* __i2c_transfer returns the number of successfully transferred
+	 * messages.
+	 * So rc should be greater than 0 here otherwise we have an error.
+	 */
+	if (rc <= 0)
+		return -EIO;
+
+	return 0;
+}
+
+static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
+				 unsigned int sleep_low,
+				 unsigned int sleep_hi, u8 max_count)
+{
+	int rc = -EIO;
+	int count;
+
+	struct i2c_msg msg1 = {
+		.addr = tpm_dev.client->addr,
+		.len = len + 1,
+		.buf = tpm_dev.buf
+	};
+
+	if (len > TPM_BUFSIZE)
+		return -EINVAL;
+
+	if (!tpm_dev.client->adapter->algo->master_xfer)
+		return -EOPNOTSUPP;
+	i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
+
+	/* prepend the 'register address' to the buffer */
+	tpm_dev.buf[0] = addr;
+	memcpy(&(tpm_dev.buf[1]), buffer, len);
+
+	/*
+	 * NOTE: We have to use these special mechanisms here and unfortunately
+	 * cannot rely on the standard behavior of i2c_transfer.
+	 * Even for newer chips the smbus functions are not
+	 * suitable due to the 32 byte limit of the smbus.
+	 */
+	for (count = 0; count < max_count; count++) {
+		rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1);
+		if (rc > 0)
+			break;
+		usleep_range(sleep_low, sleep_hi);
+	}
+
+	i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
+	/* take care of 'guard time' */
+	usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+
+	/* __i2c_transfer returns the number of successfully transferred
+	 * messages.
+	 * So rc should be greater than 0 here otherwise we have an error.
+	 */
+	if (rc <= 0)
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * iic_tpm_write() - write to TPM register
+ * @addr: register address to write to
+ * @buffer: containing data to be written
+ * @len: number of bytes to write
+ *
+ * Write len bytes from provided buffer to TPM register (little
+ * endian format, i.e. buffer[0] is written as first byte).
+ *
+ * NOTE: TPM is big-endian for multi-byte values. Multi-byte
+ * values have to be swapped.
+ *
+ * NOTE: use this function instead of the iic_tpm_write_generic function.
+ *
+ * Return -EIO on error, 0 on success
+ */
+static int iic_tpm_write(u8 addr, u8 *buffer, size_t len)
+{
+	return iic_tpm_write_generic(addr, buffer, len, SLEEP_DURATION_LOW,
+				     SLEEP_DURATION_HI, MAX_COUNT);
+}
+
+/*
+ * This function is needed especially for the cleanup situation after
+ * sending TPM_READY.
+ */
+static int iic_tpm_write_long(u8 addr, u8 *buffer, size_t len)
+{
+	return iic_tpm_write_generic(addr, buffer, len, SLEEP_DURATION_LONG_LOW,
+				     SLEEP_DURATION_LONG_HI, MAX_COUNT_LONG);
+}
+
+enum tis_access {
+	TPM_ACCESS_VALID = 0x80,
+	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
+	TPM_ACCESS_REQUEST_PENDING = 0x04,
+	TPM_ACCESS_REQUEST_USE = 0x02,
+};
+
+enum tis_status {
+	TPM_STS_VALID = 0x80,
+	TPM_STS_COMMAND_READY = 0x40,
+	TPM_STS_GO = 0x20,
+	TPM_STS_DATA_AVAIL = 0x10,
+	TPM_STS_DATA_EXPECT = 0x08,
+};
+
+enum tis_defaults {
+	TIS_SHORT_TIMEOUT = 750,	/* ms */
+	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
+};
+
+#define	TPM_ACCESS(l)			(0x0000 | ((l) << 4))
+#define	TPM_STS(l)			(0x0001 | ((l) << 4))
+#define	TPM_DATA_FIFO(l)		(0x0005 | ((l) << 4))
+#define	TPM_DID_VID(l)			(0x0006 | ((l) << 4))
+
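+/*
+ * Register addresses encode the locality in bits 7:4: with the macros
+ * above, TPM_ACCESS(0) is 0x00 and TPM_ACCESS(1) is 0x10, while
+ * TPM_STS(0) is 0x01.  All accesses below go through tpm_dev.locality.
+ */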
+static bool check_locality(struct tpm_chip *chip, int loc)
+{
+	u8 buf;
+	int rc;
+
+	rc = iic_tpm_read(TPM_ACCESS(loc), &buf, 1);
+	if (rc < 0)
+		return false;
+
+	if ((buf & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
+		tpm_dev.locality = loc;
+		return true;
+	}
+
+	return false;
+}
+
+/* implementation similar to tpm_tis */
+static void release_locality(struct tpm_chip *chip, int loc, int force)
+{
+	u8 buf;
+	if (iic_tpm_read(TPM_ACCESS(loc), &buf, 1) < 0)
+		return;
+
+	if (force || (buf & (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
+	    (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) {
+		buf = TPM_ACCESS_ACTIVE_LOCALITY;
+		iic_tpm_write(TPM_ACCESS(loc), &buf, 1);
+	}
+}
+
+static int request_locality(struct tpm_chip *chip, int loc)
+{
+	unsigned long stop;
+	u8 buf = TPM_ACCESS_REQUEST_USE;
+
+	if (check_locality(chip, loc))
+		return loc;
+
+	iic_tpm_write(TPM_ACCESS(loc), &buf, 1);
+
+	/* wait for burstcount */
+	stop = jiffies + chip->timeout_a;
+	do {
+		if (check_locality(chip, loc))
+			return loc;
+		usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI);
+	} while (time_before(jiffies, stop));
+
+	return -ETIME;
+}
+
+static u8 tpm_tis_i2c_status(struct tpm_chip *chip)
+{
+	/* NOTE: since I2C read may fail, return 0 in this case --> time-out */
+	u8 buf = 0xFF;
+	u8 i = 0;
+
+	do {
+		if (iic_tpm_read(TPM_STS(tpm_dev.locality), &buf, 1) < 0)
+			return 0;
+
+		i++;
+	/* if locality is set, STS should not be 0xFF */
+	} while ((buf == 0xFF) && i < 10);
+
+	return buf;
+}
+
+static void tpm_tis_i2c_ready(struct tpm_chip *chip)
+{
+	/* this causes the current command to be aborted */
+	u8 buf = TPM_STS_COMMAND_READY;
+	iic_tpm_write_long(TPM_STS(tpm_dev.locality), &buf, 1);
+}
+
+static ssize_t get_burstcount(struct tpm_chip *chip)
+{
+	unsigned long stop;
+	ssize_t burstcnt;
+	u8 buf[3];
+
+	/* wait for burstcount */
+	/* the spec is ambiguous about which timeout applies here (c or d) */
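+	/* the burst count is carried in the three bytes after the status byte */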
+	stop = jiffies + chip->timeout_d;
+	do {
+		/* Note: STS is little endian */
+		if (iic_tpm_read(TPM_STS(tpm_dev.locality)+1, buf, 3) < 0)
+			burstcnt = 0;
+		else
+			burstcnt = (buf[2] << 16) + (buf[1] << 8) + buf[0];
+
+		if (burstcnt)
+			return burstcnt;
+
+		usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI);
+	} while (time_before(jiffies, stop));
+	return -EBUSY;
+}
+
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
+			 int *status)
+{
+	unsigned long stop;
+
+	/* check current status */
+	*status = tpm_tis_i2c_status(chip);
+	if ((*status != 0xFF) && (*status & mask) == mask)
+		return 0;
+
+	stop = jiffies + timeout;
+	do {
+		/* since we just checked the status, give the TPM some time */
+		usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI);
+		*status = tpm_tis_i2c_status(chip);
+		if ((*status & mask) == mask)
+			return 0;
+
+	} while (time_before(jiffies, stop));
+
+	return -ETIME;
+}
+
+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	size_t size = 0;
+	ssize_t burstcnt;
+	u8 retries = 0;
+	int rc;
+
+	while (size < count) {
+		burstcnt = get_burstcount(chip);
+
+		/* burstcnt < 0 = TPM is busy */
+		if (burstcnt < 0)
+			return burstcnt;
+
+		/* limit received data to max. left */
+		if (burstcnt > (count - size))
+			burstcnt = count - size;
+
+		rc = iic_tpm_read(TPM_DATA_FIFO(tpm_dev.locality),
+				  &(buf[size]), burstcnt);
+		if (rc == 0)
+			size += burstcnt;
+		else if (rc < 0)
+			retries++;
+
+		/* avoid endless loop in case of broken HW */
+		if (retries > MAX_COUNT_LONG)
+			return -EIO;
+	}
+	return size;
+}
+
+static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	int size = 0;
+	int status;
+	u32 expected;
+
+	if (count < TPM_HEADER_SIZE) {
+		size = -EIO;
+		goto out;
+	}
+
+	/* read first 10 bytes, including tag, paramsize, and result */
+	size = recv_data(chip, buf, TPM_HEADER_SIZE);
+	if (size < TPM_HEADER_SIZE) {
+		dev_err(&chip->dev, "Unable to read header\n");
+		goto out;
+	}
+
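+	/* the 4-byte big-endian total length field sits at offset 2
+	 * of the header */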
+	expected = be32_to_cpu(*(__be32 *)(buf + 2));
+	if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
+		size = -EIO;
+		goto out;
+	}
+
+	size += recv_data(chip, &buf[TPM_HEADER_SIZE],
+			  expected - TPM_HEADER_SIZE);
+	if (size < expected) {
+		dev_err(&chip->dev, "Unable to read remainder of result\n");
+		size = -ETIME;
+		goto out;
+	}
+
+	wait_for_stat(chip, TPM_STS_VALID, chip->timeout_c, &status);
+	if (status & TPM_STS_DATA_AVAIL) {	/* retry? */
+		dev_err(&chip->dev, "Error left over data\n");
+		size = -EIO;
+		goto out;
+	}
+
+out:
+	tpm_tis_i2c_ready(chip);
+	/* The TPM needs some time to clean up here,
+	 * so we sleep rather than keeping the bus busy
+	 */
+	usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI);
+	release_locality(chip, tpm_dev.locality, 0);
+	return size;
+}
+
+static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+	int rc, status;
+	ssize_t burstcnt;
+	size_t count = 0;
+	u8 retries = 0;
+	u8 sts = TPM_STS_GO;
+
+	if (len > TPM_BUFSIZE)
+		return -E2BIG;	/* command is too long for our tpm, sorry */
+
+	if (request_locality(chip, 0) < 0)
+		return -EBUSY;
+
+	status = tpm_tis_i2c_status(chip);
+	if ((status & TPM_STS_COMMAND_READY) == 0) {
+		tpm_tis_i2c_ready(chip);
+		if (wait_for_stat
+		    (chip, TPM_STS_COMMAND_READY,
+		     chip->timeout_b, &status) < 0) {
+			rc = -ETIME;
+			goto out_err;
+		}
+	}
+
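+	/* send all but the last byte: the TPM must keep dataExpect set
+	 * until the final byte arrives */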
+	while (count < len - 1) {
+		burstcnt = get_burstcount(chip);
+
+		/* burstcnt < 0 = TPM is busy */
+		if (burstcnt < 0)
+			return burstcnt;
+
+		if (burstcnt > (len - 1 - count))
+			burstcnt = len - 1 - count;
+
+		rc = iic_tpm_write(TPM_DATA_FIFO(tpm_dev.locality),
+				   &(buf[count]), burstcnt);
+		if (rc == 0)
+			count += burstcnt;
+		else if (rc < 0)
+			retries++;
+
+		/* avoid endless loop in case of broken HW */
+		if (retries > MAX_COUNT_LONG) {
+			rc = -EIO;
+			goto out_err;
+		}
+
+		wait_for_stat(chip, TPM_STS_VALID,
+			      chip->timeout_c, &status);
+
+		if ((status & TPM_STS_DATA_EXPECT) == 0) {
+			rc = -EIO;
+			goto out_err;
+		}
+	}
+
+	/* write last byte */
+	iic_tpm_write(TPM_DATA_FIFO(tpm_dev.locality), &(buf[count]), 1);
+	wait_for_stat(chip, TPM_STS_VALID, chip->timeout_c, &status);
+	if ((status & TPM_STS_DATA_EXPECT) != 0) {
+		rc = -EIO;
+		goto out_err;
+	}
+
+	/* go and do it */
+	iic_tpm_write(TPM_STS(tpm_dev.locality), &sts, 1);
+
+	return len;
+out_err:
+	tpm_tis_i2c_ready(chip);
+	/* The TPM needs some time to clean up here,
+	 * so we sleep rather than keeping the bus busy
+	 */
+	usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI);
+	release_locality(chip, tpm_dev.locality, 0);
+	return rc;
+}
+
+static bool tpm_tis_i2c_req_canceled(struct tpm_chip *chip, u8 status)
+{
+	return (status == TPM_STS_COMMAND_READY);
+}
+
+static const struct tpm_class_ops tpm_tis_i2c = {
+	.flags = TPM_OPS_AUTO_STARTUP,
+	.status = tpm_tis_i2c_status,
+	.recv = tpm_tis_i2c_recv,
+	.send = tpm_tis_i2c_send,
+	.cancel = tpm_tis_i2c_ready,
+	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+	.req_canceled = tpm_tis_i2c_req_canceled,
+};
+
+static int tpm_tis_i2c_init(struct device *dev)
+{
+	u32 vendor;
+	int rc = 0;
+	struct tpm_chip *chip;
+
+	chip = tpmm_chip_alloc(dev, &tpm_tis_i2c);
+	if (IS_ERR(chip))
+		return PTR_ERR(chip);
+
+	/* Default timeouts */
+	chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+	chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+	chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+	chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
+	if (request_locality(chip, 0) != 0) {
+		dev_err(dev, "could not request locality\n");
+		rc = -ENODEV;
+		goto out_err;
+	}
+
+	/* read four bytes from DID_VID register */
+	if (iic_tpm_read(TPM_DID_VID(0), (u8 *)&vendor, 4) < 0) {
+		dev_err(dev, "could not read vendor id\n");
+		rc = -EIO;
+		goto out_release;
+	}
+
+	if (vendor == TPM_TIS_I2C_DID_VID_9645) {
+		tpm_dev.chip_type = SLB9645;
+	} else if (vendor == TPM_TIS_I2C_DID_VID_9635) {
+		tpm_dev.chip_type = SLB9635;
+	} else {
+		dev_err(dev, "vendor id did not match! ID was %08x\n", vendor);
+		rc = -ENODEV;
+		goto out_release;
+	}
+
+	dev_info(dev, "1.2 TPM (device-id 0x%X)\n", vendor >> 16);
+
+	tpm_dev.chip = chip;
+
+	return tpm_chip_register(chip);
+out_release:
+	release_locality(chip, tpm_dev.locality, 1);
+	tpm_dev.client = NULL;
+out_err:
+	return rc;
+}
+
+static const struct i2c_device_id tpm_tis_i2c_table[] = {
+	{"tpm_i2c_infineon"},
+	{"slb9635tt"},
+	{"slb9645tt"},
+	{},
+};
+
+MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_table);
+
+#ifdef CONFIG_OF
+static const struct of_device_id tpm_tis_i2c_of_match[] = {
+	{.compatible = "infineon,tpm_i2c_infineon"},
+	{.compatible = "infineon,slb9635tt"},
+	{.compatible = "infineon,slb9645tt"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, tpm_tis_i2c_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_i2c_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static int tpm_tis_i2c_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	int rc;
+	struct device *dev = &(client->dev);
+
+	if (tpm_dev.client != NULL) {
+		dev_err(dev, "This driver only supports one client at a time\n");
+		return -EBUSY;	/* We only support one client */
+	}
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(dev, "no algorithms associated to the i2c bus\n");
+		return -ENODEV;
+	}
+
+	tpm_dev.client = client;
+	rc = tpm_tis_i2c_init(&client->dev);
+	if (rc != 0) {
+		tpm_dev.client = NULL;
+		rc = -ENODEV;
+	}
+	return rc;
+}
+
+static int tpm_tis_i2c_remove(struct i2c_client *client)
+{
+	struct tpm_chip *chip = tpm_dev.chip;
+
+	tpm_chip_unregister(chip);
+	release_locality(chip, tpm_dev.locality, 1);
+	tpm_dev.client = NULL;
+
+	return 0;
+}
+
+static struct i2c_driver tpm_tis_i2c_driver = {
+	.id_table = tpm_tis_i2c_table,
+	.probe = tpm_tis_i2c_probe,
+	.remove = tpm_tis_i2c_remove,
+	.driver = {
+		   .name = "tpm_i2c_infineon",
+		   .pm = &tpm_tis_i2c_ops,
+		   .of_match_table = of_match_ptr(tpm_tis_i2c_of_match),
+		   },
+};
+
+module_i2c_driver(tpm_tis_i2c_driver);
+MODULE_AUTHOR("Peter Huewe <peter.huewe@infineon.com>");
+MODULE_DESCRIPTION("TPM TIS I2C Infineon Driver");
+MODULE_VERSION("2.2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
new file mode 100644
index 0000000..caa86b1
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -0,0 +1,680 @@
+ /******************************************************************************
+ * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501/NPCT6XX,
+ * based on the TCG TPM Interface Spec version 1.2.
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * Copyright (C) 2011, Nuvoton Technology Corporation.
+ *  Dan Morav <dan.morav@nuvoton.com>
+ * Copyright (C) 2013, Obsidian Research Corp.
+ *  Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Nuvoton contact information: APC.Support@nuvoton.com
+ *****************************************************************************/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include "tpm.h"
+
+/* I2C interface offsets */
+#define TPM_STS                0x00
+#define TPM_BURST_COUNT        0x01
+#define TPM_DATA_FIFO_W        0x20
+#define TPM_DATA_FIFO_R        0x40
+#define TPM_VID_DID_RID        0x60
+/* TPM command header size */
+#define TPM_HEADER_SIZE        10
+#define TPM_RETRY      5
+/*
+ * I2C bus device maximum buffer size w/o counting I2C address or command
+ * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data
+ */
+#define TPM_I2C_MAX_BUF_SIZE           32
+#define TPM_I2C_RETRY_COUNT            32
+#define TPM_I2C_BUS_DELAY              1000      	/* usec */
+#define TPM_I2C_RETRY_DELAY_SHORT      (2 * 1000)	/* usec */
+#define TPM_I2C_RETRY_DELAY_LONG       (10 * 1000) 	/* usec */
+#define TPM_I2C_DELAY_RANGE            300		/* usec */
+
+#define OF_IS_TPM2 ((void *)1)
+#define I2C_IS_TPM2 1
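+
+/* sentinel values matched against of_id->data and id->driver_data in
+ * probe to mark TPM 2.0 capable parts */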
+
+struct priv_data {
+	int irq;
+	unsigned int intrs;
+	wait_queue_head_t read_queue;
+};
+
+static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size,
+				u8 *data)
+{
+	s32 status;
+
+	status = i2c_smbus_read_i2c_block_data(client, offset, size, data);
+	dev_dbg(&client->dev,
+		"%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__,
+		offset, size, (int)size, data, status);
+	return status;
+}
+
+static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size,
+				 u8 *data)
+{
+	s32 status;
+
+	status = i2c_smbus_write_i2c_block_data(client, offset, size, data);
+	dev_dbg(&client->dev,
+		"%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__,
+		offset, size, (int)size, data, status);
+	return status;
+}
+
+#define TPM_STS_VALID          0x80
+#define TPM_STS_COMMAND_READY  0x40
+#define TPM_STS_GO             0x20
+#define TPM_STS_DATA_AVAIL     0x10
+#define TPM_STS_EXPECT         0x08
+#define TPM_STS_RESPONSE_RETRY 0x02
+#define TPM_STS_ERR_VAL        0x07    /* bit2...bit0 reads always 0 */
+
+#define TPM_I2C_SHORT_TIMEOUT  750     /* ms */
+#define TPM_I2C_LONG_TIMEOUT   2000    /* 2 sec */
+
+/* read TPM_STS register */
+static u8 i2c_nuvoton_read_status(struct tpm_chip *chip)
+{
+	struct i2c_client *client = to_i2c_client(chip->dev.parent);
+	s32 status;
+	u8 data;
+
+	status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data);
+	if (status <= 0) {
+		dev_err(&chip->dev, "%s() error return %d\n", __func__,
+			status);
+		data = TPM_STS_ERR_VAL;
+	}
+
+	return data;
+}
+
+/* write byte to TPM_STS register */
+static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data)
+{
+	s32 status;
+	int i;
+
+	/* this causes the current command to be aborted */
+	for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) {
+		status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data);
+		if (status < 0)
+			usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
+				     + TPM_I2C_DELAY_RANGE);
+	}
+	return status;
+}
+
+/* write commandReady to TPM_STS register */
+static void i2c_nuvoton_ready(struct tpm_chip *chip)
+{
+	struct i2c_client *client = to_i2c_client(chip->dev.parent);
+	s32 status;
+
+	/* this causes the current command to be aborted */
+	status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY);
+	if (status < 0)
+		dev_err(&chip->dev,
+			"%s() fail to write TPM_STS.commandReady\n", __func__);
+}
+
+/* read burstCount field from TPM_STS register
+ * return -1 if the field cannot be read */
+static int i2c_nuvoton_get_burstcount(struct i2c_client *client,
+				      struct tpm_chip *chip)
+{
+	unsigned long stop = jiffies + chip->timeout_d;
+	s32 status;
+	int burst_count = -1;
+	u8 data;
+
+	/* wait for burstcount to be non-zero */
+	do {
+		/* in I2C burstCount is 1 byte */
+		status = i2c_nuvoton_read_buf(client, TPM_BURST_COUNT, 1,
+					      &data);
+		if (status > 0 && data > 0) {
+			burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data);
+			break;
+		}
+		usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
+			     + TPM_I2C_DELAY_RANGE);
+	} while (time_before(jiffies, stop));
+
+	return burst_count;
+}
+
+/*
+ * WPCT301/NPCT501/NPCT6XX SINT# supports only dataAvail.
+ * Any call to this function that is not waiting for dataAvail must
+ * pass a NULL queue to avoid waiting for the interrupt.
+ */
+static bool i2c_nuvoton_check_status(struct tpm_chip *chip, u8 mask, u8 value)
+{
+	u8 status = i2c_nuvoton_read_status(chip);
+	return (status != TPM_STS_ERR_VAL) && ((status & mask) == value);
+}
+
+static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
+				     u32 timeout, wait_queue_head_t *queue)
+{
+	if ((chip->flags & TPM_CHIP_FLAG_IRQ) && queue) {
+		s32 rc;
+		struct priv_data *priv = dev_get_drvdata(&chip->dev);
+		unsigned int cur_intrs = priv->intrs;
+
+		enable_irq(priv->irq);
+		rc = wait_event_interruptible_timeout(*queue,
+						      cur_intrs != priv->intrs,
+						      timeout);
+		if (rc > 0)
+			return 0;
+		/* At this point we know that the SINT pin is asserted, so we
+		 * do not need to do i2c_nuvoton_check_status */
+	} else {
+		unsigned long ten_msec, stop;
+		bool status_valid;
+
+		/* check current status */
+		status_valid = i2c_nuvoton_check_status(chip, mask, value);
+		if (status_valid)
+			return 0;
+
+		/* use polling to wait for the event */
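+		/* poll every ~2 ms for the first 10 ms, then back off to ~10 ms */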
+		ten_msec = jiffies + usecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
+		stop = jiffies + timeout;
+		do {
+			if (time_before(jiffies, ten_msec))
+				usleep_range(TPM_I2C_RETRY_DELAY_SHORT,
+					     TPM_I2C_RETRY_DELAY_SHORT
+					     + TPM_I2C_DELAY_RANGE);
+			else
+				usleep_range(TPM_I2C_RETRY_DELAY_LONG,
+					     TPM_I2C_RETRY_DELAY_LONG
+					     + TPM_I2C_DELAY_RANGE);
+			status_valid = i2c_nuvoton_check_status(chip, mask,
+								value);
+			if (status_valid)
+				return 0;
+		} while (time_before(jiffies, stop));
+	}
+	dev_err(&chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
+		value);
+	return -ETIMEDOUT;
+}
+
+/* wait for dataAvail field to be set in the TPM_STS register */
+static int i2c_nuvoton_wait_for_data_avail(struct tpm_chip *chip, u32 timeout,
+					   wait_queue_head_t *queue)
+{
+	return i2c_nuvoton_wait_for_stat(chip,
+					 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+					 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+					 timeout, queue);
+}
+
+/* Read @count bytes into @buf from TPM_RD_FIFO register */
+static int i2c_nuvoton_recv_data(struct i2c_client *client,
+				 struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct priv_data *priv = dev_get_drvdata(&chip->dev);
+	s32 rc;
+	int burst_count, bytes2read, size = 0;
+
+	while (size < count &&
+	       i2c_nuvoton_wait_for_data_avail(chip,
+					       chip->timeout_c,
+					       &priv->read_queue) == 0) {
+		burst_count = i2c_nuvoton_get_burstcount(client, chip);
+		if (burst_count < 0) {
+			dev_err(&chip->dev,
+				"%s() fail to read burstCount=%d\n", __func__,
+				burst_count);
+			return -EIO;
+		}
+		bytes2read = min_t(size_t, burst_count, count - size);
+		rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R,
+					  bytes2read, &buf[size]);
+		if (rc < 0) {
+			dev_err(&chip->dev,
+				"%s() fail on i2c_nuvoton_read_buf()=%d\n",
+				__func__, rc);
+			return -EIO;
+		}
+		dev_dbg(&chip->dev, "%s(%d):", __func__, bytes2read);
+		size += bytes2read;
+	}
+
+	return size;
+}
+
+/* Read TPM command results */
+static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct priv_data *priv = dev_get_drvdata(&chip->dev);
+	struct device *dev = chip->dev.parent;
+	struct i2c_client *client = to_i2c_client(dev);
+	s32 rc;
+	int status;
+	int burst_count;
+	int retries;
+	int size = 0;
+	u32 expected;
+
+	if (count < TPM_HEADER_SIZE) {
+		i2c_nuvoton_ready(chip);    /* return to idle */
+		dev_err(dev, "%s() count < header size\n", __func__);
+		return -EIO;
+	}
+	for (retries = 0; retries < TPM_RETRY; retries++) {
+		if (retries > 0) {
+			/* if this is not the first trial, set responseRetry */
+			i2c_nuvoton_write_status(client,
+						 TPM_STS_RESPONSE_RETRY);
+		}
+		/*
+		 * read first available (> 10 bytes), including:
+		 * tag, paramsize, and result
+		 */
+		status = i2c_nuvoton_wait_for_data_avail(
+			chip, chip->timeout_c, &priv->read_queue);
+		if (status != 0) {
+			dev_err(dev, "%s() timeout on dataAvail\n", __func__);
+			size = -ETIMEDOUT;
+			continue;
+		}
+		burst_count = i2c_nuvoton_get_burstcount(client, chip);
+		if (burst_count < 0) {
+			dev_err(dev, "%s() fail to get burstCount\n", __func__);
+			size = -EIO;
+			continue;
+		}
+		size = i2c_nuvoton_recv_data(client, chip, buf,
+					     burst_count);
+		if (size < TPM_HEADER_SIZE) {
+			dev_err(dev, "%s() fail to read header\n", __func__);
+			size = -EIO;
+			continue;
+		}
+		/*
+		 * convert number of expected bytes field from big endian 32 bit
+		 * to machine native
+		 */
+		expected = be32_to_cpu(*(__be32 *) (buf + 2));
+		if (expected > count || expected < size) {
+			dev_err(dev, "%s() expected > count\n", __func__);
+			size = -EIO;
+			continue;
+		}
+		rc = i2c_nuvoton_recv_data(client, chip, &buf[size],
+					   expected - size);
+		size += rc;
+		if (rc < 0 || size < expected) {
+			dev_err(dev, "%s() fail to read remainder of result\n",
+				__func__);
+			size = -EIO;
+			continue;
+		}
+		if (i2c_nuvoton_wait_for_stat(
+			    chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL,
+			    TPM_STS_VALID, chip->timeout_c,
+			    NULL)) {
+			dev_err(dev, "%s() error left over data\n", __func__);
+			size = -ETIMEDOUT;
+			continue;
+		}
+		break;
+	}
+	i2c_nuvoton_ready(chip);
+	dev_dbg(&chip->dev, "%s() -> %d\n", __func__, size);
+	return size;
+}
+
+/*
+ * Send TPM command.
+ *
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+	struct priv_data *priv = dev_get_drvdata(&chip->dev);
+	struct device *dev = chip->dev.parent;
+	struct i2c_client *client = to_i2c_client(dev);
+	u32 ordinal;
+	size_t count = 0;
+	int burst_count, bytes2write, retries, rc = -EIO;
+
+	for (retries = 0; retries < TPM_RETRY; retries++) {
+		i2c_nuvoton_ready(chip);
+		if (i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY,
+					      TPM_STS_COMMAND_READY,
+					      chip->timeout_b, NULL)) {
+			dev_err(dev, "%s() timeout on commandReady\n",
+				__func__);
+			rc = -EIO;
+			continue;
+		}
+		rc = 0;
+		while (count < len - 1) {
+			burst_count = i2c_nuvoton_get_burstcount(client,
+								 chip);
+			if (burst_count < 0) {
+				dev_err(dev, "%s() fail get burstCount\n",
+					__func__);
+				rc = -EIO;
+				break;
+			}
+			bytes2write = min_t(size_t, burst_count,
+					    len - 1 - count);
+			rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W,
+						   bytes2write, &buf[count]);
+			if (rc < 0) {
+				dev_err(dev, "%s() fail i2cWriteBuf\n",
+					__func__);
+				break;
+			}
+			dev_dbg(dev, "%s(%d):", __func__, bytes2write);
+			count += bytes2write;
+			rc = i2c_nuvoton_wait_for_stat(chip,
+						       TPM_STS_VALID |
+						       TPM_STS_EXPECT,
+						       TPM_STS_VALID |
+						       TPM_STS_EXPECT,
+						       chip->timeout_c,
+						       NULL);
+			if (rc < 0) {
+				dev_err(dev, "%s() timeout on Expect\n",
+					__func__);
+				rc = -ETIMEDOUT;
+				break;
+			}
+		}
+		if (rc < 0)
+			continue;
+
+		/* write last byte */
+		rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, 1,
+					   &buf[count]);
+		if (rc < 0) {
+			dev_err(dev, "%s() fail to write last byte\n",
+				__func__);
+			rc = -EIO;
+			continue;
+		}
+		dev_dbg(dev, "%s(last): %02x", __func__, buf[count]);
+		rc = i2c_nuvoton_wait_for_stat(chip,
+					       TPM_STS_VALID | TPM_STS_EXPECT,
+					       TPM_STS_VALID,
+					       chip->timeout_c, NULL);
+		if (rc) {
+			dev_err(dev, "%s() timeout on Expect to clear\n",
+				__func__);
+			rc = -ETIMEDOUT;
+			continue;
+		}
+		break;
+	}
+	if (rc < 0) {
+		/* retries == TPM_RETRY */
+		i2c_nuvoton_ready(chip);
+		return rc;
+	}
+	/* execute the TPM command */
+	rc = i2c_nuvoton_write_status(client, TPM_STS_GO);
+	if (rc < 0) {
+		dev_err(dev, "%s() fail to write Go\n", __func__);
+		i2c_nuvoton_ready(chip);
+		return rc;
+	}
+	ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+	rc = i2c_nuvoton_wait_for_data_avail(chip,
+					     tpm_calc_ordinal_duration(chip,
+								       ordinal),
+					     &priv->read_queue);
+	if (rc) {
+		dev_err(dev, "%s() timeout command duration\n", __func__);
+		i2c_nuvoton_ready(chip);
+		return rc;
+	}
+
+	dev_dbg(dev, "%s() -> %zd\n", __func__, len);
+	return len;
+}
+
+static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status)
+{
+	return (status == TPM_STS_COMMAND_READY);
+}
+
+static const struct tpm_class_ops tpm_i2c = {
+	.flags = TPM_OPS_AUTO_STARTUP,
+	.status = i2c_nuvoton_read_status,
+	.recv = i2c_nuvoton_recv,
+	.send = i2c_nuvoton_send,
+	.cancel = i2c_nuvoton_ready,
+	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+	.req_canceled = i2c_nuvoton_req_canceled,
+};
+
+/* The only purpose for the handler is to signal to any waiting threads that
+ * the interrupt is currently being asserted. The driver does not do any
+ * processing triggered by interrupts, and the chip provides no way to mask at
+ * the source (plus that would be slow over I2C). Run the IRQ as a one-shot;
+ * this means it cannot be shared. */
+static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id)
+{
+	struct tpm_chip *chip = dev_id;
+	struct priv_data *priv = dev_get_drvdata(&chip->dev);
+
+	priv->intrs++;
+	wake_up(&priv->read_queue);
+	disable_irq_nosync(priv->irq);
+	return IRQ_HANDLED;
+}
+
+static int get_vid(struct i2c_client *client, u32 *res)
+{
+	static const u8 vid_did_rid_value[] = { 0x50, 0x10, 0xfe };
+	u32 temp;
+	s32 rc;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -ENODEV;
+	rc = i2c_nuvoton_read_buf(client, TPM_VID_DID_RID, 4, (u8 *)&temp);
+	if (rc < 0)
+		return rc;
+
+	/* check WPCT301 values - ignore RID */
+	if (memcmp(&temp, vid_did_rid_value, sizeof(vid_did_rid_value))) {
+		/*
+		 * f/w rev 2.81 has an issue where the VID_DID_RID is not
+		 * reporting the right value. so give it another chance at
+		 * offset 0x20 (FIFO_W).
+		 */
+		rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_W, 4,
+					  (u8 *) (&temp));
+		if (rc < 0)
+			return rc;
+
+		/* check WPCT301 values - ignore RID */
+		if (memcmp(&temp, vid_did_rid_value,
+			   sizeof(vid_did_rid_value)))
+			return -ENODEV;
+	}
+
+	*res = temp;
+	return 0;
+}
+
+static int i2c_nuvoton_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	int rc;
+	struct tpm_chip *chip;
+	struct device *dev = &client->dev;
+	struct priv_data *priv;
+	u32 vid = 0;
+
+	rc = get_vid(client, &vid);
+	if (rc)
+		return rc;
+
+	dev_info(dev, "VID: %04X DID: %02X RID: %02X\n", (u16) vid,
+		 (u8) (vid >> 16), (u8) (vid >> 24));
+
+	chip = tpmm_chip_alloc(dev, &tpm_i2c);
+	if (IS_ERR(chip))
+		return PTR_ERR(chip);
+
+	priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	if (dev->of_node) {
+		const struct of_device_id *of_id;
+
+		of_id = of_match_device(dev->driver->of_match_table, dev);
+		if (of_id && of_id->data == OF_IS_TPM2)
+			chip->flags |= TPM_CHIP_FLAG_TPM2;
+	} else
+		if (id->driver_data == I2C_IS_TPM2)
+			chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+	init_waitqueue_head(&priv->read_queue);
+
+	/* Default timeouts */
+	chip->timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+	chip->timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
+	chip->timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+	chip->timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+
+	dev_set_drvdata(&chip->dev, priv);
+
+	/*
+	 * I2C intfcaps (interrupt capabilities) in the chip are hard coded to:
+	 *   TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT
+	 * The IRQ should be set in the i2c_board_info (which is done
+	 * automatically in of_i2c_register_devices for device tree users).
+	 */
+	priv->irq = client->irq;
+	if (client->irq) {
+		dev_dbg(dev, "%s() priv->irq\n", __func__);
+		rc = devm_request_irq(dev, client->irq,
+				      i2c_nuvoton_int_handler,
+				      IRQF_TRIGGER_LOW,
+				      dev_name(&chip->dev),
+				      chip);
+		if (rc) {
+			dev_err(dev, "%s() Unable to request irq: %d for use\n",
+				__func__, priv->irq);
+			priv->irq = 0;
+		} else {
+			chip->flags |= TPM_CHIP_FLAG_IRQ;
+			/* Clear any pending interrupt */
+			i2c_nuvoton_ready(chip);
+			/* - wait for TPM_STS==0xA0 (stsValid, commandReady) */
+			rc = i2c_nuvoton_wait_for_stat(chip,
+						       TPM_STS_COMMAND_READY,
+						       TPM_STS_COMMAND_READY,
+						       chip->timeout_b,
+						       NULL);
+			if (rc == 0) {
+				/*
+				 * TIS is in ready state
+				 * write dummy byte to enter reception state
+				 * TPM_DATA_FIFO_W <- rc (0)
+				 */
+				rc = i2c_nuvoton_write_buf(client,
+							   TPM_DATA_FIFO_W,
+							   1, (u8 *) (&rc));
+				if (rc < 0)
+					return rc;
+				/* TPM_STS <- 0x40 (commandReady) */
+				i2c_nuvoton_ready(chip);
+			} else {
+				/*
+				 * timeout_b reached - command was
+				 * aborted. TIS should now be in idle state -
+				 * only TPM_STS_VALID should be set
+				 */
+				if (i2c_nuvoton_read_status(chip) !=
+				    TPM_STS_VALID)
+					return -EIO;
+			}
+		}
+	}
+
+	return tpm_chip_register(chip);
+}
+
+static int i2c_nuvoton_remove(struct i2c_client *client)
+{
+	struct tpm_chip *chip = i2c_get_clientdata(client);
+
+	tpm_chip_unregister(chip);
+	return 0;
+}
+
+static const struct i2c_device_id i2c_nuvoton_id[] = {
+	{"tpm_i2c_nuvoton"},
+	{"tpm2_i2c_nuvoton", .driver_data = I2C_IS_TPM2},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_nuvoton_of_match[] = {
+	{.compatible = "nuvoton,npct501"},
+	{.compatible = "winbond,wpct301"},
+	{.compatible = "nuvoton,npct601", .data = OF_IS_TPM2},
+	{},
+};
+MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(i2c_nuvoton_pm_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver i2c_nuvoton_driver = {
+	.id_table = i2c_nuvoton_id,
+	.probe = i2c_nuvoton_probe,
+	.remove = i2c_nuvoton_remove,
+	.driver = {
+		.name = "tpm_i2c_nuvoton",
+		.pm = &i2c_nuvoton_pm_ops,
+		.of_match_table = of_match_ptr(i2c_nuvoton_of_match),
+	},
+};
+
+module_i2c_driver(i2c_nuvoton_driver);
+
+MODULE_AUTHOR("Dan Morav (dan.morav@nuvoton.com)");
+MODULE_DESCRIPTION("Nuvoton TPM I2C Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
new file mode 100644
index 0000000..25f6e26
--- /dev/null
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -0,0 +1,734 @@
+/*
+ * Copyright (C) 2012 IBM Corporation
+ *
+ * Author: Ashley Lai <ashleydlai@gmail.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <asm/vio.h>
+#include <asm/irq.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <asm/prom.h>
+
+#include "tpm.h"
+#include "tpm_ibmvtpm.h"
+
+static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";
+
+static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
+	{ "IBM,vtpm", "IBM,vtpm"},
+	{ "", "" }
+};
+MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
+
+/**
+ * ibmvtpm_send_crq_word - Send a CRQ request
+ * @vdev:	vio device struct
+ * @w1:		pre-constructed first word of tpm crq (second word is reserved)
+ *
+ * Return:
+ *	0 - Success
+ *	Non-zero - Failure
+ */
+static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
+{
+	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
+}
+
+/**
+ * ibmvtpm_send_crq - Send a CRQ request
+ *
+ * @vdev:	vio device struct
+ * @valid:	Valid field
+ * @msg:	Type field
+ * @len:	Length field
+ * @data:	Data field
+ *
+ * The ibmvtpm crq is defined as follows:
+ *
+ * Byte  |   0   |   1   |   2   |   3   |   4   |   5   |   6   |   7
+ * -----------------------------------------------------------------------
+ * Word0 | Valid | Type  |     Length    |              Data
+ * -----------------------------------------------------------------------
+ * Word1 |                Reserved
+ * -----------------------------------------------------------------------
+ *
+ * Which matches the following structure (on bigendian host):
+ *
+ * struct ibmvtpm_crq {
+ *         u8 valid;
+ *         u8 msg;
+ *         __be16 len;
+ *         __be32 data;
+ *         __be64 reserved;
+ * } __attribute__((packed, aligned(8)));
+ *
+ * However, the value is passed in a register so just compute the numeric value
+ * to load into the register avoiding byteswap altogether. Endian only affects
+ * memory loads and stores - registers are internally represented the same.
+ *
+ * Return:
+ *	0 (H_SUCCESS) - Success
+ *	Non-zero - Failure
+ */
+static int ibmvtpm_send_crq(struct vio_dev *vdev,
+		u8 valid, u8 msg, u16 len, u32 data)
+{
+	u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) |
+		(u64)data;
+	return ibmvtpm_send_crq_word(vdev, w1);
+}
+
+/**
+ * tpm_ibmvtpm_recv - Receive data after send
+ *
+ * @chip:	tpm chip struct
+ * @buf:	buffer to read
+ * @count:	size of buffer
+ *
+ * Return:
+ *	Number of bytes read
+ */
+static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+	u16 len;
+	int sig;
+
+	if (!ibmvtpm->rtce_buf) {
+		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
+		return 0;
+	}
+
+	sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
+	if (sig)
+		return -EINTR;
+
+	len = ibmvtpm->res_len;
+
+	if (count < len) {
+		dev_err(ibmvtpm->dev,
+			"Invalid size in recv: count=%zd, crq_size=%d\n",
+			count, len);
+		return -EIO;
+	}
+
+	spin_lock(&ibmvtpm->rtce_lock);
+	memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
+	memset(ibmvtpm->rtce_buf, 0, len);
+	ibmvtpm->res_len = 0;
+	spin_unlock(&ibmvtpm->rtce_lock);
+	return len;
+}
+
+/**
+ * tpm_ibmvtpm_send - Send tpm request
+ *
+ * @chip:	tpm chip struct
+ * @buf:	buffer contains data to send
+ * @count:	size of buffer
+ *
+ * Return:
+ *	Number of bytes sent or < 0 on error.
+ */
+static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+	int rc, sig;
+
+	if (!ibmvtpm->rtce_buf) {
+		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
+		return 0;
+	}
+
+	if (count > ibmvtpm->rtce_size) {
+		dev_err(ibmvtpm->dev,
+			"Invalid size in send: count=%zd, rtce_size=%d\n",
+			count, ibmvtpm->rtce_size);
+		return -EIO;
+	}
+
+	if (ibmvtpm->tpm_processing_cmd) {
+		dev_info(ibmvtpm->dev,
+		         "Need to wait for TPM to finish\n");
+		/* wait for previous command to finish */
+		sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
+		if (sig)
+			return -EINTR;
+	}
+
+	spin_lock(&ibmvtpm->rtce_lock);
+	ibmvtpm->res_len = 0;
+	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
+
+	/*
+	 * set the processing flag before the Hcall, since we may get the
+	 * result (interrupt) before even being able to check rc.
+	 */
+	ibmvtpm->tpm_processing_cmd = true;
+
+	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+			IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
+			count, ibmvtpm->rtce_dma_handle);
+	if (rc != H_SUCCESS) {
+		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
+		rc = 0;
+		ibmvtpm->tpm_processing_cmd = false;
+	} else
+		rc = count;
+
+	spin_unlock(&ibmvtpm->rtce_lock);
+	return rc;
+}
+
+static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
+{
+	return;
+}
+
+static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
+{
+	return 0;
+}
+
+/**
+ * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
+ *
+ * @ibmvtpm:	vtpm device struct
+ *
+ * Return:
+ *	0 on success.
+ *	Non-zero on failure.
+ */
+static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
+{
+	int rc;
+
+	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+			IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0);
+	if (rc != H_SUCCESS)
+		dev_err(ibmvtpm->dev,
+			"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
+
+	return rc;
+}
+
+/**
+ * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
+ *			   - Note that this is vtpm version and not tpm version
+ *
+ * @ibmvtpm:	vtpm device struct
+ *
+ * Return:
+ *	0 on success.
+ *	Non-zero on failure.
+ */
+static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
+{
+	int rc;
+
+	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+			IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0);
+	if (rc != H_SUCCESS)
+		dev_err(ibmvtpm->dev,
+			"ibmvtpm_crq_get_version failed rc=%d\n", rc);
+
+	return rc;
+}
+
+/**
+ * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
+ * @ibmvtpm:	vtpm device struct
+ *
+ * Return:
+ *	0 on success.
+ *	Non-zero on failure.
+ */
+static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
+{
+	int rc;
+
+	rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD);
+	if (rc != H_SUCCESS)
+		dev_err(ibmvtpm->dev,
+			"ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);
+
+	return rc;
+}
+
+/**
+ * ibmvtpm_crq_send_init - Send a CRQ initialize message
+ * @ibmvtpm:	vtpm device struct
+ *
+ * Return:
+ *	0 on success.
+ *	Non-zero on failure.
+ */
+static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
+{
+	int rc;
+
+	rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
+	if (rc != H_SUCCESS)
+		dev_err(ibmvtpm->dev,
+			"ibmvtpm_crq_send_init failed rc=%d\n", rc);
+
+	return rc;
+}
+
+/**
+ * tpm_ibmvtpm_remove - ibm vtpm remove entry point
+ * @vdev:	vio device struct
+ *
+ * Return: Always 0.
+ */
+static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
+	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+	int rc = 0;
+
+	tpm_chip_unregister(chip);
+
+	free_irq(vdev->irq, ibmvtpm);
+
+	do {
+		if (rc)
+			msleep(100);
+		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+	dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
+			 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
+	free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);
+
+	if (ibmvtpm->rtce_buf) {
+		dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
+				 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
+		kfree(ibmvtpm->rtce_buf);
+	}
+
+	kfree(ibmvtpm);
+	/* For tpm_ibmvtpm_get_desired_dma */
+	dev_set_drvdata(&vdev->dev, NULL);
+
+	return 0;
+}
+
+/**
+ * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
+ * @vdev:	vio device struct
+ *
+ * Return:
+ *	Number of bytes the driver needs to DMA map.
+ */
+static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
+	struct ibmvtpm_dev *ibmvtpm;
+
+	/*
+	 * ibmvtpm initializes at probe time, so the data we are
+	 * asking for may not be set yet. Estimate that 4K is required
+	 * for the TCE-mapped buffer in addition to the CRQ.
+	 */
+	if (chip)
+		ibmvtpm = dev_get_drvdata(&chip->dev);
+	else
+		return CRQ_RES_BUF_SIZE + PAGE_SIZE;
+
+	return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
+}
+
+/**
+ * tpm_ibmvtpm_suspend - Suspend
+ * @dev:	device struct
+ *
+ * Return: Always 0.
+ */
+static int tpm_ibmvtpm_suspend(struct device *dev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+	int rc = 0;
+
+	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+			IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0);
+	if (rc != H_SUCCESS)
+		dev_err(ibmvtpm->dev,
+			"tpm_ibmvtpm_suspend failed rc=%d\n", rc);
+
+	return rc;
+}
+
+/**
+ * ibmvtpm_reset_crq - Reset CRQ
+ *
+ * @ibmvtpm:	ibm vtpm struct
+ *
+ * Return:
+ *	0 on success.
+ *	Non-zero on failure.
+ */
+static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
+{
+	int rc = 0;
+
+	do {
+		if (rc)
+			msleep(100);
+		rc = plpar_hcall_norets(H_FREE_CRQ,
+					ibmvtpm->vdev->unit_address);
+	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+	memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
+	ibmvtpm->crq_queue.index = 0;
+
+	return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
+				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
+}
+
+/**
+ * tpm_ibmvtpm_resume - Resume from suspend
+ *
+ * @dev:	device struct
+ *
+ * Return: Always 0.
+ */
+static int tpm_ibmvtpm_resume(struct device *dev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+	int rc = 0;
+
+	do {
+		if (rc)
+			msleep(100);
+		rc = plpar_hcall_norets(H_ENABLE_CRQ,
+					ibmvtpm->vdev->unit_address);
+	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+	if (rc) {
+		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = vio_enable_interrupts(ibmvtpm->vdev);
+	if (rc) {
+		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = ibmvtpm_crq_send_init(ibmvtpm);
+	if (rc)
+		dev_err(dev, "Error send_init rc=%d\n", rc);
+
+	return rc;
+}
+
+static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+	return (status == 0);
+}
+
+static const struct tpm_class_ops tpm_ibmvtpm = {
+	.recv = tpm_ibmvtpm_recv,
+	.send = tpm_ibmvtpm_send,
+	.cancel = tpm_ibmvtpm_cancel,
+	.status = tpm_ibmvtpm_status,
+	.req_complete_mask = 0,
+	.req_complete_val = 0,
+	.req_canceled = tpm_ibmvtpm_req_canceled,
+};
+
+static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
+	.suspend = tpm_ibmvtpm_suspend,
+	.resume = tpm_ibmvtpm_resume,
+};
+
+/**
+ * ibmvtpm_crq_get_next - Get next responded crq
+ *
+ * @ibmvtpm:	vtpm device struct
+ *
+ * Return: vtpm crq pointer or NULL.
+ */
+static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
+{
+	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
+	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];
+
+	if (crq->valid & VTPM_MSG_RES) {
+		if (++crq_q->index == crq_q->num_entry)
+			crq_q->index = 0;
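+		/* don't read the CRQ payload before 'valid' is seen set */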
+		smp_rmb();
+	} else
+		crq = NULL;
+	return crq;
+}
+
+/**
+ * ibmvtpm_crq_process - Process responded crq
+ *
+ * @crq:	crq to be processed
+ * @ibmvtpm:	vtpm device struct
+ */
+static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
+				struct ibmvtpm_dev *ibmvtpm)
+{
+	int rc = 0;
+
+	switch (crq->valid) {
+	case VALID_INIT_CRQ:
+		switch (crq->msg) {
+		case INIT_CRQ_RES:
+			dev_info(ibmvtpm->dev, "CRQ initialized\n");
+			rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
+			if (rc)
+				dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
+			return;
+		case INIT_CRQ_COMP_RES:
+			dev_info(ibmvtpm->dev,
+				 "CRQ initialization completed\n");
+			return;
+		default:
+			dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
+			return;
+		}
+	case IBMVTPM_VALID_CMD:
+		switch (crq->msg) {
+		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
+			if (be16_to_cpu(crq->len) <= 0) {
+				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
+				return;
+			}
+			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
+			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
+						    GFP_ATOMIC);
+			if (!ibmvtpm->rtce_buf) {
+				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
+				return;
+			}
+
+			ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
+				ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
+				DMA_BIDIRECTIONAL);
+
+			if (dma_mapping_error(ibmvtpm->dev,
+					      ibmvtpm->rtce_dma_handle)) {
+				kfree(ibmvtpm->rtce_buf);
+				ibmvtpm->rtce_buf = NULL;
+				dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
+			}
+
+			return;
+		case VTPM_GET_VERSION_RES:
+			ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
+			return;
+		case VTPM_TPM_COMMAND_RES:
+			/* len of the data in rtce buffer */
+			ibmvtpm->res_len = be16_to_cpu(crq->len);
+			ibmvtpm->tpm_processing_cmd = false;
+			wake_up_interruptible(&ibmvtpm->wq);
+			return;
+		default:
+			return;
+		}
+	}
+	return;
+}
+
+/**
+ * ibmvtpm_interrupt -	Interrupt handler
+ *
+ * @irq:		irq number to handle
+ * @vtpm_instance:	vtpm that received interrupt
+ *
+ * Return:
+ *	IRQ_HANDLED
+ */
+static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
+{
+	struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
+	struct ibmvtpm_crq *crq;
+
+	/* while loop is needed for initial setup (get version and
+	 * get rtce_size). There should be only one tpm request at any
+	 * given time.
+	 */
+	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
+		ibmvtpm_crq_process(crq, ibmvtpm);
+		crq->valid = 0;
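+		/* make the cleared valid flag visible before the next pass */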
+		smp_wmb();
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
+ *
+ * @vio_dev:	vio device struct
+ * @id:		vio device id struct
+ *
+ * Return:
+ *	0 on success.
+ *	Non-zero on failure.
+ */
+static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+				   const struct vio_device_id *id)
+{
+	struct ibmvtpm_dev *ibmvtpm;
+	struct device *dev = &vio_dev->dev;
+	struct ibmvtpm_crq_queue *crq_q;
+	struct tpm_chip *chip;
+	int rc = -ENOMEM, rc1;
+
+	chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
+	if (IS_ERR(chip))
+		return PTR_ERR(chip);
+
+	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
+	if (!ibmvtpm) {
+		dev_err(dev, "kzalloc for ibmvtpm failed\n");
+		goto cleanup;
+	}
+
+	ibmvtpm->dev = dev;
+	ibmvtpm->vdev = vio_dev;
+
+	crq_q = &ibmvtpm->crq_queue;
+	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
+	if (!crq_q->crq_addr) {
+		dev_err(dev, "Unable to allocate memory for crq_addr\n");
+		goto cleanup;
+	}
+
+	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
+	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
+						 CRQ_RES_BUF_SIZE,
+						 DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
+		dev_err(dev, "dma mapping failed\n");
+		goto cleanup;
+	}
+
+	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
+				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
+	if (rc == H_RESOURCE)
+		rc = ibmvtpm_reset_crq(ibmvtpm);
+
+	if (rc) {
+		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
+		goto reg_crq_cleanup;
+	}
+
+	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
+			 tpm_ibmvtpm_driver_name, ibmvtpm);
+	if (rc) {
+		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
+		goto init_irq_cleanup;
+	}
+
+	rc = vio_enable_interrupts(vio_dev);
+	if (rc) {
+		dev_err(dev, "Error %d enabling interrupts\n", rc);
+		goto init_irq_cleanup;
+	}
+
+	init_waitqueue_head(&ibmvtpm->wq);
+
+	crq_q->index = 0;
+
+	dev_set_drvdata(&chip->dev, ibmvtpm);
+
+	spin_lock_init(&ibmvtpm->rtce_lock);
+
+	rc = ibmvtpm_crq_send_init(ibmvtpm);
+	if (rc)
+		goto init_irq_cleanup;
+
+	rc = ibmvtpm_crq_get_version(ibmvtpm);
+	if (rc)
+		goto init_irq_cleanup;
+
+	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
+	if (rc)
+		goto init_irq_cleanup;
+
+	return tpm_chip_register(chip);
+init_irq_cleanup:
+	do {
+		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
+	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
+reg_crq_cleanup:
+	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
+			 DMA_BIDIRECTIONAL);
+cleanup:
+	if (ibmvtpm) {
+		if (crq_q->crq_addr)
+			free_page((unsigned long)crq_q->crq_addr);
+		kfree(ibmvtpm);
+	}
+
+	return rc;
+}
+
+static struct vio_driver ibmvtpm_driver = {
+	.id_table	 = tpm_ibmvtpm_device_table,
+	.probe		 = tpm_ibmvtpm_probe,
+	.remove		 = tpm_ibmvtpm_remove,
+	.get_desired_dma = tpm_ibmvtpm_get_desired_dma,
+	.name		 = tpm_ibmvtpm_driver_name,
+	.pm		 = &tpm_ibmvtpm_pm_ops,
+};
+
+/**
+ * ibmvtpm_module_init - Initialize ibm vtpm module.
+ *
+ * Return:
+ *	0 on success.
+ *	Non-zero on failure.
+ */
+static int __init ibmvtpm_module_init(void)
+{
+	return vio_register_driver(&ibmvtpm_driver);
+}
+
+/**
+ * ibmvtpm_module_exit - Tear down ibm vtpm module.
+ */
+static void __exit ibmvtpm_module_exit(void)
+{
+	vio_unregister_driver(&ibmvtpm_driver);
+}
+
+module_init(ibmvtpm_module_init);
+module_exit(ibmvtpm_module_exit);
+
+MODULE_AUTHOR("adlai@us.ibm.com");
+MODULE_DESCRIPTION("IBM vTPM Driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
new file mode 100644
index 0000000..91dfe76
--- /dev/null
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2012 IBM Corporation
+ *
+ * Author: Ashley Lai <ashleydlai@gmail.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#ifndef __TPM_IBMVTPM_H__
+#define __TPM_IBMVTPM_H__
+
+/* vTPM Message Format 1 */
+struct ibmvtpm_crq {
+	u8 valid;
+	u8 msg;
+	__be16 len;
+	__be32 data;
+	__be64 reserved;
+} __attribute__((packed, aligned(8)));
+
+struct ibmvtpm_crq_queue {
+	struct ibmvtpm_crq *crq_addr;
+	u32 index;
+	u32 num_entry;
+};
+
+struct ibmvtpm_dev {
+	struct device *dev;
+	struct vio_dev *vdev;
+	struct ibmvtpm_crq_queue crq_queue;
+	dma_addr_t crq_dma_handle;
+	u32 rtce_size;
+	void *rtce_buf;		/* kmalloc'ed response buffer, not MMIO */
+	dma_addr_t rtce_dma_handle;
+	spinlock_t rtce_lock;
+	wait_queue_head_t wq;
+	u16 res_len;
+	u32 vtpm_version;
+	bool tpm_processing_cmd;
+};
+
+#define CRQ_RES_BUF_SIZE	PAGE_SIZE
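+/* the CRQ queue holds PAGE_SIZE / 16 entries (each CRQ entry is 16 bytes) */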
+
+/* Initialize CRQ */
+#define INIT_CRQ_CMD		0xC001000000000000LL /* Init cmd */
+#define INIT_CRQ_COMP_CMD	0xC002000000000000LL /* Init complete cmd */
+#define INIT_CRQ_RES		0x01	/* Init respond */
+#define INIT_CRQ_COMP_RES	0x02	/* Init complete respond */
+#define VALID_INIT_CRQ		0xC0	/* Valid command for init crq */
+
+/* vTPM CRQ response is the message type | 0x80 */
+#define VTPM_MSG_RES		0x80
+#define IBMVTPM_VALID_CMD	0x80
+
+/* vTPM CRQ message types */
+#define VTPM_GET_VERSION			0x01
+#define VTPM_GET_VERSION_RES			(0x01 | VTPM_MSG_RES)
+
+#define VTPM_TPM_COMMAND			0x02
+#define VTPM_TPM_COMMAND_RES			(0x02 | VTPM_MSG_RES)
+
+#define VTPM_GET_RTCE_BUFFER_SIZE		0x03
+#define VTPM_GET_RTCE_BUFFER_SIZE_RES		(0x03 | VTPM_MSG_RES)
+
+#define VTPM_PREPARE_TO_SUSPEND			0x04
+#define VTPM_PREPARE_TO_SUSPEND_RES		(0x04 | VTPM_MSG_RES)
+
+#endif
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
new file mode 100644
index 0000000..d8f1004
--- /dev/null
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -0,0 +1,629 @@
+/*
+ * Description:
+ * Device Driver for the Infineon Technologies
+ * SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * Copyright (C) 2005, Marcel Selhorst <tpmdd@selhorst.net>
+ * Sirrix AG - security technologies <tpmdd@sirrix.com> and
+ * Applied Data Security Group, Ruhr-University Bochum, Germany
+ * Project-Homepage: http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/ 
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/init.h>
+#include <linux/pnp.h>
+#include "tpm.h"
+
+/* Infineon specific definitions */
+/* maximum number of WTX-packages */
+#define	TPM_MAX_WTX_PACKAGES 	50
+/* msleep-Time for WTX-packages */
+#define	TPM_WTX_MSLEEP_TIME 	20
+/* msleep-Time --> Interval to check status register */
+#define	TPM_MSLEEP_TIME 	3
+/* gives number of max. msleep()-calls before throwing timeout */
+#define	TPM_MAX_TRIES		5000
+#define	TPM_INFINEON_DEV_VEN_VALUE	0x15D1
+
+#define TPM_INF_IO_PORT		0x0
+#define TPM_INF_IO_MEM		0x1
+
+#define TPM_INF_ADDR		0x0
+#define TPM_INF_DATA		0x1
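+
+/* config space is accessed as an index/data pair: the register index is
+ * written to TPM_INF_ADDR and its value accessed through TPM_INF_DATA */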
+
+struct tpm_inf_dev {
+	int iotype;
+
+	void __iomem *mem_base;	/* MMIO ioremap'd addr */
+	unsigned long map_base;	/* phys MMIO base */
+	unsigned long map_size;	/* MMIO region size */
+	unsigned int index_off;	/* index register offset */
+
+	unsigned int data_regs;	/* Data registers */
+	unsigned int data_size;
+
+	unsigned int config_port;	/* IO Port config index reg */
+	unsigned int config_size;
+};
+
+static struct tpm_inf_dev tpm_dev;
+
+static inline void tpm_data_out(unsigned char data, unsigned char offset)
+{
+	if (tpm_dev.iotype == TPM_INF_IO_PORT)
+		outb(data, tpm_dev.data_regs + offset);
+	else
+		writeb(data, tpm_dev.mem_base + tpm_dev.data_regs + offset);
+}
+
+static inline unsigned char tpm_data_in(unsigned char offset)
+{
+	if (tpm_dev.iotype == TPM_INF_IO_PORT)
+		return inb(tpm_dev.data_regs + offset);
+	else
+		return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset);
+}
+
+static inline void tpm_config_out(unsigned char data, unsigned char offset)
+{
+	if (tpm_dev.iotype == TPM_INF_IO_PORT)
+		outb(data, tpm_dev.config_port + offset);
+	else
+		writeb(data, tpm_dev.mem_base + tpm_dev.index_off + offset);
+}
+
+static inline unsigned char tpm_config_in(unsigned char offset)
+{
+	if (tpm_dev.iotype == TPM_INF_IO_PORT)
+		return inb(tpm_dev.config_port + offset);
+	else
+		return readb(tpm_dev.mem_base + tpm_dev.index_off + offset);
+}
+
+/* TPM header definitions */
+enum infineon_tpm_header {
+	TPM_VL_VER = 0x01,
+	TPM_VL_CHANNEL_CONTROL = 0x07,
+	TPM_VL_CHANNEL_PERSONALISATION = 0x0A,
+	TPM_VL_CHANNEL_TPM = 0x0B,
+	TPM_VL_CONTROL = 0x00,
+	TPM_INF_NAK = 0x15,
+	TPM_CTRL_WTX = 0x10,
+	TPM_CTRL_WTX_ABORT = 0x18,
+	TPM_CTRL_WTX_ABORT_ACK = 0x18,
+	TPM_CTRL_ERROR = 0x20,
+	TPM_CTRL_CHAININGACK = 0x40,
+	TPM_CTRL_CHAINING = 0x80,
+	TPM_CTRL_DATA = 0x04,
+	TPM_CTRL_DATA_CHA = 0x84,
+	TPM_CTRL_DATA_CHA_ACK = 0xC4
+};
+
+enum infineon_tpm_register {
+	WRFIFO = 0x00,
+	RDFIFO = 0x01,
+	STAT = 0x02,
+	CMD = 0x03
+};
+
+enum infineon_tpm_command_bits {
+	CMD_DIS = 0x00,
+	CMD_LP = 0x01,
+	CMD_RES = 0x02,
+	CMD_IRQC = 0x06
+};
+
+enum infineon_tpm_status_bits {
+	STAT_XFE = 0x00,
+	STAT_LPA = 0x01,
+	STAT_FOK = 0x02,
+	STAT_TOK = 0x03,
+	STAT_IRQA = 0x06,
+	STAT_RDA = 0x07
+};
+
+/* some outgoing values */
+enum infineon_tpm_values {
+	CHIP_ID1 = 0x20,
+	CHIP_ID2 = 0x21,
+	TPM_DAR = 0x30,
+	RESET_LP_IRQC_DISABLE = 0x41,
+	ENABLE_REGISTER_PAIR = 0x55,
+	IOLIMH = 0x60,
+	IOLIML = 0x61,
+	DISABLE_REGISTER_PAIR = 0xAA,
+	IDVENL = 0xF1,
+	IDVENH = 0xF2,
+	IDPDL = 0xF3,
+	IDPDH = 0xF4
+};
+
+static int number_of_wtx;
+
+static int empty_fifo(struct tpm_chip *chip, int clear_wrfifo)
+{
+	int status;
+	int check = 0;
+	int i;
+
+	if (clear_wrfifo) {
+		for (i = 0; i < 4096; i++) {
+			status = tpm_data_in(WRFIFO);
+			if (status == 0xff) {
+				if (check == 5)
+					break;
+				else
+					check++;
+			}
+		}
+	}
+	/* Note: The values currently in the TPM's FIFO are thrown away,
+	   since there is no use for them. Usually this does not matter,
+	   because the TPM will give its answer immediately or the command
+	   will be aborted anyway, so the data here is typically garbage.
+	   We still have to drain it, because the next communication with
+	   the TPM would be corrupted if stale data remained in the read
+	   FIFO.
+	 */
+	i = 0;
+	do {
+		status = tpm_data_in(RDFIFO);
+		status = tpm_data_in(STAT);
+		i++;
+		if (i == TPM_MAX_TRIES)
+			return -EIO;
+	} while ((status & (1 << STAT_RDA)) != 0);
+	return 0;
+}
+
+static int wait(struct tpm_chip *chip, int wait_for_bit)
+{
+	int status;
+	int i;
+	for (i = 0; i < TPM_MAX_TRIES; i++) {
+		status = tpm_data_in(STAT);
+		/* check the status-register if wait_for_bit is set */
+		if (status & 1 << wait_for_bit)
+			break;
+		tpm_msleep(TPM_MSLEEP_TIME);
+	}
+	if (i == TPM_MAX_TRIES) {	/* timeout occurred */
+		if (wait_for_bit == STAT_XFE)
+			dev_err(&chip->dev, "Timeout in wait(STAT_XFE)\n");
+		if (wait_for_bit == STAT_RDA)
+			dev_err(&chip->dev, "Timeout in wait(STAT_RDA)\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
+{
+	wait(chip, STAT_XFE);
+	tpm_data_out(sendbyte, WRFIFO);
+}
+
+    /* Note: WTX means Waiting-Time-Extension. Whenever the TPM needs more
+       calculation time, it sends a WTX-package, which has to be acknowledged
+       or aborted. This usually occurs if you are hammering the TPM with key
+       creation. The maximum number of WTX-packages is set in the definitions
+       above; once that number is reached, the waiting-time is denied and
+       the TPM command has to be resent.
+     */
+
+static void tpm_wtx(struct tpm_chip *chip)
+{
+	number_of_wtx++;
+	dev_info(&chip->dev, "Granting WTX (%02d / %02d)\n",
+		 number_of_wtx, TPM_MAX_WTX_PACKAGES);
+	wait_and_send(chip, TPM_VL_VER);
+	wait_and_send(chip, TPM_CTRL_WTX);
+	wait_and_send(chip, 0x00);
+	wait_and_send(chip, 0x00);
+	tpm_msleep(TPM_WTX_MSLEEP_TIME);
+}
+
+static void tpm_wtx_abort(struct tpm_chip *chip)
+{
+	dev_info(&chip->dev, "Aborting WTX\n");
+	wait_and_send(chip, TPM_VL_VER);
+	wait_and_send(chip, TPM_CTRL_WTX_ABORT);
+	wait_and_send(chip, 0x00);
+	wait_and_send(chip, 0x00);
+	number_of_wtx = 0;
+	tpm_msleep(TPM_WTX_MSLEEP_TIME);
+}
+
+static int tpm_inf_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	int i;
+	int ret;
+	u32 size = 0;
+	number_of_wtx = 0;
+
+recv_begin:
+	/* start receiving header */
+	for (i = 0; i < 4; i++) {
+		ret = wait(chip, STAT_RDA);
+		if (ret)
+			return -EIO;
+		buf[i] = tpm_data_in(RDFIFO);
+	}
+
+	if (buf[0] != TPM_VL_VER) {
+		dev_err(&chip->dev,
+			"Wrong transport protocol implementation!\n");
+		return -EIO;
+	}
+
+	if (buf[1] == TPM_CTRL_DATA) {
+		/* size of the data received */
+		size = ((buf[2] << 8) | buf[3]);
+
+		for (i = 0; i < size; i++) {
+			/* bail out on timeout, as in the header read above */
+			ret = wait(chip, STAT_RDA);
+			if (ret)
+				return -EIO;
+			buf[i] = tpm_data_in(RDFIFO);
+		}
+
+		if ((size == 0x6D00) && (buf[1] == 0x80)) {
+			dev_err(&chip->dev, "Error handling on vendor layer!\n");
+			return -EIO;
+		}
+
+		for (i = 0; i < size; i++)
+			buf[i] = buf[i + 6];
+
+		size = size - 6;
+		return size;
+	}
+
+	if (buf[1] == TPM_CTRL_WTX) {
+		dev_info(&chip->dev, "WTX-package received\n");
+		if (number_of_wtx < TPM_MAX_WTX_PACKAGES) {
+			tpm_wtx(chip);
+			goto recv_begin;
+		} else {
+			tpm_wtx_abort(chip);
+			goto recv_begin;
+		}
+	}
+
+	if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) {
+		dev_info(&chip->dev, "WTX-abort acknowledged\n");
+		return size;
+	}
+
+	if (buf[1] == TPM_CTRL_ERROR) {
+		dev_err(&chip->dev, "ERROR-package received:\n");
+		if (buf[4] == TPM_INF_NAK)
+			dev_err(&chip->dev,
+				"-> Negative acknowledgement"
+				" - retransmit command!\n");
+		return -EIO;
+	}
+	return -EIO;
+}
+
+static int tpm_inf_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	int i;
+	int ret;
+	u8 count_high, count_low, count_4, count_3, count_2, count_1;
+
+	/* Disabling Reset, LP and IRQC */
+	tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+
+	ret = empty_fifo(chip, 1);
+	if (ret) {
+		dev_err(&chip->dev, "Timeout while clearing FIFO\n");
+		return -EIO;
+	}
+
+	ret = wait(chip, STAT_XFE);
+	if (ret)
+		return -EIO;
+
+	count_4 = (count & 0xff000000) >> 24;
+	count_3 = (count & 0x00ff0000) >> 16;
+	count_2 = (count & 0x0000ff00) >> 8;
+	count_1 = (count & 0x000000ff);
+	count_high = ((count + 6) & 0xffffff00) >> 8;
+	count_low = ((count + 6) & 0x000000ff);
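+
+	/*
+	 * Editor's worked example (illustrative): for count = 0x1234, the
+	 * data header carries the 32-bit body length as count_4..count_1 =
+	 * 0x00 0x00 0x12 0x34, while count_high/count_low carry
+	 * count + 6 = 0x123a, the vendor-layer length including the
+	 * six-byte data header itself.
+	 */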
+
+	/* Sending Header */
+	wait_and_send(chip, TPM_VL_VER);
+	wait_and_send(chip, TPM_CTRL_DATA);
+	wait_and_send(chip, count_high);
+	wait_and_send(chip, count_low);
+
+	/* Sending Data Header */
+	wait_and_send(chip, TPM_VL_VER);
+	wait_and_send(chip, TPM_VL_CHANNEL_TPM);
+	wait_and_send(chip, count_4);
+	wait_and_send(chip, count_3);
+	wait_and_send(chip, count_2);
+	wait_and_send(chip, count_1);
+
+	/* Sending Data */
+	for (i = 0; i < count; i++)
+		wait_and_send(chip, buf[i]);
+	return count;
+}
+
+static void tpm_inf_cancel(struct tpm_chip *chip)
+{
+	/*
+	 * Since we are using the legacy mode to communicate with the TPM,
+	 * we have no cancel function, but a workaround for interrupting
+	 * the TPM through WTX.
+	 */
+}
+
+static u8 tpm_inf_status(struct tpm_chip *chip)
+{
+	return tpm_data_in(STAT);
+}
+
+static const struct tpm_class_ops tpm_inf = {
+	.recv = tpm_inf_recv,
+	.send = tpm_inf_send,
+	.cancel = tpm_inf_cancel,
+	.status = tpm_inf_status,
+	.req_complete_mask = 0,
+	.req_complete_val = 0,
+};
+
+static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
+	/* Infineon TPMs */
+	{"IFX0101", 0},
+	{"IFX0102", 0},
+	{"", 0}
+};
+
+MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
+
+static int tpm_inf_pnp_probe(struct pnp_dev *dev,
+				       const struct pnp_device_id *dev_id)
+{
+	int rc = 0;
+	u8 iol, ioh;
+	int vendorid[2];
+	int version[2];
+	int productid[2];
+	const char *chipname;
+	struct tpm_chip *chip;
+
+	/* read IO-ports through PnP */
+	if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
+	    !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
+
+		tpm_dev.iotype = TPM_INF_IO_PORT;
+
+		tpm_dev.config_port = pnp_port_start(dev, 0);
+		tpm_dev.config_size = pnp_port_len(dev, 0);
+		tpm_dev.data_regs = pnp_port_start(dev, 1);
+		tpm_dev.data_size = pnp_port_len(dev, 1);
+		if ((tpm_dev.data_size < 4) || (tpm_dev.config_size < 2)) {
+			rc = -EINVAL;
+			goto err_last;
+		}
+		dev_info(&dev->dev, "Found %s with ID %s\n",
+			 dev->name, dev_id->id);
+		if (!((tpm_dev.data_regs >> 8) & 0xff)) {
+			rc = -EINVAL;
+			goto err_last;
+		}
+		/* publish my base address and request region */
+		if (request_region(tpm_dev.data_regs, tpm_dev.data_size,
+				   "tpm_infineon0") == NULL) {
+			rc = -EINVAL;
+			goto err_last;
+		}
+		if (request_region(tpm_dev.config_port, tpm_dev.config_size,
+				   "tpm_infineon0") == NULL) {
+			release_region(tpm_dev.data_regs, tpm_dev.data_size);
+			rc = -EINVAL;
+			goto err_last;
+		}
+	} else if (pnp_mem_valid(dev, 0) &&
+		   !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
+
+		tpm_dev.iotype = TPM_INF_IO_MEM;
+
+		tpm_dev.map_base = pnp_mem_start(dev, 0);
+		tpm_dev.map_size = pnp_mem_len(dev, 0);
+
+		dev_info(&dev->dev, "Found %s with ID %s\n",
+			 dev->name, dev_id->id);
+
+		/* publish my base address and request region */
+		if (request_mem_region(tpm_dev.map_base, tpm_dev.map_size,
+				       "tpm_infineon0") == NULL) {
+			rc = -EINVAL;
+			goto err_last;
+		}
+
+		tpm_dev.mem_base = ioremap(tpm_dev.map_base, tpm_dev.map_size);
+		if (tpm_dev.mem_base == NULL) {
+			release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
+			rc = -EINVAL;
+			goto err_last;
+		}
+
+		/*
+		 * The only known MMIO based Infineon TPM system provides
+		 * a single large mem region with the device config
+		 * registers at the default TPM_ADDR.  The data registers
+		 * seem like they could be placed anywhere within the MMIO
+		 * region, but let's just put them at zero offset.
+		 */
+		tpm_dev.index_off = TPM_ADDR;
+		tpm_dev.data_regs = 0x0;
+	} else {
+		rc = -EINVAL;
+		goto err_last;
+	}
+
+	/* query the chip for its vendor ID, product ID, version number, etc. */
+	tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+	tpm_config_out(IDVENL, TPM_INF_ADDR);
+	vendorid[1] = tpm_config_in(TPM_INF_DATA);
+	tpm_config_out(IDVENH, TPM_INF_ADDR);
+	vendorid[0] = tpm_config_in(TPM_INF_DATA);
+	tpm_config_out(IDPDL, TPM_INF_ADDR);
+	productid[1] = tpm_config_in(TPM_INF_DATA);
+	tpm_config_out(IDPDH, TPM_INF_ADDR);
+	productid[0] = tpm_config_in(TPM_INF_DATA);
+	tpm_config_out(CHIP_ID1, TPM_INF_ADDR);
+	version[1] = tpm_config_in(TPM_INF_DATA);
+	tpm_config_out(CHIP_ID2, TPM_INF_ADDR);
+	version[0] = tpm_config_in(TPM_INF_DATA);
+
+	switch ((productid[0] << 8) | productid[1]) {
+	case 6:
+		chipname = " (SLD 9630 TT 1.1)";
+		break;
+	case 11:
+		chipname = " (SLB 9635 TT 1.2)";
+		break;
+	default:
+		chipname = " (unknown chip)";
+		break;
+	}
+
+	if ((vendorid[0] << 8 | vendorid[1]) == (TPM_INFINEON_DEV_VEN_VALUE)) {
+
+		/* configure TPM with IO-ports */
+		tpm_config_out(IOLIMH, TPM_INF_ADDR);
+		tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+		tpm_config_out(IOLIML, TPM_INF_ADDR);
+		tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+
+		/* verify that the IO-ports were set correctly */
+		tpm_config_out(IOLIMH, TPM_INF_ADDR);
+		ioh = tpm_config_in(TPM_INF_DATA);
+		tpm_config_out(IOLIML, TPM_INF_ADDR);
+		iol = tpm_config_in(TPM_INF_DATA);
+
+		if ((ioh << 8 | iol) != tpm_dev.data_regs) {
+			dev_err(&dev->dev,
+				"Could not set IO-data registers to 0x%x\n",
+				tpm_dev.data_regs);
+			rc = -EIO;
+			goto err_release_region;
+		}
+
+		/* activate register */
+		tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+		tpm_config_out(0x01, TPM_INF_DATA);
+		tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+
+		/* disable RESET, LP and IRQC */
+		tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+
+		/* Finally, we're done; print some info */
+		dev_info(&dev->dev, "TPM found: "
+			 "config base 0x%lx, "
+			 "data base 0x%lx, "
+			 "chip version 0x%02x%02x, "
+			 "vendor id 0x%x%x (Infineon), "
+			 "product id 0x%02x%02x"
+			 "%s\n",
+			 tpm_dev.iotype == TPM_INF_IO_PORT ?
+			 tpm_dev.config_port :
+			 tpm_dev.map_base + tpm_dev.index_off,
+			 tpm_dev.iotype == TPM_INF_IO_PORT ?
+			 tpm_dev.data_regs :
+			 tpm_dev.map_base + tpm_dev.data_regs,
+			 version[0], version[1],
+			 vendorid[0], vendorid[1],
+			 productid[0], productid[1], chipname);
+
+		chip = tpmm_chip_alloc(&dev->dev, &tpm_inf);
+		if (IS_ERR(chip)) {
+			rc = PTR_ERR(chip);
+			goto err_release_region;
+		}
+
+		rc = tpm_chip_register(chip);
+		if (rc)
+			goto err_release_region;
+
+		return 0;
+	} else {
+		rc = -ENODEV;
+		goto err_release_region;
+	}
+
+err_release_region:
+	if (tpm_dev.iotype == TPM_INF_IO_PORT) {
+		release_region(tpm_dev.data_regs, tpm_dev.data_size);
+		release_region(tpm_dev.config_port, tpm_dev.config_size);
+	} else {
+		iounmap(tpm_dev.mem_base);
+		release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
+	}
+
+err_last:
+	return rc;
+}
+
+static void tpm_inf_pnp_remove(struct pnp_dev *dev)
+{
+	struct tpm_chip *chip = pnp_get_drvdata(dev);
+
+	tpm_chip_unregister(chip);
+
+	if (tpm_dev.iotype == TPM_INF_IO_PORT) {
+		release_region(tpm_dev.data_regs, tpm_dev.data_size);
+		release_region(tpm_dev.config_port,
+			       tpm_dev.config_size);
+	} else {
+		iounmap(tpm_dev.mem_base);
+		release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
+	}
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tpm_inf_resume(struct device *dev)
+{
+	/* Re-configure TPM after suspending */
+	tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+	tpm_config_out(IOLIMH, TPM_INF_ADDR);
+	tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+	tpm_config_out(IOLIML, TPM_INF_ADDR);
+	tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+	/* activate register */
+	tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+	tpm_config_out(0x01, TPM_INF_DATA);
+	tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+	/* disable RESET, LP and IRQC */
+	tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+	return tpm_pm_resume(dev);
+}
+#endif
+static SIMPLE_DEV_PM_OPS(tpm_inf_pm, tpm_pm_suspend, tpm_inf_resume);
+
+static struct pnp_driver tpm_inf_pnp_driver = {
+	.name = "tpm_inf_pnp",
+	.id_table = tpm_inf_pnp_tbl,
+	.probe = tpm_inf_pnp_probe,
+	.remove = tpm_inf_pnp_remove,
+	.driver = {
+		.pm = &tpm_inf_pm,
+	}
+};
+
+module_pnp_driver(tpm_inf_pnp_driver);
+
+MODULE_AUTHOR("Marcel Selhorst <tpmdd@sirrix.com>");
+MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
+MODULE_VERSION("1.9.2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
new file mode 100644
index 0000000..5d6cce7
--- /dev/null
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ * 
+ */
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "tpm.h"
+
+/* National definitions */
+enum tpm_nsc_addr {
+	TPM_NSC_IRQ = 0x07,
+	TPM_NSC_BASE0_HI = 0x60,
+	TPM_NSC_BASE0_LO = 0x61,
+	TPM_NSC_BASE1_HI = 0x62,
+	TPM_NSC_BASE1_LO = 0x63
+};
+
+enum tpm_nsc_index {
+	NSC_LDN_INDEX = 0x07,
+	NSC_SID_INDEX = 0x20,
+	NSC_LDC_INDEX = 0x30,
+	NSC_DIO_INDEX = 0x60,
+	NSC_CIO_INDEX = 0x62,
+	NSC_IRQ_INDEX = 0x70,
+	NSC_ITS_INDEX = 0x71
+};
+
+enum tpm_nsc_status_loc {
+	NSC_STATUS = 0x01,
+	NSC_COMMAND = 0x01,
+	NSC_DATA = 0x00
+};
+
+/* status bits */
+enum tpm_nsc_status {
+	NSC_STATUS_OBF = 0x01,	/* output buffer full */
+	NSC_STATUS_IBF = 0x02,	/* input buffer full */
+	NSC_STATUS_F0 = 0x04,	/* F0 */
+	NSC_STATUS_A2 = 0x08,	/* A2 */
+	NSC_STATUS_RDY = 0x10,	/* ready to receive command */
+	NSC_STATUS_IBR = 0x20	/* ready to receive data */
+};
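+
+/*
+ * Editor's note (illustrative): unlike the Infineon STAT_* bit numbers in
+ * tpm_infineon.c above, these NSC_STATUS_* values are ready-made masks and
+ * are tested directly, e.g.:
+ *
+ *	wait_for_stat(chip, NSC_STATUS_IBF, 0, &data);	// wait for IBF clear
+ */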
+
+/* command bits */
+enum tpm_nsc_cmd_mode {
+	NSC_COMMAND_NORMAL = 0x01,	/* normal mode */
+	NSC_COMMAND_EOC = 0x03,
+	NSC_COMMAND_CANCEL = 0x22
+};
+
+struct tpm_nsc_priv {
+	unsigned long base;
+};
+
+/*
+ * Wait for a certain status to appear
+ */
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 *data)
+{
+	struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+	unsigned long stop;
+
+	/* status immediately available check */
+	*data = inb(priv->base + NSC_STATUS);
+	if ((*data & mask) == val)
+		return 0;
+
+	/* wait for status */
+	stop = jiffies + 10 * HZ;
+	do {
+		msleep(TPM_TIMEOUT);
+		*data = inb(priv->base + NSC_STATUS);
+		if ((*data & mask) == val)
+			return 0;
+	}
+	while (time_before(jiffies, stop));
+
+	return -EBUSY;
+}
+
+static int nsc_wait_for_ready(struct tpm_chip *chip)
+{
+	struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+	int status;
+	unsigned long stop;
+
+	/* status immediately available check */
+	status = inb(priv->base + NSC_STATUS);
+	if (status & NSC_STATUS_OBF)
+		status = inb(priv->base + NSC_DATA);
+	if (status & NSC_STATUS_RDY)
+		return 0;
+
+	/* wait for status */
+	stop = jiffies + 100;
+	do {
+		msleep(TPM_TIMEOUT);
+		status = inb(priv->base + NSC_STATUS);
+		if (status & NSC_STATUS_OBF)
+			status = inb(priv->base + NSC_DATA);
+		if (status & NSC_STATUS_RDY)
+			return 0;
+	}
+	while (time_before(jiffies, stop));
+
+	dev_info(&chip->dev, "wait for ready failed\n");
+	return -EBUSY;
+}
+
+
+static int tpm_nsc_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+	u8 *buffer = buf;
+	u8 data, *p;
+	u32 size;
+	__be32 *native_size;
+
+	if (count < 6)
+		return -EIO;
+
+	if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
+		dev_err(&chip->dev, "F0 timeout\n");
+		return -EIO;
+	}
+
+	data = inb(priv->base + NSC_DATA);
+	if (data != NSC_COMMAND_NORMAL) {
+		dev_err(&chip->dev, "not in normal mode (0x%x)\n",
+			data);
+		return -EIO;
+	}
+
+	/* read the whole packet */
+	for (p = buffer; p < &buffer[count]; p++) {
+		if (wait_for_stat
+		    (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
+			dev_err(&chip->dev,
+				"OBF timeout (while reading data)\n");
+			return -EIO;
+		}
+		if (data & NSC_STATUS_F0)
+			break;
+		*p = inb(priv->base + NSC_DATA);
+	}
+
+	if ((data & NSC_STATUS_F0) == 0 &&
+	(wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) {
+		dev_err(&chip->dev, "F0 not set\n");
+		return -EIO;
+	}
+
+	data = inb(priv->base + NSC_DATA);
+	if (data != NSC_COMMAND_EOC) {
+		dev_err(&chip->dev,
+			"expected end of command(0x%x)\n", data);
+		return -EIO;
+	}
+
+	native_size = (__force __be32 *) (buf + 2);
+	size = be32_to_cpu(*native_size);
+
+	if (count < size)
+		return -EIO;
+
+	return size;
+}
+
+static int tpm_nsc_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+	u8 data;
+	int i;
+
+	/*
+	 * If we hit the chip with back-to-back commands it locks up
+	 * and never sets IBF. Hitting it with this "hammer" seems to
+	 * fix it. It is not clear why this is needed; we followed the
+	 * flow chart in the manual to the letter.
+	 */
+	outb(NSC_COMMAND_CANCEL, priv->base + NSC_COMMAND);
+
+	if (nsc_wait_for_ready(chip) != 0)
+		return -EIO;
+
+	if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
+		dev_err(&chip->dev, "IBF timeout\n");
+		return -EIO;
+	}
+
+	outb(NSC_COMMAND_NORMAL, priv->base + NSC_COMMAND);
+	if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
+		dev_err(&chip->dev, "IBR timeout\n");
+		return -EIO;
+	}
+
+	for (i = 0; i < count; i++) {
+		if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
+			dev_err(&chip->dev,
+				"IBF timeout (while writing data)\n");
+			return -EIO;
+		}
+		outb(buf[i], priv->base + NSC_DATA);
+	}
+
+	if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
+		dev_err(&chip->dev, "IBF timeout\n");
+		return -EIO;
+	}
+	outb(NSC_COMMAND_EOC, priv->base + NSC_COMMAND);
+
+	return count;
+}
+
+static void tpm_nsc_cancel(struct tpm_chip *chip)
+{
+	struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+
+	outb(NSC_COMMAND_CANCEL, priv->base + NSC_COMMAND);
+}
+
+static u8 tpm_nsc_status(struct tpm_chip *chip)
+{
+	struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+
+	return inb(priv->base + NSC_STATUS);
+}
+
+static bool tpm_nsc_req_canceled(struct tpm_chip *chip, u8 status)
+{
+	return (status == NSC_STATUS_RDY);
+}
+
+static const struct tpm_class_ops tpm_nsc = {
+	.recv = tpm_nsc_recv,
+	.send = tpm_nsc_send,
+	.cancel = tpm_nsc_cancel,
+	.status = tpm_nsc_status,
+	.req_complete_mask = NSC_STATUS_OBF,
+	.req_complete_val = NSC_STATUS_OBF,
+	.req_canceled = tpm_nsc_req_canceled,
+};
+
+static struct platform_device *pdev;
+
+static void tpm_nsc_remove(struct device *dev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+	struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+
+	tpm_chip_unregister(chip);
+	release_region(priv->base, 2);
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume);
+
+static struct platform_driver nsc_drv = {
+	.driver          = {
+		.name    = "tpm_nsc",
+		.pm      = &tpm_nsc_pm,
+	},
+};
+
+static inline int tpm_read_index(int base, int index)
+{
+	outb(index, base);
+	return inb(base+1) & 0xFF;
+}
+
+static inline void tpm_write_index(int base, int index, int value)
+{
+	outb(index, base);
+	outb(value & 0xFF, base+1);
+}
+
+static int __init init_nsc(void)
+{
+	int rc = 0;
+	int lo, hi, err;
+	int nscAddrBase = TPM_ADDR;
+	struct tpm_chip *chip;
+	unsigned long base;
+	struct tpm_nsc_priv *priv;
+
+	/* verify that it is a National part (SID) */
+	if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) {
+		nscAddrBase = (tpm_read_index(TPM_SUPERIO_ADDR, 0x2C)<<8)|
+			(tpm_read_index(TPM_SUPERIO_ADDR, 0x2B)&0xFE);
+		if (tpm_read_index(nscAddrBase, NSC_SID_INDEX) != 0xF6)
+			return -ENODEV;
+	}
+
+	err = platform_driver_register(&nsc_drv);
+	if (err)
+		return err;
+
+	hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI);
+	lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO);
+	base = (hi<<8) | lo;
+
+	/* enable the DPM module */
+	tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01);
+
+	pdev = platform_device_alloc("tpm_nscl0", -1);
+	if (!pdev) {
+		rc = -ENOMEM;
+		goto err_unreg_drv;
+	}
+
+	pdev->num_resources = 0;
+	pdev->dev.driver = &nsc_drv.driver;
+	pdev->dev.release = tpm_nsc_remove;
+
+	rc = platform_device_add(pdev);
+	if (rc < 0)
+		goto err_put_dev;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		rc = -ENOMEM;
+		goto err_del_dev;
+	}
+
+	priv->base = base;
+
+	if (request_region(base, 2, "tpm_nsc0") == NULL) {
+		rc = -EBUSY;
+		goto err_del_dev;
+	}
+
+	chip = tpmm_chip_alloc(&pdev->dev, &tpm_nsc);
+	if (IS_ERR(chip)) {
+		rc = -ENODEV;
+		goto err_rel_reg;
+	}
+
+	dev_set_drvdata(&chip->dev, priv);
+
+	rc = tpm_chip_register(chip);
+	if (rc)
+		goto err_rel_reg;
+
+	dev_dbg(&pdev->dev, "NSC TPM detected\n");
+	dev_dbg(&pdev->dev,
+		"NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n",
+		tpm_read_index(nscAddrBase, 0x07),
+		tpm_read_index(nscAddrBase, 0x20),
+		tpm_read_index(nscAddrBase, 0x27));
+	dev_dbg(&pdev->dev,
+		"NSC SIOCF1 0x%x SIOCF5 0x%x SIOCF6 0x%x SIOCF8 0x%x\n",
+		tpm_read_index(nscAddrBase, 0x21),
+		tpm_read_index(nscAddrBase, 0x25),
+		tpm_read_index(nscAddrBase, 0x26),
+		tpm_read_index(nscAddrBase, 0x28));
+	dev_dbg(&pdev->dev, "NSC IO Base0 0x%x\n",
+		(tpm_read_index(nscAddrBase, 0x60) << 8) |
+		tpm_read_index(nscAddrBase, 0x61));
+	dev_dbg(&pdev->dev, "NSC IO Base1 0x%x\n",
+		(tpm_read_index(nscAddrBase, 0x62) << 8) |
+		tpm_read_index(nscAddrBase, 0x63));
+	dev_dbg(&pdev->dev, "NSC Interrupt number and wakeup 0x%x\n",
+		tpm_read_index(nscAddrBase, 0x70));
+	dev_dbg(&pdev->dev, "NSC IRQ type select 0x%x\n",
+		tpm_read_index(nscAddrBase, 0x71));
+	dev_dbg(&pdev->dev,
+		"NSC DMA channel select0 0x%x, select1 0x%x\n",
+		tpm_read_index(nscAddrBase, 0x74),
+		tpm_read_index(nscAddrBase, 0x75));
+	dev_dbg(&pdev->dev,
+		"NSC Config "
+		"0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+		tpm_read_index(nscAddrBase, 0xF0),
+		tpm_read_index(nscAddrBase, 0xF1),
+		tpm_read_index(nscAddrBase, 0xF2),
+		tpm_read_index(nscAddrBase, 0xF3),
+		tpm_read_index(nscAddrBase, 0xF4),
+		tpm_read_index(nscAddrBase, 0xF5),
+		tpm_read_index(nscAddrBase, 0xF6),
+		tpm_read_index(nscAddrBase, 0xF7),
+		tpm_read_index(nscAddrBase, 0xF8),
+		tpm_read_index(nscAddrBase, 0xF9));
+
+	dev_info(&pdev->dev,
+		 "NSC TPM revision %d\n",
+		 tpm_read_index(nscAddrBase, 0x27) & 0x1F);
+
+	return 0;
+
+err_rel_reg:
+	release_region(base, 2);
+err_del_dev:
+	platform_device_del(pdev);
+err_put_dev:
+	platform_device_put(pdev);
+err_unreg_drv:
+	platform_driver_unregister(&nsc_drv);
+	return rc;
+}
+
+static void __exit cleanup_nsc(void)
+{
+	if (pdev) {
+		tpm_nsc_remove(&pdev->dev);
+		platform_device_unregister(pdev);
+	}
+
+	platform_driver_unregister(&nsc_drv);
+}
+
+module_init(init_nsc);
+module_exit(cleanup_nsc);
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
new file mode 100644
index 0000000..86dd852
--- /dev/null
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2012-2014 Intel Corporation
+ *
+ * Authors:
+ * Xiaoyan Zhang <xiaoyan.zhang@intel.com>
+ * Jiang Liu <jiang.liu@linux.intel.com>
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * This file contains implementation of the sysfs interface for PPI.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+
+#include <linux/acpi.h>
+#include "tpm.h"
+
+#define TPM_PPI_REVISION_ID	1
+#define TPM_PPI_FN_VERSION	1
+#define TPM_PPI_FN_SUBREQ	2
+#define TPM_PPI_FN_GETREQ	3
+#define TPM_PPI_FN_GETACT	4
+#define TPM_PPI_FN_GETRSP	5
+#define TPM_PPI_FN_SUBREQ2	7
+#define TPM_PPI_FN_GETOPR	8
+#define PPI_TPM_REQ_MAX		22
+#define PPI_VS_REQ_START	128
+#define PPI_VS_REQ_END		255
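+
+/*
+ * Editor's note (illustrative): the TPM_PPI_FN_* indices double as bit
+ * positions when probing _DSM support, so availability of function 7 is
+ * checked below with:
+ *
+ *	acpi_check_dsm(handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID,
+ *		       1 << TPM_PPI_FN_SUBREQ2);
+ */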
+
+static const guid_t tpm_ppi_guid =
+	GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4,
+		  0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53);
+
+static inline union acpi_object *
+tpm_eval_dsm(acpi_handle ppi_handle, int func, acpi_object_type type,
+	     union acpi_object *argv4)
+{
+	BUG_ON(!ppi_handle);
+	return acpi_evaluate_dsm_typed(ppi_handle, &tpm_ppi_guid,
+				       TPM_PPI_REVISION_ID,
+				       func, argv4, type);
+}
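+
+/*
+ * Editor's illustrative usage (the handle variable is hypothetical): the
+ * PPI version string would be fetched via _DSM function 1 roughly as:
+ *
+ *	union acpi_object *obj = tpm_eval_dsm(handle, TPM_PPI_FN_VERSION,
+ *					      ACPI_TYPE_STRING, NULL);
+ *	if (obj) {
+ *		pr_info("PPI %s\n", obj->string.pointer);
+ *		ACPI_FREE(obj);
+ *	}
+ */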
+
+static ssize_t tpm_show_ppi_version(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct tpm_chip *chip = to_tpm_chip(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", chip->ppi_version);
+}
+
+static ssize_t tpm_show_ppi_request(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	ssize_t size = -EINVAL;
+	union acpi_object *obj;
+	struct tpm_chip *chip = to_tpm_chip(dev);
+
+	obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETREQ,
+			   ACPI_TYPE_PACKAGE, NULL);
+	if (!obj)
+		return -ENXIO;
+
+	/*
+	 * output.pointer should be of package type, containing two integers.
+	 * The first is the function return code; 0 means success and 1 means
+	 * error. The second is the pending TPM operation requested by the
+	 * OS; 0 means none, and a value > 0 is the operation code.
+	 */
+	if (obj->package.count == 2 &&
+	    obj->package.elements[0].type == ACPI_TYPE_INTEGER &&
+	    obj->package.elements[1].type == ACPI_TYPE_INTEGER) {
+		if (obj->package.elements[0].integer.value)
+			size = -EFAULT;
+		else
+			size = scnprintf(buf, PAGE_SIZE, "%llu\n",
+				 obj->package.elements[1].integer.value);
+	}
+
+	ACPI_FREE(obj);
+
+	return size;
+}
+
+static ssize_t tpm_store_ppi_request(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	u32 req;
+	u64 ret;
+	int func = TPM_PPI_FN_SUBREQ;
+	union acpi_object *obj, tmp;
+	union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp);
+	struct tpm_chip *chip = to_tpm_chip(dev);
+
+	/*
+	 * the function to submit TPM operation request to pre-os environment
+	 * is updated with function index from SUBREQ to SUBREQ2 since PPI
+	 * version 1.1
+	 */
+	if (acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
+			   TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_SUBREQ2))
+		func = TPM_PPI_FN_SUBREQ2;
+
+	/*
+	 * The PPI spec defines params[3].type as ACPI_TYPE_PACKAGE, but
+	 * some BIOSes accept only buffer/string/integer types while others
+	 * accept buffer/string/package types. For PPI versions 1.0 and 1.1,
+	 * use the buffer type for compatibility; from 1.2 on, use the
+	 * package type as the spec requires.
+	 */
+	if (strcmp(chip->ppi_version, "1.2") < 0) {
+		if (sscanf(buf, "%d", &req) != 1)
+			return -EINVAL;
+		argv4.type = ACPI_TYPE_BUFFER;
+		argv4.buffer.length = sizeof(req);
+		argv4.buffer.pointer = (u8 *)&req;
+	} else {
+		tmp.type = ACPI_TYPE_INTEGER;
+		if (sscanf(buf, "%llu", &tmp.integer.value) != 1)
+			return -EINVAL;
+	}
+
+	obj = tpm_eval_dsm(chip->acpi_dev_handle, func, ACPI_TYPE_INTEGER,
+			   &argv4);
+	if (!obj) {
+		return -ENXIO;
+	} else {
+		ret = obj->integer.value;
+		ACPI_FREE(obj);
+	}
+
+	if (ret == 0)
+		return count;
+
+	return (ret == 1) ? -EPERM : -EFAULT;
+}
+
+static ssize_t tpm_show_ppi_transition_action(struct device *dev,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	u32 ret;
+	ssize_t status;
+	union acpi_object *obj = NULL;
+	union acpi_object tmp = {
+		.buffer.type = ACPI_TYPE_BUFFER,
+		.buffer.length = 0,
+		.buffer.pointer = NULL
+	};
+	struct tpm_chip *chip = to_tpm_chip(dev);
+
+	static char *info[] = {
+		"None",
+		"Shutdown",
+		"Reboot",
+		"OS Vendor-specific",
+		"Error",
+	};
+
+	/*
+	 * The PPI spec defines params[3].type as an empty package, but some
+	 * platforms (e.g. Capella with PPI 1.0) need an integer/string/buffer
+	 * type, so for compatibility define params[3].type as a buffer if
+	 * the PPI version is < 1.2.
+	 */
+	if (strcmp(chip->ppi_version, "1.2") < 0)
+		obj = &tmp;
+	obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETACT,
+			   ACPI_TYPE_INTEGER, obj);
+	if (!obj) {
+		return -ENXIO;
+	} else {
+		ret = obj->integer.value;
+		ACPI_FREE(obj);
+	}
+
+	if (ret < ARRAY_SIZE(info) - 1)
+		status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ret]);
+	else
+		status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret,
+				   info[ARRAY_SIZE(info)-1]);
+	return status;
+}
+
+static ssize_t tpm_show_ppi_response(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	ssize_t status = -EINVAL;
+	union acpi_object *obj, *ret_obj;
+	u64 req, res;
+	struct tpm_chip *chip = to_tpm_chip(dev);
+
+	obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETRSP,
+			   ACPI_TYPE_PACKAGE, NULL);
+	if (!obj)
+		return -ENXIO;
+
+	/*
+	 * output.pointer should be of package type, containing three
+	 * integers: the function return code, the most recent TPM operation
+	 * request, and the response to that request. The response is only
+	 * meaningful if the return code is 0 and the request value is
+	 * non-zero.
+	 */
+	ret_obj = obj->package.elements;
+	if (obj->package.count < 3 ||
+	    ret_obj[0].type != ACPI_TYPE_INTEGER ||
+	    ret_obj[1].type != ACPI_TYPE_INTEGER ||
+	    ret_obj[2].type != ACPI_TYPE_INTEGER)
+		goto cleanup;
+
+	if (ret_obj[0].integer.value) {
+		status = -EFAULT;
+		goto cleanup;
+	}
+
+	req = ret_obj[1].integer.value;
+	res = ret_obj[2].integer.value;
+	if (req) {
+		if (res == 0)
+			status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
+					   "0: Success");
+		else if (res == 0xFFFFFFF0)
+			status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
+					   "0xFFFFFFF0: User Abort");
+		else if (res == 0xFFFFFFF1)
+			status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
+					   "0xFFFFFFF1: BIOS Failure");
+		else if (res >= 1 && res <= 0x00000FFF)
+			status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
+					   req, res, "Corresponding TPM error");
+		else
+			status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
+					   req, res, "Error");
+	} else {
+		status = scnprintf(buf, PAGE_SIZE, "%llu: %s\n",
+				   req, "No Recent Request");
+	}
+
+cleanup:
+	ACPI_FREE(obj);
+	return status;
+}
+
+static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
+				   u32 end)
+{
+	int i;
+	u32 ret;
+	char *str = buf;
+	union acpi_object *obj, tmp;
+	union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp);
+
+	static char *info[] = {
+		"Not implemented",
+		"BIOS only",
+		"Blocked for OS by BIOS",
+		"User required",
+		"User not required",
+	};
+
+	if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID,
+			    1 << TPM_PPI_FN_GETOPR))
+		return -EPERM;
+
+	tmp.integer.type = ACPI_TYPE_INTEGER;
+	for (i = start; i <= end; i++) {
+		tmp.integer.value = i;
+		obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR,
+				   ACPI_TYPE_INTEGER, &argv);
+		if (!obj) {
+			return -ENOMEM;
+		} else {
+			ret = obj->integer.value;
+			ACPI_FREE(obj);
+		}
+
+		if (ret > 0 && ret < ARRAY_SIZE(info))
+			str += scnprintf(str, PAGE_SIZE - (str - buf),
+					 "%d %d: %s\n", i, ret, info[ret]);
+	}
+
+	return str - buf;
+}
+
+static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpm_chip *chip = to_tpm_chip(dev);
+
+	return show_ppi_operations(chip->acpi_dev_handle, buf, 0,
+				   PPI_TPM_REQ_MAX);
+}
+
+static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpm_chip *chip = to_tpm_chip(dev);
+
+	return show_ppi_operations(chip->acpi_dev_handle, buf, PPI_VS_REQ_START,
+				   PPI_VS_REQ_END);
+}
+
+static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL);
+static DEVICE_ATTR(request, S_IRUGO | S_IWUSR | S_IWGRP,
+		   tpm_show_ppi_request, tpm_store_ppi_request);
+static DEVICE_ATTR(transition_action, S_IRUGO,
+		   tpm_show_ppi_transition_action, NULL);
+static DEVICE_ATTR(response, S_IRUGO, tpm_show_ppi_response, NULL);
+static DEVICE_ATTR(tcg_operations, S_IRUGO, tpm_show_ppi_tcg_operations, NULL);
+static DEVICE_ATTR(vs_operations, S_IRUGO, tpm_show_ppi_vs_operations, NULL);
+
+static struct attribute *ppi_attrs[] = {
+	&dev_attr_version.attr,
+	&dev_attr_request.attr,
+	&dev_attr_transition_action.attr,
+	&dev_attr_response.attr,
+	&dev_attr_tcg_operations.attr,
+	&dev_attr_vs_operations.attr,
+	NULL,
+};
+static struct attribute_group ppi_attr_grp = {
+	.name = "ppi",
+	.attrs = ppi_attrs
+};
+
+void tpm_add_ppi(struct tpm_chip *chip)
+{
+	union acpi_object *obj;
+
+	if (!chip->acpi_dev_handle)
+		return;
+
+	if (!acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
+			    TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_VERSION))
+		return;
+
+	/* Cache PPI version string. */
+	obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, &tpm_ppi_guid,
+				      TPM_PPI_REVISION_ID, TPM_PPI_FN_VERSION,
+				      NULL, ACPI_TYPE_STRING);
+	if (obj) {
+		strlcpy(chip->ppi_version, obj->string.pointer,
+			sizeof(chip->ppi_version));
+		ACPI_FREE(obj);
+	}
+
+	chip->groups[chip->groups_cnt++] = &ppi_attr_grp;
+}
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
new file mode 100644
index 0000000..f08949a
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2005, 2006 IBM Corporation
+ * Copyright (C) 2014, 2015 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pnp.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/acpi.h>
+#include <linux/freezer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/kernel.h>
+#include "tpm.h"
+#include "tpm_tis_core.h"
+
+struct tpm_info {
+	struct resource res;
+	/* irq > 0 means: use irq $irq;
+	 * irq = 0 means: autoprobe for an irq;
+	 * irq = -1 means: no irq support
+	 */
+	int irq;
+};
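+
+/*
+ * Editor's examples (illustrative): tpm_tis_pnp_init() below sets
+ * .irq = -1 when the PnP device has no valid IRQ resource, while the
+ * forced x86 platform device is probed with .irq = 0 to autoprobe.
+ */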
+
+struct tpm_tis_tcg_phy {
+	struct tpm_tis_data priv;
+	void __iomem *iobase;
+};
+
+static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *data)
+{
+	return container_of(data, struct tpm_tis_tcg_phy, priv);
+}
+
+static bool interrupts = true;
+module_param(interrupts, bool, 0444);
+MODULE_PARM_DESC(interrupts, "Enable interrupts");
+
+static bool itpm;
+module_param(itpm, bool, 0444);
+MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
+
+static bool force;
+#ifdef CONFIG_X86
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
+#endif
+
+#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
+static int has_hid(struct acpi_device *dev, const char *hid)
+{
+	struct acpi_hardware_id *id;
+
+	list_for_each_entry(id, &dev->pnp.ids, list)
+		if (!strcmp(hid, id->id))
+			return 1;
+
+	return 0;
+}
+
+static inline int is_itpm(struct acpi_device *dev)
+{
+	if (!dev)
+		return 0;
+	return has_hid(dev, "INTC0102");
+}
+#else
+static inline int is_itpm(struct acpi_device *dev)
+{
+	return 0;
+}
+#endif
+
+#if defined(CONFIG_ACPI)
+#define DEVICE_IS_TPM2 1
+
+static const struct acpi_device_id tpm_acpi_tbl[] = {
+	{"MSFT0101", DEVICE_IS_TPM2},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, tpm_acpi_tbl);
+
+static int check_acpi_tpm2(struct device *dev)
+{
+	const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev);
+	struct acpi_table_tpm2 *tbl;
+	acpi_status st;
+
+	if (!aid || aid->driver_data != DEVICE_IS_TPM2)
+		return 0;
+
+	/* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2
+	 * table is mandatory
+	 */
+	st = acpi_get_table(ACPI_SIG_TPM2, 1,
+			    (struct acpi_table_header **)&tbl);
+	if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
+		dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
+		return -EINVAL;
+	}
+
+	/* The tpm2_crb driver handles this device */
+	if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
+		return -ENODEV;
+
+	return 0;
+}
+#else
+static int check_acpi_tpm2(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+			      u8 *result)
+{
+	struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+	while (len--)
+		*result++ = ioread8(phy->iobase + addr);
+
+	return 0;
+}
+
+static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+			       const u8 *value)
+{
+	struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+	while (len--)
+		iowrite8(*value++, phy->iobase + addr);
+
+	return 0;
+}
+
+static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
+{
+	struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+	*result = ioread16(phy->iobase + addr);
+
+	return 0;
+}
+
+static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
+{
+	struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+	*result = ioread32(phy->iobase + addr);
+
+	return 0;
+}
+
+static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
+{
+	struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+	iowrite32(value, phy->iobase + addr);
+
+	return 0;
+}
+
+static const struct tpm_tis_phy_ops tpm_tcg = {
+	.read_bytes = tpm_tcg_read_bytes,
+	.write_bytes = tpm_tcg_write_bytes,
+	.read16 = tpm_tcg_read16,
+	.read32 = tpm_tcg_read32,
+	.write32 = tpm_tcg_write32,
+};
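+
+/*
+ * Editor's sketch (an assumption about the tpm_tis_core.h helpers, which
+ * are not shown here): the core's small accessors dispatch through these
+ * phy_ops, along the lines of:
+ *
+ *	static inline int tpm_tis_read8(struct tpm_tis_data *data,
+ *					u32 addr, u8 *result)
+ *	{
+ *		return data->phy_ops->read_bytes(data, addr, 1, result);
+ *	}
+ */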
+
+static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info)
+{
+	struct tpm_tis_tcg_phy *phy;
+	int irq = -1;
+	int rc;
+
+	rc = check_acpi_tpm2(dev);
+	if (rc)
+		return rc;
+
+	phy = devm_kzalloc(dev, sizeof(struct tpm_tis_tcg_phy), GFP_KERNEL);
+	if (phy == NULL)
+		return -ENOMEM;
+
+	phy->iobase = devm_ioremap_resource(dev, &tpm_info->res);
+	if (IS_ERR(phy->iobase))
+		return PTR_ERR(phy->iobase);
+
+	if (interrupts)
+		irq = tpm_info->irq;
+
+	if (itpm || is_itpm(ACPI_COMPANION(dev)))
+		phy->priv.flags |= TPM_TIS_ITPM_WORKAROUND;
+
+	return tpm_tis_core_init(dev, &phy->priv, irq, &tpm_tcg,
+				 ACPI_HANDLE(dev));
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
+static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
+			    const struct pnp_device_id *pnp_id)
+{
+	struct tpm_info tpm_info = {};
+	struct resource *res;
+
+	res = pnp_get_resource(pnp_dev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+	tpm_info.res = *res;
+
+	if (pnp_irq_valid(pnp_dev, 0))
+		tpm_info.irq = pnp_irq(pnp_dev, 0);
+	else
+		tpm_info.irq = -1;
+
+	return tpm_tis_init(&pnp_dev->dev, &tpm_info);
+}
+
+static struct pnp_device_id tpm_pnp_tbl[] = {
+	{"PNP0C31", 0},		/* TPM */
+	{"ATM1200", 0},		/* Atmel */
+	{"IFX0102", 0},		/* Infineon */
+	{"BCM0101", 0},		/* Broadcom */
+	{"BCM0102", 0},		/* Broadcom */
+	{"NSC1200", 0},		/* National */
+	{"ICO0102", 0},		/* Intel */
+	/* Add new here */
+	{"", 0},		/* User Specified */
+	{"", 0}			/* Terminator */
+};
+MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
+
+static void tpm_tis_pnp_remove(struct pnp_dev *dev)
+{
+	struct tpm_chip *chip = pnp_get_drvdata(dev);
+
+	tpm_chip_unregister(chip);
+	tpm_tis_remove(chip);
+}
+
+static struct pnp_driver tis_pnp_driver = {
+	.name = "tpm_tis",
+	.id_table = tpm_pnp_tbl,
+	.probe = tpm_tis_pnp_init,
+	.remove = tpm_tis_pnp_remove,
+	.driver	= {
+		.pm = &tpm_tis_pm,
+	},
+};
+
+#define TIS_HID_USR_IDX (ARRAY_SIZE(tpm_pnp_tbl) - 2)
+module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
+		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
+MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
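+
+/*
+ * Editor's usage note (illustrative, with a hypothetical ID): this
+ * parameter fills the empty "User Specified" slot of tpm_pnp_tbl, so
+ * booting with tpm_tis.hid=ABC0123 would add that HID to the match table.
+ */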
+
+static struct platform_device *force_pdev;
+
+static int tpm_tis_plat_probe(struct platform_device *pdev)
+{
+	struct tpm_info tpm_info = {};
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "no memory resource defined\n");
+		return -ENODEV;
+	}
+	tpm_info.res = *res;
+
+	tpm_info.irq = platform_get_irq(pdev, 0);
+	if (tpm_info.irq <= 0) {
+		if (pdev != force_pdev)
+			tpm_info.irq = -1;
+		else
+			/* When forcing, autoprobe for an IRQ */
+			tpm_info.irq = 0;
+	}
+
+	return tpm_tis_init(&pdev->dev, &tpm_info);
+}
+
+static int tpm_tis_plat_remove(struct platform_device *pdev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
+
+	tpm_chip_unregister(chip);
+	tpm_tis_remove(chip);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id tis_of_platform_match[] = {
+	{.compatible = "tcg,tpm-tis-mmio"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, tis_of_platform_match);
+#endif
+
+static struct platform_driver tis_drv = {
+	.probe = tpm_tis_plat_probe,
+	.remove = tpm_tis_plat_remove,
+	.driver = {
+		.name		= "tpm_tis",
+		.pm		= &tpm_tis_pm,
+		.of_match_table = of_match_ptr(tis_of_platform_match),
+		.acpi_match_table = ACPI_PTR(tpm_acpi_tbl),
+	},
+};
+
+static int tpm_tis_force_device(void)
+{
+	struct platform_device *pdev;
+	static const struct resource x86_resources[] = {
+		{
+			.start = 0xFED40000,
+			.end = 0xFED40000 + TIS_MEM_LEN - 1,
+			.flags = IORESOURCE_MEM,
+		},
+	};
+
+	if (!force)
+		return 0;
+
+	/* The driver core will match the name tpm_tis of the device to
+	 * the tpm_tis platform driver and complete the setup via
+	 * tpm_tis_plat_probe
+	 */
+	pdev = platform_device_register_simple("tpm_tis", -1, x86_resources,
+					       ARRAY_SIZE(x86_resources));
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
+	force_pdev = pdev;
+
+	return 0;
+}
+
+static int __init init_tis(void)
+{
+	int rc;
+
+	rc = tpm_tis_force_device();
+	if (rc)
+		goto err_force;
+
+	rc = platform_driver_register(&tis_drv);
+	if (rc)
+		goto err_platform;
+
+
+	if (IS_ENABLED(CONFIG_PNP)) {
+		rc = pnp_register_driver(&tis_pnp_driver);
+		if (rc)
+			goto err_pnp;
+	}
+
+	return 0;
+
+err_pnp:
+	platform_driver_unregister(&tis_drv);
+err_platform:
+	if (force_pdev)
+		platform_device_unregister(force_pdev);
+err_force:
+	return rc;
+}
+
+static void __exit cleanup_tis(void)
+{
+	pnp_unregister_driver(&tis_pnp_driver);
+	platform_driver_unregister(&tis_drv);
+
+	if (force_pdev)
+		platform_device_unregister(force_pdev);
+}
+
+module_init(init_tis);
+module_exit(cleanup_tis);
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
new file mode 100644
index 0000000..d2345d9
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -0,0 +1,1073 @@
+/*
+ * Copyright (C) 2005, 2006 IBM Corporation
+ * Copyright (C) 2014, 2015 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pnp.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/acpi.h>
+#include <linux/freezer.h>
+#include "tpm.h"
+#include "tpm_tis_core.h"
+
+static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value);
+
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+					bool check_cancel, bool *canceled)
+{
+	u8 status = chip->ops->status(chip);
+
+	*canceled = false;
+	if ((status & mask) == mask)
+		return true;
+	if (check_cancel && chip->ops->req_canceled(chip, status)) {
+		*canceled = true;
+		return true;
+	}
+	return false;
+}
+
+static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+		unsigned long timeout, wait_queue_head_t *queue,
+		bool check_cancel)
+{
+	unsigned long stop;
+	long rc;
+	u8 status;
+	bool canceled = false;
+
+	/* check current status */
+	status = chip->ops->status(chip);
+	if ((status & mask) == mask)
+		return 0;
+
+	stop = jiffies + timeout;
+
+	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+		timeout = stop - jiffies;
+		if ((long)timeout <= 0)
+			return -ETIME;
+		rc = wait_event_interruptible_timeout(*queue,
+			wait_for_tpm_stat_cond(chip, mask, check_cancel,
+					       &canceled),
+			timeout);
+		if (rc > 0) {
+			if (canceled)
+				return -ECANCELED;
+			return 0;
+		}
+		if (rc == -ERESTARTSYS && freezing(current)) {
+			clear_thread_flag(TIF_SIGPENDING);
+			goto again;
+		}
+	} else {
+		do {
+			usleep_range(TPM_TIMEOUT_USECS_MIN,
+				     TPM_TIMEOUT_USECS_MAX);
+			status = chip->ops->status(chip);
+			if ((status & mask) == mask)
+				return 0;
+		} while (time_before(jiffies, stop));
+	}
+	return -ETIME;
+}
+
+/* Before we attempt to access the TPM we must see that the valid bit is set.
+ * The specification says that this bit is 0 at reset and remains 0 until the
+ * 'TPM has gone through its self test and initialization and has established
+ * correct values in the other bits.'
+ */
+static int wait_startup(struct tpm_chip *chip, int l)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	unsigned long stop = jiffies + chip->timeout_a;
+
+	do {
+		int rc;
+		u8 access;
+
+		rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
+		if (rc < 0)
+			return rc;
+
+		if (access & TPM_ACCESS_VALID)
+			return 0;
+		tpm_msleep(TPM_TIMEOUT);
+	} while (time_before(jiffies, stop));
+	return -1;
+}
+
+static bool check_locality(struct tpm_chip *chip, int l)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	int rc;
+	u8 access;
+
+	rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
+	if (rc < 0)
+		return false;
+
+	if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
+		priv->locality = l;
+		return true;
+	}
+
+	return false;
+}
+
+static bool locality_inactive(struct tpm_chip *chip, int l)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	int rc;
+	u8 access;
+
+	rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
+	if (rc < 0)
+		return false;
+
+	if ((access & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
+	    == TPM_ACCESS_VALID)
+		return true;
+
+	return false;
+}
+
+static int release_locality(struct tpm_chip *chip, int l)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	unsigned long stop, timeout;
+	long rc;
+
+	tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
+
+	stop = jiffies + chip->timeout_a;
+
+	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+		timeout = stop - jiffies;
+		if ((long)timeout <= 0)
+			return -1;
+
+		rc = wait_event_interruptible_timeout(priv->int_queue,
+						      (locality_inactive(chip, l)),
+						      timeout);
+
+		if (rc > 0)
+			return 0;
+
+		if (rc == -ERESTARTSYS && freezing(current)) {
+			clear_thread_flag(TIF_SIGPENDING);
+			goto again;
+		}
+	} else {
+		do {
+			if (locality_inactive(chip, l))
+				return 0;
+			tpm_msleep(TPM_TIMEOUT);
+		} while (time_before(jiffies, stop));
+	}
+	return -1;
+}
+
+static int request_locality(struct tpm_chip *chip, int l)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	unsigned long stop, timeout;
+	long rc;
+
+	if (check_locality(chip, l))
+		return l;
+
+	rc = tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_REQUEST_USE);
+	if (rc < 0)
+		return rc;
+
+	stop = jiffies + chip->timeout_a;
+
+	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+		timeout = stop - jiffies;
+		if ((long)timeout <= 0)
+			return -1;
+		rc = wait_event_interruptible_timeout(priv->int_queue,
+						      (check_locality
+						       (chip, l)),
+						      timeout);
+		if (rc > 0)
+			return l;
+		if (rc == -ERESTARTSYS && freezing(current)) {
+			clear_thread_flag(TIF_SIGPENDING);
+			goto again;
+		}
+	} else {
+		/* wait for burstcount */
+		do {
+			if (check_locality(chip, l))
+				return l;
+			tpm_msleep(TPM_TIMEOUT);
+		} while (time_before(jiffies, stop));
+	}
+	return -1;
+}
+
+static u8 tpm_tis_status(struct tpm_chip *chip)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	int rc;
+	u8 status;
+
+	rc = tpm_tis_read8(priv, TPM_STS(priv->locality), &status);
+	if (rc < 0)
+		return 0;
+
+	return status;
+}
+
+static void tpm_tis_ready(struct tpm_chip *chip)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+	/* this causes the current command to be aborted */
+	tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_COMMAND_READY);
+}
+
+static int get_burstcount(struct tpm_chip *chip)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	unsigned long stop;
+	int burstcnt, rc;
+	u32 value;
+
+	/* wait for burstcount */
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		stop = jiffies + chip->timeout_a;
+	else
+		stop = jiffies + chip->timeout_d;
+	do {
+		rc = tpm_tis_read32(priv, TPM_STS(priv->locality), &value);
+		if (rc < 0)
+			return rc;
+
+		burstcnt = (value >> 8) & 0xFFFF;
+		if (burstcnt)
+			return burstcnt;
+		usleep_range(TPM_TIMEOUT_USECS_MIN, TPM_TIMEOUT_USECS_MAX);
+	} while (time_before(jiffies, stop));
+	return -EBUSY;
+}
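+
+/*
+ * Editor's worked example (illustrative): burstcount occupies bits 8..23
+ * of TPM_STS, so a status word of 0x00201090 yields
+ * (0x00201090 >> 8) & 0xffff = 0x2010 bytes that may be transferred
+ * before the status register has to be polled again.
+ */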
+
+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	int size = 0, burstcnt, rc;
+
+	while (size < count) {
+		rc = wait_for_tpm_stat(chip,
+				 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+				 chip->timeout_c,
+				 &priv->read_queue, true);
+		if (rc < 0)
+			return rc;
+		burstcnt = get_burstcount(chip);
+		if (burstcnt < 0) {
+			dev_err(&chip->dev, "Unable to read burstcount\n");
+			return burstcnt;
+		}
+		burstcnt = min_t(int, burstcnt, count - size);
+
+		rc = tpm_tis_read_bytes(priv, TPM_DATA_FIFO(priv->locality),
+					burstcnt, buf + size);
+		if (rc < 0)
+			return rc;
+
+		size += burstcnt;
+	}
+	return size;
+}
+
+static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	int size = 0;
+	int status;
+	u32 expected;
+
+	if (count < TPM_HEADER_SIZE) {
+		size = -EIO;
+		goto out;
+	}
+
+	/* read first 10 bytes, including tag, paramsize, and result */
+	size = recv_data(chip, buf, TPM_HEADER_SIZE);
+	if (size < TPM_HEADER_SIZE) {
+		dev_err(&chip->dev, "Unable to read header\n");
+		goto out;
+	}
+
+	expected = be32_to_cpu(*(__be32 *) (buf + 2));
+	if (expected > count || expected < TPM_HEADER_SIZE) {
+		size = -EIO;
+		goto out;
+	}
+
+	size += recv_data(chip, &buf[TPM_HEADER_SIZE],
+			  expected - TPM_HEADER_SIZE);
+	if (size < expected) {
+		dev_err(&chip->dev, "Unable to read remainder of result\n");
+		size = -ETIME;
+		goto out;
+	}
+
+	if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+				&priv->int_queue, false) < 0) {
+		size = -ETIME;
+		goto out;
+	}
+	status = tpm_tis_status(chip);
+	if (status & TPM_STS_DATA_AVAIL) {	/* retry? */
+		dev_err(&chip->dev, "Error left over data\n");
+		size = -EIO;
+		goto out;
+	}
+
+out:
+	tpm_tis_ready(chip);
+	return size;
+}
+
+/*
+ * If interrupts are used (signaled by an irq set in the vendor structure),
+ * tpm.c can skip polling for the data to be available, as the interrupt is
+ * waited for here.
+ */
+static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	int rc, status, burstcnt;
+	size_t count = 0;
+	bool itpm = priv->flags & TPM_TIS_ITPM_WORKAROUND;
+
+	status = tpm_tis_status(chip);
+	if ((status & TPM_STS_COMMAND_READY) == 0) {
+		tpm_tis_ready(chip);
+		if (wait_for_tpm_stat
+		    (chip, TPM_STS_COMMAND_READY, chip->timeout_b,
+		     &priv->int_queue, false) < 0) {
+			rc = -ETIME;
+			goto out_err;
+		}
+	}
+
+	while (count < len - 1) {
+		burstcnt = get_burstcount(chip);
+		if (burstcnt < 0) {
+			dev_err(&chip->dev, "Unable to read burstcount\n");
+			rc = burstcnt;
+			goto out_err;
+		}
+		burstcnt = min_t(int, burstcnt, len - count - 1);
+		rc = tpm_tis_write_bytes(priv, TPM_DATA_FIFO(priv->locality),
+					 burstcnt, buf + count);
+		if (rc < 0)
+			goto out_err;
+
+		count += burstcnt;
+
+		if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+					&priv->int_queue, false) < 0) {
+			rc = -ETIME;
+			goto out_err;
+		}
+		status = tpm_tis_status(chip);
+		if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
+			rc = -EIO;
+			goto out_err;
+		}
+	}
+
+	/* write last byte */
+	rc = tpm_tis_write8(priv, TPM_DATA_FIFO(priv->locality), buf[count]);
+	if (rc < 0)
+		goto out_err;
+
+	if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+				&priv->int_queue, false) < 0) {
+		rc = -ETIME;
+		goto out_err;
+	}
+	status = tpm_tis_status(chip);
+	if (!itpm && (status & TPM_STS_DATA_EXPECT) != 0) {
+		rc = -EIO;
+		goto out_err;
+	}
+
+	return 0;
+
+out_err:
+	tpm_tis_ready(chip);
+	return rc;
+}
+
+static void disable_interrupts(struct tpm_chip *chip)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	u32 intmask;
+	int rc;
+
+	rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
+	if (rc < 0)
+		intmask = 0;
+
+	intmask &= ~TPM_GLOBAL_INT_ENABLE;
+	rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+
+	devm_free_irq(chip->dev.parent, priv->irq, chip);
+	priv->irq = 0;
+	chip->flags &= ~TPM_CHIP_FLAG_IRQ;
+}
+
+/*
+ * If interrupts are used (signaled by an irq set in the vendor structure),
+ * tpm.c can skip polling for the data to be available, as the interrupt is
+ * waited for here.
+ */
+static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	int rc;
+	u32 ordinal;
+	unsigned long dur;
+
+	rc = tpm_tis_send_data(chip, buf, len);
+	if (rc < 0)
+		return rc;
+
+	/* go and do it */
+	rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO);
+	if (rc < 0)
+		goto out_err;
+
+	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+		ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+
+		if (chip->flags & TPM_CHIP_FLAG_TPM2)
+			dur = tpm2_calc_ordinal_duration(chip, ordinal);
+		else
+			dur = tpm_calc_ordinal_duration(chip, ordinal);
+
+		if (wait_for_tpm_stat
+		    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur,
+		     &priv->read_queue, false) < 0) {
+			rc = -ETIME;
+			goto out_err;
+		}
+	}
+	return len;
+out_err:
+	tpm_tis_ready(chip);
+	return rc;
+}
+
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+	int rc, irq;
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+	if (!(chip->flags & TPM_CHIP_FLAG_IRQ) || priv->irq_tested)
+		return tpm_tis_send_main(chip, buf, len);
+
+	/* Verify receipt of the expected IRQ */
+	irq = priv->irq;
+	priv->irq = 0;
+	chip->flags &= ~TPM_CHIP_FLAG_IRQ;
+	rc = tpm_tis_send_main(chip, buf, len);
+	priv->irq = irq;
+	chip->flags |= TPM_CHIP_FLAG_IRQ;
+	if (!priv->irq_tested)
+		tpm_msleep(1);
+	if (!priv->irq_tested)
+		disable_interrupts(chip);
+	priv->irq_tested = true;
+	return rc;
+}
+
+struct tis_vendor_timeout_override {
+	u32 did_vid;
+	unsigned long timeout_us[4];
+};
+
+static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
+	/* Atmel 3204 */
+	{ 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
+			(TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
+};
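+
+/*
+ * Editor's note (illustrative): did_vid packs the 16-bit device ID into
+ * the upper half and the 16-bit vendor ID into the lower half of
+ * TPM_DID_VID, so 0x32041114 matches device 0x3204 from vendor 0x1114
+ * (Atmel).
+ */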
+
+static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
+				    unsigned long *timeout_cap)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	int i, rc;
+	u32 did_vid;
+
+	if (chip->ops->clk_enable != NULL)
+		chip->ops->clk_enable(chip, true);
+
+	rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
+	if (rc < 0) {
+		rc = false;
+		goto out;
+	}
+
+	for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
+		if (vendor_timeout_overrides[i].did_vid != did_vid)
+			continue;
+		memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
+		       sizeof(vendor_timeout_overrides[i].timeout_us));
+		rc = true;
+		goto out;
+	}
+
+	rc = false;
+
+out:
+	if (chip->ops->clk_enable != NULL)
+		chip->ops->clk_enable(chip, false);
+
+	return rc;
+}
+
+/*
+ * Early probing for iTPM with STS_DATA_EXPECT flaw.
+ * Try sending command without itpm flag set and if that
+ * fails, repeat with itpm flag set.
+ */
+static int probe_itpm(struct tpm_chip *chip)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	int rc = 0;
+	static const u8 cmd_getticks[] = {
+		0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
+		0x00, 0x00, 0x00, 0xf1
+	};
+	size_t len = sizeof(cmd_getticks);
+	u16 vendor;
+
+	if (priv->flags & TPM_TIS_ITPM_WORKAROUND)
+		return 0;
+
+	rc = tpm_tis_read16(priv, TPM_DID_VID(0), &vendor);
+	if (rc < 0)
+		return rc;
+
+	/* probe only iTPMs */
+	if (vendor != TPM_VID_INTEL)
+		return 0;
+
+	if (request_locality(chip, 0) != 0)
+		return -EBUSY;
+
+	rc = tpm_tis_send_data(chip, cmd_getticks, len);
+	if (rc == 0)
+		goto out;
+
+	tpm_tis_ready(chip);
+
+	priv->flags |= TPM_TIS_ITPM_WORKAROUND;
+
+	rc = tpm_tis_send_data(chip, cmd_getticks, len);
+	if (rc == 0)
+		dev_info(&chip->dev, "Detected an iTPM.\n");
+	else {
+		priv->flags &= ~TPM_TIS_ITPM_WORKAROUND;
+		rc = -EFAULT;
+	}
+
+out:
+	tpm_tis_ready(chip);
+	release_locality(chip, priv->locality);
+
+	return rc;
+}
+
+static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+	switch (priv->manufacturer_id) {
+	case TPM_VID_WINBOND:
+		return ((status == TPM_STS_VALID) ||
+			(status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
+	case TPM_VID_STM:
+		return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
+	default:
+		return (status == TPM_STS_COMMAND_READY);
+	}
+}
+
+static irqreturn_t tis_int_handler(int dummy, void *dev_id)
+{
+	struct tpm_chip *chip = dev_id;
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	u32 interrupt;
+	int i, rc;
+
+	rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt);
+	if (rc < 0)
+		return IRQ_NONE;
+
+	if (interrupt == 0)
+		return IRQ_NONE;
+
+	priv->irq_tested = true;
+	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
+		wake_up_interruptible(&priv->read_queue);
+	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
+		for (i = 0; i < 5; i++)
+			if (check_locality(chip, i))
+				break;
+	if (interrupt &
+	    (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
+	     TPM_INTF_CMD_READY_INT))
+		wake_up_interruptible(&priv->int_queue);
+
+	/* Clear interrupts handled with TPM_EOI */
+	rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), interrupt);
+	if (rc < 0)
+		return IRQ_NONE;
+
+	tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt);
+	return IRQ_HANDLED;
+}
+
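+/* Issue a harmless command purely so that the TPM delivers an interrupt:
+ * read a fixed property (TPM 2.0) or the timeout capability (TPM 1.2).
+ */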
+static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
+{
+	const char *desc = "attempting to generate an interrupt";
+	u32 cap2;
+	cap_t cap;
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
+	else
+		return tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc,
+				  0);
+}
+
+/* Register the IRQ and issue a command that will cause an interrupt. If an
+ * irq is seen then leave the chip setup for IRQ operation, otherwise reverse
+ * everything and leave in polling mode. Returns 0 on success.
+ */
+static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
+				    int flags, int irq)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	u8 original_int_vec;
+	int rc;
+	u32 int_status;
+
+	if (devm_request_irq(chip->dev.parent, irq, tis_int_handler, flags,
+			     dev_name(&chip->dev), chip) != 0) {
+		dev_info(&chip->dev, "Unable to request irq: %d for probe\n",
+			 irq);
+		return -1;
+	}
+	priv->irq = irq;
+
+	rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
+			   &original_int_vec);
+	if (rc < 0)
+		return rc;
+
+	rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq);
+	if (rc < 0)
+		return rc;
+
+	rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status);
+	if (rc < 0)
+		return rc;
+
+	/* Clear all existing interrupts */
+	rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status);
+	if (rc < 0)
+		return rc;
+
+	/* Turn the interrupts on */
+	rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality),
+			     intmask | TPM_GLOBAL_INT_ENABLE);
+	if (rc < 0)
+		return rc;
+
+	priv->irq_tested = false;
+
+	/* Generate an interrupt by having the core call through to
+	 * tpm_tis_send
+	 */
+	rc = tpm_tis_gen_interrupt(chip);
+	if (rc < 0)
+		return rc;
+
+	/* tpm_tis_send will either confirm the interrupt is working or it
+	 * will call disable_irq which undoes all of the above.
+	 */
+	if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
+		rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality),
+				    original_int_vec);
+		if (rc < 0)
+			return rc;
+
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Try to find the IRQ the TPM is using. This is for legacy x86 systems that
+ * do not have ACPI/etc. We typically expect the interrupt to be declared if
+ * present.
+ */
+static void tpm_tis_probe_irq(struct tpm_chip *chip, u32 intmask)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	u8 original_int_vec;
+	int i, rc;
+
+	rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
+			   &original_int_vec);
+	if (rc < 0)
+		return;
+
+	if (!original_int_vec) {
+		if (IS_ENABLED(CONFIG_X86))
+			for (i = 3; i <= 15; i++)
+				if (!tpm_tis_probe_irq_single(chip, intmask, 0,
+							      i))
+					return;
+	} else if (!tpm_tis_probe_irq_single(chip, intmask, 0,
+					     original_int_vec))
+		return;
+}
+
+void tpm_tis_remove(struct tpm_chip *chip)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	u32 reg = TPM_INT_ENABLE(priv->locality);
+	u32 interrupt;
+	int rc;
+
+	tpm_tis_clkrun_enable(chip, true);
+
+	rc = tpm_tis_read32(priv, reg, &interrupt);
+	if (rc < 0)
+		interrupt = 0;
+
+	tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
+
+	tpm_tis_clkrun_enable(chip, false);
+
+	if (priv->ilb_base_addr)
+		iounmap(priv->ilb_base_addr);
+}
+EXPORT_SYMBOL_GPL(tpm_tis_remove);
+
+/**
+ * tpm_tis_clkrun_enable() - Keep clkrun protocol disabled for entire duration
+ *                           of a single TPM command
+ * @chip:	TPM chip to use
+ * @value:	1 - Disable CLKRUN protocol, so that clocks are free running
+ *		0 - Enable CLKRUN protocol
+ * Call this function directly in tpm_tis_remove() in error or driver removal
+ * path, since the chip->ops is set to NULL in tpm_chip_unregister().
+ */
+static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value)
+{
+	struct tpm_tis_data *data = dev_get_drvdata(&chip->dev);
+	u32 clkrun_val;
+
+	if (!IS_ENABLED(CONFIG_X86) || !is_bsw() ||
+	    !data->ilb_base_addr)
+		return;
+
+	if (value) {
+		data->clkrun_enabled++;
+		if (data->clkrun_enabled > 1)
+			return;
+		clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+		/* Disable LPC CLKRUN# */
+		clkrun_val &= ~LPC_CLKRUN_EN;
+		iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+		/*
+		 * Write any random value on port 0x80 which is on LPC, to make
+		 * sure LPC clock is running before sending any TPM command.
+		 */
+		outb(0xCC, 0x80);
+	} else {
+		data->clkrun_enabled--;
+		if (data->clkrun_enabled)
+			return;
+
+		clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+		/* Enable LPC CLKRUN# */
+		clkrun_val |= LPC_CLKRUN_EN;
+		iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+		/*
+		 * Write any random value on port 0x80 which is on LPC, to make
+		 * sure LPC clock is running before sending any TPM command.
+		 */
+		outb(0xCC, 0x80);
+	}
+}
+
+static const struct tpm_class_ops tpm_tis = {
+	.flags = TPM_OPS_AUTO_STARTUP,
+	.status = tpm_tis_status,
+	.recv = tpm_tis_recv,
+	.send = tpm_tis_send,
+	.cancel = tpm_tis_ready,
+	.update_timeouts = tpm_tis_update_timeouts,
+	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+	.req_canceled = tpm_tis_req_canceled,
+	.request_locality = request_locality,
+	.relinquish_locality = release_locality,
+	.clk_enable = tpm_tis_clkrun_enable,
+};
+
+int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+		      const struct tpm_tis_phy_ops *phy_ops,
+		      acpi_handle acpi_dev_handle)
+{
+	u32 vendor;
+	u32 intfcaps;
+	u32 intmask;
+	u32 clkrun_val;
+	u8 rid;
+	int rc, probe;
+	struct tpm_chip *chip;
+
+	chip = tpmm_chip_alloc(dev, &tpm_tis);
+	if (IS_ERR(chip))
+		return PTR_ERR(chip);
+
+#ifdef CONFIG_ACPI
+	chip->acpi_dev_handle = acpi_dev_handle;
+#endif
+
+	chip->hwrng.quality = priv->rng_quality;
+
+	/* Maximum timeouts */
+	chip->timeout_a = msecs_to_jiffies(TIS_TIMEOUT_A_MAX);
+	chip->timeout_b = msecs_to_jiffies(TIS_TIMEOUT_B_MAX);
+	chip->timeout_c = msecs_to_jiffies(TIS_TIMEOUT_C_MAX);
+	chip->timeout_d = msecs_to_jiffies(TIS_TIMEOUT_D_MAX);
+	priv->phy_ops = phy_ops;
+	dev_set_drvdata(&chip->dev, priv);
+
+	if (is_bsw()) {
+		priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
+					ILB_REMAP_SIZE);
+		if (!priv->ilb_base_addr)
+			return -ENOMEM;
+
+		clkrun_val = ioread32(priv->ilb_base_addr + LPC_CNTRL_OFFSET);
+		/* Check whether CLKRUN# is already disabled on the LPC bus */
+		if (!(clkrun_val & LPC_CLKRUN_EN)) {
+			iounmap(priv->ilb_base_addr);
+			priv->ilb_base_addr = NULL;
+		}
+	}
+
+	if (chip->ops->clk_enable != NULL)
+		chip->ops->clk_enable(chip, true);
+
+	if (wait_startup(chip, 0) != 0) {
+		rc = -ENODEV;
+		goto out_err;
+	}
+
+	/* Take control of the TPM's interrupt hardware and shut it off */
+	rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
+	if (rc < 0)
+		goto out_err;
+
+	intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
+		   TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
+	intmask &= ~TPM_GLOBAL_INT_ENABLE;
+	tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+
+	rc = tpm2_probe(chip);
+	if (rc)
+		goto out_err;
+
+	rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
+	if (rc < 0)
+		goto out_err;
+
+	priv->manufacturer_id = vendor;
+
+	rc = tpm_tis_read8(priv, TPM_RID(0), &rid);
+	if (rc < 0)
+		goto out_err;
+
+	dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
+		 (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
+		 vendor >> 16, rid);
+
+	probe = probe_itpm(chip);
+	if (probe < 0) {
+		rc = -ENODEV;
+		goto out_err;
+	}
+
+	/* Figure out the capabilities */
+	rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps);
+	if (rc < 0)
+		goto out_err;
+
+	dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
+		intfcaps);
+	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
+		dev_dbg(dev, "\tBurst Count Static\n");
+	if (intfcaps & TPM_INTF_CMD_READY_INT)
+		dev_dbg(dev, "\tCommand Ready Int Support\n");
+	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
+		dev_dbg(dev, "\tInterrupt Edge Falling\n");
+	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
+		dev_dbg(dev, "\tInterrupt Edge Rising\n");
+	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
+		dev_dbg(dev, "\tInterrupt Level Low\n");
+	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
+		dev_dbg(dev, "\tInterrupt Level High\n");
+	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
+		dev_dbg(dev, "\tLocality Change Int Support\n");
+	if (intfcaps & TPM_INTF_STS_VALID_INT)
+		dev_dbg(dev, "\tSts Valid Int Support\n");
+	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
+		dev_dbg(dev, "\tData Avail Int Support\n");
+
+	/* INTERRUPT Setup */
+	init_waitqueue_head(&priv->read_queue);
+	init_waitqueue_head(&priv->int_queue);
+	if (irq != -1) {
+		/* Before doing irq testing issue a command to the TPM in polling mode
+		 * to make sure it works. May as well use that command to set the
+		 * proper timeouts for the driver.
+		 */
+		if (tpm_get_timeouts(chip)) {
+			dev_err(dev, "Could not get TPM timeouts and durations\n");
+			rc = -ENODEV;
+			goto out_err;
+		}
+
+		if (irq) {
+			tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
+						 irq);
+			if (!(chip->flags & TPM_CHIP_FLAG_IRQ))
+				dev_err(&chip->dev, FW_BUG
+					"TPM interrupt not working, polling instead\n");
+		} else {
+			tpm_tis_probe_irq(chip, intmask);
+		}
+	}
+
+	rc = tpm_chip_register(chip);
+	if (rc)
+		goto out_err;
+
+	if (chip->ops->clk_enable != NULL)
+		chip->ops->clk_enable(chip, false);
+
+	return 0;
+out_err:
+	if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
+		chip->ops->clk_enable(chip, false);
+
+	tpm_tis_remove(chip);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_tis_core_init);
+
+#ifdef CONFIG_PM_SLEEP
+static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
+{
+	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+	u32 intmask;
+	int rc;
+
+	if (chip->ops->clk_enable != NULL)
+		chip->ops->clk_enable(chip, true);
+
+	/* reenable interrupts that device may have lost or
+	 * BIOS/firmware may have disabled
+	 */
+	rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
+	if (rc < 0)
+		goto out;
+
+	rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
+	if (rc < 0)
+		goto out;
+
+	intmask |= TPM_INTF_CMD_READY_INT
+	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
+	    | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
+
+	tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+
+out:
+	if (chip->ops->clk_enable != NULL)
+		chip->ops->clk_enable(chip, false);
+
+	return;
+}
+
+int tpm_tis_resume(struct device *dev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+	int ret;
+
+	if (chip->flags & TPM_CHIP_FLAG_IRQ)
+		tpm_tis_reenable_interrupts(chip);
+
+	ret = tpm_pm_resume(dev);
+	if (ret)
+		return ret;
+
+	/* TPM 1.2 requires a self-test on resume. tpm_do_selftest()
+	 * actually returns an error code, but historically its result
+	 * has been ignored here.
+	 */
+	if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+		tpm_do_selftest(chip);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_tis_resume);
+#endif
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
new file mode 100644
index 0000000..f48125f
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2005, 2006 IBM Corporation
+ * Copyright (C) 2014, 2015 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __TPM_TIS_CORE_H__
+#define __TPM_TIS_CORE_H__
+
+#include "tpm.h"
+
+enum tis_access {
+	TPM_ACCESS_VALID = 0x80,
+	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
+	TPM_ACCESS_REQUEST_PENDING = 0x04,
+	TPM_ACCESS_REQUEST_USE = 0x02,
+};
+
+enum tis_status {
+	TPM_STS_VALID = 0x80,
+	TPM_STS_COMMAND_READY = 0x40,
+	TPM_STS_GO = 0x20,
+	TPM_STS_DATA_AVAIL = 0x10,
+	TPM_STS_DATA_EXPECT = 0x08,
+};
+
+enum tis_int_flags {
+	TPM_GLOBAL_INT_ENABLE = 0x80000000,
+	TPM_INTF_BURST_COUNT_STATIC = 0x100,
+	TPM_INTF_CMD_READY_INT = 0x080,
+	TPM_INTF_INT_EDGE_FALLING = 0x040,
+	TPM_INTF_INT_EDGE_RISING = 0x020,
+	TPM_INTF_INT_LEVEL_LOW = 0x010,
+	TPM_INTF_INT_LEVEL_HIGH = 0x008,
+	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
+	TPM_INTF_STS_VALID_INT = 0x002,
+	TPM_INTF_DATA_AVAIL_INT = 0x001,
+};
+
+enum tis_defaults {
+	TIS_MEM_LEN = 0x5000,
+	TIS_SHORT_TIMEOUT = 750,	/* ms */
+	TIS_LONG_TIMEOUT = 2000,	/* ms (2 s) */
+};
+
+/* Some timeout values are needed before it is known whether the chip is
+ * TPM 1.0 or TPM 2.0.
+ */
+#define TIS_TIMEOUT_A_MAX	max_t(int, TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_A)
+#define TIS_TIMEOUT_B_MAX	max_t(int, TIS_LONG_TIMEOUT, TPM2_TIMEOUT_B)
+#define TIS_TIMEOUT_C_MAX	max_t(int, TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_C)
+#define TIS_TIMEOUT_D_MAX	max_t(int, TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_D)
+
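+/* Each TIS locality occupies its own 4 KiB page: the locality number is
+ * folded into bits 15:12 of the register offset.
+ */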
+#define	TPM_ACCESS(l)			(0x0000 | ((l) << 12))
+#define	TPM_INT_ENABLE(l)		(0x0008 | ((l) << 12))
+#define	TPM_INT_VECTOR(l)		(0x000C | ((l) << 12))
+#define	TPM_INT_STATUS(l)		(0x0010 | ((l) << 12))
+#define	TPM_INTF_CAPS(l)		(0x0014 | ((l) << 12))
+#define	TPM_STS(l)			(0x0018 | ((l) << 12))
+#define	TPM_STS3(l)			(0x001b | ((l) << 12))
+#define	TPM_DATA_FIFO(l)		(0x0024 | ((l) << 12))
+
+#define	TPM_DID_VID(l)			(0x0F00 | ((l) << 12))
+#define	TPM_RID(l)			(0x0F04 | ((l) << 12))
+
+#define LPC_CNTRL_OFFSET		0x84
+#define LPC_CLKRUN_EN			(1 << 2)
+#define INTEL_LEGACY_BLK_BASE_ADDR	0xFED08000
+#define ILB_REMAP_SIZE			0x100
+
+enum tpm_tis_flags {
+	TPM_TIS_ITPM_WORKAROUND		= BIT(0),
+};
+
+struct tpm_tis_data {
+	u16 manufacturer_id;
+	int locality;
+	int irq;
+	bool irq_tested;
+	unsigned int flags;
+	void __iomem *ilb_base_addr;
+	u16 clkrun_enabled;
+	wait_queue_head_t int_queue;
+	wait_queue_head_t read_queue;
+	const struct tpm_tis_phy_ops *phy_ops;
+	unsigned short rng_quality;
+};
+
+struct tpm_tis_phy_ops {
+	int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
+			  u8 *result);
+	int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
+			   const u8 *value);
+	int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result);
+	int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result);
+	int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src);
+};
+
+static inline int tpm_tis_read_bytes(struct tpm_tis_data *data, u32 addr,
+				     u16 len, u8 *result)
+{
+	return data->phy_ops->read_bytes(data, addr, len, result);
+}
+
+static inline int tpm_tis_read8(struct tpm_tis_data *data, u32 addr, u8 *result)
+{
+	return data->phy_ops->read_bytes(data, addr, 1, result);
+}
+
+static inline int tpm_tis_read16(struct tpm_tis_data *data, u32 addr,
+				 u16 *result)
+{
+	return data->phy_ops->read16(data, addr, result);
+}
+
+static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr,
+				 u32 *result)
+{
+	return data->phy_ops->read32(data, addr, result);
+}
+
+static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr,
+				      u16 len, const u8 *value)
+{
+	return data->phy_ops->write_bytes(data, addr, len, value);
+}
+
+static inline int tpm_tis_write8(struct tpm_tis_data *data, u32 addr, u8 value)
+{
+	return data->phy_ops->write_bytes(data, addr, 1, &value);
+}
+
+static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
+				  u32 value)
+{
+	return data->phy_ops->write32(data, addr, value);
+}
+
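+/* Braswell SoCs (Atom Airmont cores) need the LPC CLKRUN# workaround */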
+static inline bool is_bsw(void)
+{
+#ifdef CONFIG_X86
+	return boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT;
+#else
+	return false;
+#endif
+}
+
+void tpm_tis_remove(struct tpm_chip *chip);
+int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+		      const struct tpm_tis_phy_ops *phy_ops,
+		      acpi_handle acpi_dev_handle);
+
+#ifdef CONFIG_PM_SLEEP
+int tpm_tis_resume(struct device *dev);
+#endif
+
+#endif
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
new file mode 100644
index 0000000..9914f69
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2015 Infineon Technologies AG
+ * Copyright (C) 2016 STMicroelectronics SAS
+ *
+ * Authors:
+ * Peter Huewe <peter.huewe@infineon.com>
+ * Christophe Ricard <christophe-h.ricard@st.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.3, revision 27 via _raw/native
+ * SPI access_.
+ *
+ * It is based on the original tpm_tis device driver from Leendert van
+ * Doorn, Kylene Hall and Jarkko Sakkinen.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/acpi.h>
+#include <linux/freezer.h>
+
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/tpm.h>
+#include "tpm.h"
+#include "tpm_tis_core.h"
+
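+/* The TCG SPI hardware protocol caps one transaction at 64 data bytes,
+ * so larger transfers are split into frames of at most this size.
+ */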
+#define MAX_SPI_FRAMESIZE 64
+
+struct tpm_tis_spi_phy {
+	struct tpm_tis_data priv;
+	struct spi_device *spi_device;
+	u8 *iobuf;
+};
+
+static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
+{
+	return container_of(data, struct tpm_tis_spi_phy, priv);
+}
+
+static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+				u8 *in, const u8 *out)
+{
+	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+	int ret = 0;
+	int i;
+	struct spi_message m;
+	struct spi_transfer spi_xfer;
+	u8 transfer_len;
+
+	spi_bus_lock(phy->spi_device->master);
+
+	while (len) {
+		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
+
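+		/* Build the 4-byte TCG SPI frame header: byte 0 holds the
+		 * read flag (0x80) plus the transfer size minus one, and
+		 * bytes 1-3 hold the 24-bit register address (0xd4xxxx).
+		 */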
+		phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+		phy->iobuf[1] = 0xd4;
+		phy->iobuf[2] = addr >> 8;
+		phy->iobuf[3] = addr;
+
+		memset(&spi_xfer, 0, sizeof(spi_xfer));
+		spi_xfer.tx_buf = phy->iobuf;
+		spi_xfer.rx_buf = phy->iobuf;
+		spi_xfer.len = 4;
+		spi_xfer.cs_change = 1;
+
+		spi_message_init(&m);
+		spi_message_add_tail(&spi_xfer, &m);
+		ret = spi_sync_locked(phy->spi_device, &m);
+		if (ret < 0)
+			goto exit;
+
+		if ((phy->iobuf[3] & 0x01) == 0) {
+			/* The TPM signalled wait states: poll one byte at
+			 * a time until it sets the low bit, indicating it
+			 * is ready for the data phase.
+			 */
+			phy->iobuf[0] = 0;
+
+			for (i = 0; i < TPM_RETRY; i++) {
+				spi_xfer.len = 1;
+				spi_message_init(&m);
+				spi_message_add_tail(&spi_xfer, &m);
+				ret = spi_sync_locked(phy->spi_device, &m);
+				if (ret < 0)
+					goto exit;
+				if (phy->iobuf[0] & 0x01)
+					break;
+			}
+
+			if (i == TPM_RETRY) {
+				ret = -ETIMEDOUT;
+				goto exit;
+			}
+		}
+
+		spi_xfer.cs_change = 0;
+		spi_xfer.len = transfer_len;
+		spi_xfer.delay_usecs = 5;
+
+		if (in) {
+			spi_xfer.tx_buf = NULL;
+		} else if (out) {
+			spi_xfer.rx_buf = NULL;
+			memcpy(phy->iobuf, out, transfer_len);
+			out += transfer_len;
+		}
+
+		spi_message_init(&m);
+		spi_message_add_tail(&spi_xfer, &m);
+		ret = spi_sync_locked(phy->spi_device, &m);
+		if (ret < 0)
+			goto exit;
+
+		if (in) {
+			memcpy(in, phy->iobuf, transfer_len);
+			in += transfer_len;
+		}
+
+		len -= transfer_len;
+	}
+
+exit:
+	spi_bus_unlock(phy->spi_device->master);
+	return ret;
+}
+
+static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
+				  u16 len, u8 *result)
+{
+	return tpm_tis_spi_transfer(data, addr, len, result, NULL);
+}
+
+static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
+				   u16 len, const u8 *value)
+{
+	return tpm_tis_spi_transfer(data, addr, len, NULL, value);
+}
+
+static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
+{
+	__le16 result_le;
+	int rc;
+
+	rc = data->phy_ops->read_bytes(data, addr, sizeof(u16),
+				       (u8 *)&result_le);
+	if (!rc)
+		*result = le16_to_cpu(result_le);
+
+	return rc;
+}
+
+static int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
+{
+	__le32 result_le;
+	int rc;
+
+	rc = data->phy_ops->read_bytes(data, addr, sizeof(u32),
+				       (u8 *)&result_le);
+	if (!rc)
+		*result = le32_to_cpu(result_le);
+
+	return rc;
+}
+
+static int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
+{
+	__le32 value_le;
+	int rc;
+
+	value_le = cpu_to_le32(value);
+	rc = data->phy_ops->write_bytes(data, addr, sizeof(u32),
+					(u8 *)&value_le);
+
+	return rc;
+}
+
+static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
+	.read_bytes = tpm_tis_spi_read_bytes,
+	.write_bytes = tpm_tis_spi_write_bytes,
+	.read16 = tpm_tis_spi_read16,
+	.read32 = tpm_tis_spi_read32,
+	.write32 = tpm_tis_spi_write32,
+};
+
+static int tpm_tis_spi_probe(struct spi_device *dev)
+{
+	struct tpm_tis_spi_phy *phy;
+	int irq;
+
+	phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
+			   GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	phy->spi_device = dev;
+
+	phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+	if (!phy->iobuf)
+		return -ENOMEM;
+
+	/* If the SPI device has an IRQ then use that */
+	if (dev->irq > 0)
+		irq = dev->irq;
+	else
+		irq = -1;
+
+	return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
+				 NULL);
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
+static int tpm_tis_spi_remove(struct spi_device *dev)
+{
+	struct tpm_chip *chip = spi_get_drvdata(dev);
+
+	tpm_chip_unregister(chip);
+	tpm_tis_remove(chip);
+	return 0;
+}
+
+static const struct spi_device_id tpm_tis_spi_id[] = {
+	{"tpm_tis_spi", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);
+
+static const struct of_device_id of_tis_spi_match[] = {
+	{ .compatible = "st,st33htpm-spi", },
+	{ .compatible = "infineon,slb9670", },
+	{ .compatible = "tcg,tpm_tis-spi", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, of_tis_spi_match);
+
+static const struct acpi_device_id acpi_tis_spi_match[] = {
+	{"SMO0768", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);
+
+static struct spi_driver tpm_tis_spi_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "tpm_tis_spi",
+		.pm = &tpm_tis_pm,
+		.of_match_table = of_match_ptr(of_tis_spi_match),
+		.acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
+	},
+	.probe = tpm_tis_spi_probe,
+	.remove = tpm_tis_spi_remove,
+	.id_table = tpm_tis_spi_id,
+};
+module_spi_driver(tpm_tis_spi_driver);
+
+MODULE_DESCRIPTION("TPM Driver for native SPI access");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
new file mode 100644
index 0000000..87a0ce4
--- /dev/null
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -0,0 +1,751 @@
+/*
+ * Copyright (C) 2015, 2016 IBM Corporation
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Stefan Berger <stefanb@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for vTPM (vTPM proxy driver)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/miscdevice.h>
+#include <linux/vtpm_proxy.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <linux/poll.h>
+#include <linux/compat.h>
+
+#include "tpm.h"
+
+#define VTPM_PROXY_REQ_COMPLETE_FLAG  BIT(0)
+
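+/*
+ * One proxy_dev pairs a /dev/tpm%d client device with an anonymous file
+ * descriptor handed to the emulator ('server side'); requests and
+ * responses share the single buffer below, serialized by buf_lock.
+ */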
+struct proxy_dev {
+	struct tpm_chip *chip;
+
+	u32 flags;                   /* public API flags */
+
+	wait_queue_head_t wq;
+
+	struct mutex buf_lock;       /* protect buffer and flags */
+
+	long state;                  /* internal state */
+#define STATE_OPENED_FLAG        BIT(0)
+#define STATE_WAIT_RESPONSE_FLAG BIT(1)  /* waiting for emulator response */
+#define STATE_REGISTERED_FLAG	 BIT(2)
+#define STATE_DRIVER_COMMAND     BIT(3)  /* sending a driver specific command */
+
+	size_t req_len;              /* length of queued TPM request */
+	size_t resp_len;             /* length of queued TPM response */
+	u8 buffer[TPM_BUFSIZE];      /* request/response buffer */
+
+	struct work_struct work;     /* task that retrieves TPM timeouts */
+};
+
+/* all supported flags */
+#define VTPM_PROXY_FLAGS_ALL  (VTPM_PROXY_FLAG_TPM2)
+
+static struct workqueue_struct *workqueue;
+
+static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev);
+
+/*
+ * Functions related to 'server side'
+ */
+
+/**
+ * vtpm_proxy_fops_read - Read TPM commands on 'server side'
+ *
+ * @filp: file pointer
+ * @buf: read buffer
+ * @count: number of bytes to read
+ * @off: offset
+ *
+ * Return:
+ *	Number of bytes read or negative error code
+ */
+static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf,
+				    size_t count, loff_t *off)
+{
+	struct proxy_dev *proxy_dev = filp->private_data;
+	size_t len;
+	int sig, rc;
+
+	sig = wait_event_interruptible(proxy_dev->wq,
+		proxy_dev->req_len != 0 ||
+		!(proxy_dev->state & STATE_OPENED_FLAG));
+	if (sig)
+		return -EINTR;
+
+	mutex_lock(&proxy_dev->buf_lock);
+
+	if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+		mutex_unlock(&proxy_dev->buf_lock);
+		return -EPIPE;
+	}
+
+	len = proxy_dev->req_len;
+
+	if (count < len) {
+		mutex_unlock(&proxy_dev->buf_lock);
+		pr_debug("Invalid size in recv: count=%zd, req_len=%zd\n",
+			 count, len);
+		return -EIO;
+	}
+
+	rc = copy_to_user(buf, proxy_dev->buffer, len);
+	memset(proxy_dev->buffer, 0, len);
+	proxy_dev->req_len = 0;
+
+	if (!rc)
+		proxy_dev->state |= STATE_WAIT_RESPONSE_FLAG;
+
+	mutex_unlock(&proxy_dev->buf_lock);
+
+	if (rc)
+		return -EFAULT;
+
+	return len;
+}
+
+/**
+ * vtpm_proxy_fops_write - Write TPM responses on 'server side'
+ *
+ * @filp: file pointer
+ * @buf: write buffer
+ * @count: number of bytes to write
+ * @off: offset
+ *
+ * Return:
+ *	Number of bytes written or negative error value
+ */
+static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf,
+				     size_t count, loff_t *off)
+{
+	struct proxy_dev *proxy_dev = filp->private_data;
+
+	mutex_lock(&proxy_dev->buf_lock);
+
+	if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+		mutex_unlock(&proxy_dev->buf_lock);
+		return -EPIPE;
+	}
+
+	if (count > sizeof(proxy_dev->buffer) ||
+	    !(proxy_dev->state & STATE_WAIT_RESPONSE_FLAG)) {
+		mutex_unlock(&proxy_dev->buf_lock);
+		return -EIO;
+	}
+
+	proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG;
+
+	proxy_dev->req_len = 0;
+
+	if (copy_from_user(proxy_dev->buffer, buf, count)) {
+		mutex_unlock(&proxy_dev->buf_lock);
+		return -EFAULT;
+	}
+
+	proxy_dev->resp_len = count;
+
+	mutex_unlock(&proxy_dev->buf_lock);
+
+	wake_up_interruptible(&proxy_dev->wq);
+
+	return count;
+}
+
+/*
+ * vtpm_proxy_fops_poll - Poll status on 'server side'
+ *
+ * @filp: file pointer
+ * @wait: poll table
+ *
+ * Return: Poll flags
+ */
+static __poll_t vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
+{
+	struct proxy_dev *proxy_dev = filp->private_data;
+	__poll_t ret;
+
+	poll_wait(filp, &proxy_dev->wq, wait);
+
+	ret = EPOLLOUT;
+
+	mutex_lock(&proxy_dev->buf_lock);
+
+	if (proxy_dev->req_len)
+		ret |= EPOLLIN | EPOLLRDNORM;
+
+	if (!(proxy_dev->state & STATE_OPENED_FLAG))
+		ret |= EPOLLHUP;
+
+	mutex_unlock(&proxy_dev->buf_lock);
+
+	return ret;
+}
+
+/*
+ * vtpm_proxy_fops_open - Open vTPM device on 'server side'
+ *
+ * @filp: file pointer
+ *
+ * Called when setting up the anonymous file descriptor
+ */
+static void vtpm_proxy_fops_open(struct file *filp)
+{
+	struct proxy_dev *proxy_dev = filp->private_data;
+
+	proxy_dev->state |= STATE_OPENED_FLAG;
+}
+
+/**
+ * vtpm_proxy_fops_undo_open - counterpart to vtpm_proxy_fops_open
+ *       Call to undo vtpm_proxy_fops_open
+ *
+ * @proxy_dev: tpm proxy device
+ */
+static void vtpm_proxy_fops_undo_open(struct proxy_dev *proxy_dev)
+{
+	mutex_lock(&proxy_dev->buf_lock);
+
+	proxy_dev->state &= ~STATE_OPENED_FLAG;
+
+	mutex_unlock(&proxy_dev->buf_lock);
+
+	/* no more TPM responses -- wake up anyone waiting for them */
+	wake_up_interruptible(&proxy_dev->wq);
+}
+
+/*
+ * vtpm_proxy_fops_release - Close 'server side'
+ *
+ * @inode: inode
+ * @filp: file pointer
+ * Return:
+ *      Always returns 0.
+ */
+static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp)
+{
+	struct proxy_dev *proxy_dev = filp->private_data;
+
+	filp->private_data = NULL;
+
+	vtpm_proxy_delete_device(proxy_dev);
+
+	return 0;
+}
+
+static const struct file_operations vtpm_proxy_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.read = vtpm_proxy_fops_read,
+	.write = vtpm_proxy_fops_write,
+	.poll = vtpm_proxy_fops_poll,
+	.release = vtpm_proxy_fops_release,
+};
+
+/*
+ * Functions invoked by the core TPM driver to send TPM commands to
+ * 'server side' and receive responses from there.
+ */
+
+/*
+ * Called when core TPM driver reads TPM responses from 'server side'
+ *
+ * @chip: tpm chip to use
+ * @buf: receive buffer
+ * @count: bytes to read
+ * Return:
+ *      Number of TPM response bytes read, negative error value otherwise
+ */
+static int vtpm_proxy_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+	size_t len;
+
+	/* process gone ? */
+	mutex_lock(&proxy_dev->buf_lock);
+
+	if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+		mutex_unlock(&proxy_dev->buf_lock);
+		return -EPIPE;
+	}
+
+	len = proxy_dev->resp_len;
+	if (count < len) {
+		dev_err(&chip->dev,
+			"Invalid size in recv: count=%zd, resp_len=%zd\n",
+			count, len);
+		len = -EIO;
+		goto out;
+	}
+
+	memcpy(buf, proxy_dev->buffer, len);
+	proxy_dev->resp_len = 0;
+
+out:
+	mutex_unlock(&proxy_dev->buf_lock);
+
+	return len;
+}
+
+static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
+					u8 *buf, size_t count)
+{
+	struct tpm_input_header *hdr = (struct tpm_input_header *)buf;
+
+	if (count < sizeof(struct tpm_input_header))
+		return 0;
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+		switch (be32_to_cpu(hdr->ordinal)) {
+		case TPM2_CC_SET_LOCALITY:
+			return 1;
+		}
+	} else {
+		switch (be32_to_cpu(hdr->ordinal)) {
+		case TPM_ORD_SET_LOCALITY:
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Called when core TPM driver forwards TPM requests to 'server side'.
+ *
+ * @chip: tpm chip to use
+ * @buf: send buffer
+ * @count: bytes to send
+ *
+ * Return:
+ *      0 in case of success, negative error value otherwise.
+ */
+static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+	int rc = 0;
+
+	if (count > sizeof(proxy_dev->buffer)) {
+		dev_err(&chip->dev,
+			"Invalid size in send: count=%zd, buffer size=%zd\n",
+			count, sizeof(proxy_dev->buffer));
+		return -EIO;
+	}
+
+	if (!(proxy_dev->state & STATE_DRIVER_COMMAND) &&
+	    vtpm_proxy_is_driver_command(chip, buf, count))
+		return -EFAULT;
+
+	mutex_lock(&proxy_dev->buf_lock);
+
+	if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+		mutex_unlock(&proxy_dev->buf_lock);
+		return -EPIPE;
+	}
+
+	proxy_dev->resp_len = 0;
+
+	proxy_dev->req_len = count;
+	memcpy(proxy_dev->buffer, buf, count);
+
+	proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG;
+
+	mutex_unlock(&proxy_dev->buf_lock);
+
+	wake_up_interruptible(&proxy_dev->wq);
+
+	return rc;
+}
+
+static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip)
+{
+	/* not supported */
+}
+
+static u8 vtpm_proxy_tpm_op_status(struct tpm_chip *chip)
+{
+	struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+
+	if (proxy_dev->resp_len)
+		return VTPM_PROXY_REQ_COMPLETE_FLAG;
+
+	return 0;
+}
+
+static bool vtpm_proxy_tpm_req_canceled(struct tpm_chip  *chip, u8 status)
+{
+	struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+	bool ret;
+
+	mutex_lock(&proxy_dev->buf_lock);
+
+	ret = !(proxy_dev->state & STATE_OPENED_FLAG);
+
+	mutex_unlock(&proxy_dev->buf_lock);
+
+	return ret;
+}
+
+static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality)
+{
+	struct tpm_buf buf;
+	int rc;
+	const struct tpm_output_header *header;
+	struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS,
+				  TPM2_CC_SET_LOCALITY);
+	else
+		rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND,
+				  TPM_ORD_SET_LOCALITY);
+	if (rc)
+		return rc;
+	tpm_buf_append_u8(&buf, locality);
+
+	proxy_dev->state |= STATE_DRIVER_COMMAND;
+
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, tpm_buf_length(&buf), 0,
+			      TPM_TRANSMIT_NESTED,
+			      "attempting to set locality");
+
+	proxy_dev->state &= ~STATE_DRIVER_COMMAND;
+
+	if (rc < 0) {
+		locality = rc;
+		goto out;
+	}
+
+	header = (const struct tpm_output_header *)buf.data;
+	rc = be32_to_cpu(header->return_code);
+	if (rc)
+		locality = -1;
+
+out:
+	tpm_buf_destroy(&buf);
+
+	return locality;
+}
+
+static const struct tpm_class_ops vtpm_proxy_tpm_ops = {
+	.flags = TPM_OPS_AUTO_STARTUP,
+	.recv = vtpm_proxy_tpm_op_recv,
+	.send = vtpm_proxy_tpm_op_send,
+	.cancel = vtpm_proxy_tpm_op_cancel,
+	.status = vtpm_proxy_tpm_op_status,
+	.req_complete_mask = VTPM_PROXY_REQ_COMPLETE_FLAG,
+	.req_complete_val = VTPM_PROXY_REQ_COMPLETE_FLAG,
+	.req_canceled = vtpm_proxy_tpm_req_canceled,
+	.request_locality = vtpm_proxy_request_locality,
+};
+
+/*
+ * Code related to the startup of the TPM 2 and startup of TPM 1.2 +
+ * retrieval of timeouts and durations.
+ */
+
+static void vtpm_proxy_work(struct work_struct *work)
+{
+	struct proxy_dev *proxy_dev = container_of(work, struct proxy_dev,
+						   work);
+	int rc;
+
+	rc = tpm_chip_register(proxy_dev->chip);
+	if (rc)
+		vtpm_proxy_fops_undo_open(proxy_dev);
+	else
+		proxy_dev->state |= STATE_REGISTERED_FLAG;
+}
+
+/*
+ * vtpm_proxy_work_stop: make sure the work has finished
+ *
+ * This function is useful when user space closed the fd
+ * while the driver still determines timeouts.
+ */
+static void vtpm_proxy_work_stop(struct proxy_dev *proxy_dev)
+{
+	vtpm_proxy_fops_undo_open(proxy_dev);
+	flush_work(&proxy_dev->work);
+}
+
+/*
+ * vtpm_proxy_work_start: Schedule the work for TPM 1.2 & 2 initialization
+ */
+static inline void vtpm_proxy_work_start(struct proxy_dev *proxy_dev)
+{
+	queue_work(workqueue, &proxy_dev->work);
+}
+
+/*
+ * Code related to creation and deletion of device pairs
+ */
+static struct proxy_dev *vtpm_proxy_create_proxy_dev(void)
+{
+	struct proxy_dev *proxy_dev;
+	struct tpm_chip *chip;
+	int err;
+
+	proxy_dev = kzalloc(sizeof(*proxy_dev), GFP_KERNEL);
+	if (proxy_dev == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	init_waitqueue_head(&proxy_dev->wq);
+	mutex_init(&proxy_dev->buf_lock);
+	INIT_WORK(&proxy_dev->work, vtpm_proxy_work);
+
+	chip = tpm_chip_alloc(NULL, &vtpm_proxy_tpm_ops);
+	if (IS_ERR(chip)) {
+		err = PTR_ERR(chip);
+		goto err_proxy_dev_free;
+	}
+	dev_set_drvdata(&chip->dev, proxy_dev);
+
+	proxy_dev->chip = chip;
+
+	return proxy_dev;
+
+err_proxy_dev_free:
+	kfree(proxy_dev);
+
+	return ERR_PTR(err);
+}
+
+/*
+ * Undo what has been done in vtpm_proxy_create_proxy_dev
+ */
+static inline void vtpm_proxy_delete_proxy_dev(struct proxy_dev *proxy_dev)
+{
+	put_device(&proxy_dev->chip->dev); /* frees chip */
+	kfree(proxy_dev);
+}
+
+/*
+ * Create a /dev/tpm%d and 'server side' file descriptor pair
+ *
+ * Return:
+ *      Returns file pointer on success, an error value otherwise
+ */
+static struct file *vtpm_proxy_create_device(
+				 struct vtpm_proxy_new_dev *vtpm_new_dev)
+{
+	struct proxy_dev *proxy_dev;
+	int rc, fd;
+	struct file *file;
+
+	if (vtpm_new_dev->flags & ~VTPM_PROXY_FLAGS_ALL)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	proxy_dev = vtpm_proxy_create_proxy_dev();
+	if (IS_ERR(proxy_dev))
+		return ERR_CAST(proxy_dev);
+
+	proxy_dev->flags = vtpm_new_dev->flags;
+
+	/* setup an anonymous file for the server-side */
+	fd = get_unused_fd_flags(O_RDWR);
+	if (fd < 0) {
+		rc = fd;
+		goto err_delete_proxy_dev;
+	}
+
+	file = anon_inode_getfile("[vtpms]", &vtpm_proxy_fops, proxy_dev,
+				  O_RDWR);
+	if (IS_ERR(file)) {
+		rc = PTR_ERR(file);
+		goto err_put_unused_fd;
+	}
+
+	/* from now on we can unwind with put_unused_fd() + fput() */
+	/* simulate an open() on the server side */
+	vtpm_proxy_fops_open(file);
+
+	if (proxy_dev->flags & VTPM_PROXY_FLAG_TPM2)
+		proxy_dev->chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+	vtpm_proxy_work_start(proxy_dev);
+
+	vtpm_new_dev->fd = fd;
+	vtpm_new_dev->major = MAJOR(proxy_dev->chip->dev.devt);
+	vtpm_new_dev->minor = MINOR(proxy_dev->chip->dev.devt);
+	vtpm_new_dev->tpm_num = proxy_dev->chip->dev_num;
+
+	return file;
+
+err_put_unused_fd:
+	put_unused_fd(fd);
+
+err_delete_proxy_dev:
+	vtpm_proxy_delete_proxy_dev(proxy_dev);
+
+	return ERR_PTR(rc);
+}
+
+/*
+ * Counterpart to vtpm_proxy_create_device.
+ */
+static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
+{
+	vtpm_proxy_work_stop(proxy_dev);
+
+	/*
+	 * A client may hold the 'ops' lock, so let it know that the server
+	 * side shuts down before we try to grab the 'ops' lock when
+	 * unregistering the chip.
+	 */
+	vtpm_proxy_fops_undo_open(proxy_dev);
+
+	if (proxy_dev->state & STATE_REGISTERED_FLAG)
+		tpm_chip_unregister(proxy_dev->chip);
+
+	vtpm_proxy_delete_proxy_dev(proxy_dev);
+}
+
+/*
+ * Code related to the control device /dev/vtpmx
+ */
+
+/**
+ * vtpmx_ioc_new_dev - handler for the %VTPM_PROXY_IOC_NEW_DEV ioctl
+ * @file:	/dev/vtpmx
+ * @ioctl:	the ioctl number
+ * @arg:	pointer to the struct vtpm_proxy_new_dev
+ *
+ * Creates an anonymous file that is used by the process acting as a TPM to
+ * communicate with the client processes. The function will also add a new TPM
+ * device through which data is proxied to this TPM acting process. The caller
+ * will be provided with a file descriptor to communicate with the clients and
+ * major and minor numbers for the TPM device.
+ */
+static long vtpmx_ioc_new_dev(struct file *file, unsigned int ioctl,
+			      unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
+	struct vtpm_proxy_new_dev vtpm_new_dev;
+	struct file *vtpm_file;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	vtpm_new_dev_p = argp;
+
+	if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
+			   sizeof(vtpm_new_dev)))
+		return -EFAULT;
+
+	vtpm_file = vtpm_proxy_create_device(&vtpm_new_dev);
+	if (IS_ERR(vtpm_file))
+		return PTR_ERR(vtpm_file);
+
+	if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
+			 sizeof(vtpm_new_dev))) {
+		put_unused_fd(vtpm_new_dev.fd);
+		fput(vtpm_file);
+		return -EFAULT;
+	}
+
+	fd_install(vtpm_new_dev.fd, vtpm_file);
+	return 0;
+}
+
+/*
+ * vtpmx_fops_ioctl: ioctl on /dev/vtpmx
+ *
+ * Return:
+ *      Returns 0 on success, a negative error code otherwise.
+ */
+static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl,
+			     unsigned long arg)
+{
+	switch (ioctl) {
+	case VTPM_PROXY_IOC_NEW_DEV:
+		return vtpmx_ioc_new_dev(f, ioctl, arg);
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+#ifdef CONFIG_COMPAT
+static long vtpmx_fops_compat_ioctl(struct file *f, unsigned int ioctl,
+					  unsigned long arg)
+{
+	return vtpmx_fops_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations vtpmx_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = vtpmx_fops_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = vtpmx_fops_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct miscdevice vtpmx_miscdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "vtpmx",
+	.fops = &vtpmx_fops,
+};
+
+static int vtpmx_init(void)
+{
+	return misc_register(&vtpmx_miscdev);
+}
+
+static void vtpmx_cleanup(void)
+{
+	misc_deregister(&vtpmx_miscdev);
+}
+
+static int __init vtpm_module_init(void)
+{
+	int rc;
+
+	rc = vtpmx_init();
+	if (rc) {
+		pr_err("couldn't create vtpmx device\n");
+		return rc;
+	}
+
+	workqueue = create_workqueue("tpm-vtpm");
+	if (!workqueue) {
+		pr_err("couldn't create workqueue\n");
+		rc = -ENOMEM;
+		goto err_vtpmx_cleanup;
+	}
+
+	return 0;
+
+err_vtpmx_cleanup:
+	vtpmx_cleanup();
+
+	return rc;
+}
+
+static void __exit vtpm_module_exit(void)
+{
+	destroy_workqueue(workqueue);
+	vtpmx_cleanup();
+}
+
+module_init(vtpm_module_init);
+module_exit(vtpm_module_exit);
+
+MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
+MODULE_DESCRIPTION("vTPM Driver");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c
new file mode 100644
index 0000000..1a0e97a
--- /dev/null
+++ b/drivers/char/tpm/tpmrm-dev.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2017 James.Bottomley@HansenPartnership.com
+ *
+ * GPLv2
+ */
+#include <linux/slab.h>
+#include "tpm-dev.h"
+
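+/*
+ * Each open of /dev/tpmrm%d gets its own TPM2 space: an isolated set of
+ * handles and sessions that the in-kernel resource manager swaps in and
+ * out around every command.
+ */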
+struct tpmrm_priv {
+	struct file_priv priv;
+	struct tpm_space space;
+};
+
+static int tpmrm_open(struct inode *inode, struct file *file)
+{
+	struct tpm_chip *chip;
+	struct tpmrm_priv *priv;
+	int rc;
+
+	chip = container_of(inode->i_cdev, struct tpm_chip, cdevs);
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (priv == NULL)
+		return -ENOMEM;
+
+	rc = tpm2_init_space(&priv->space);
+	if (rc) {
+		kfree(priv);
+		return -ENOMEM;
+	}
+
+	tpm_common_open(file, chip, &priv->priv);
+
+	return 0;
+}
+
+static int tpmrm_release(struct inode *inode, struct file *file)
+{
+	struct file_priv *fpriv = file->private_data;
+	struct tpmrm_priv *priv = container_of(fpriv, struct tpmrm_priv, priv);
+
+	tpm_common_release(file, fpriv);
+	tpm2_del_space(fpriv->chip, &priv->space);
+	kfree(priv);
+
+	return 0;
+}
+
+static ssize_t tpmrm_write(struct file *file, const char __user *buf,
+		   size_t size, loff_t *off)
+{
+	struct file_priv *fpriv = file->private_data;
+	struct tpmrm_priv *priv = container_of(fpriv, struct tpmrm_priv, priv);
+
+	return tpm_common_write(file, buf, size, off, &priv->space);
+}
+
+const struct file_operations tpmrm_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.open = tpmrm_open,
+	.read = tpm_common_read,
+	.write = tpmrm_write,
+	.release = tpmrm_release,
+};
+
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
new file mode 100644
index 0000000..b150f87
--- /dev/null
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -0,0 +1,457 @@
+/*
+ * Implementation of the Xen vTPM device frontend
+ *
+ * Author:  Daniel De Graaf <dgdegra@tycho.nsa.gov>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/freezer.h>
+#include <xen/xen.h>
+#include <xen/events.h>
+#include <xen/interface/io/tpmif.h>
+#include <xen/grant_table.h>
+#include <xen/xenbus.h>
+#include <xen/page.h>
+#include "tpm.h"
+#include <xen/platform_pci.h>
+
+struct tpm_private {
+	struct tpm_chip *chip;
+	struct xenbus_device *dev;
+
+	struct vtpm_shared_page *shr;
+
+	unsigned int evtchn;
+	int ring_ref;
+	domid_t backend_id;
+	int irq;
+	wait_queue_head_t read_queue;
+};
+
+enum status_bits {
+	VTPM_STATUS_RUNNING  = 0x1,
+	VTPM_STATUS_IDLE     = 0x2,
+	VTPM_STATUS_RESULT   = 0x4,
+	VTPM_STATUS_CANCELED = 0x8,
+};
+
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+					bool check_cancel, bool *canceled)
+{
+	u8 status = chip->ops->status(chip);
+
+	*canceled = false;
+	if ((status & mask) == mask)
+		return true;
+	if (check_cancel && chip->ops->req_canceled(chip, status)) {
+		*canceled = true;
+		return true;
+	}
+	return false;
+}
+
+static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+		unsigned long timeout, wait_queue_head_t *queue,
+		bool check_cancel)
+{
+	unsigned long stop;
+	long rc;
+	u8 status;
+	bool canceled = false;
+
+	/* check current status */
+	status = chip->ops->status(chip);
+	if ((status & mask) == mask)
+		return 0;
+
+	stop = jiffies + timeout;
+
+	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+		timeout = stop - jiffies;
+		if ((long)timeout <= 0)
+			return -ETIME;
+		rc = wait_event_interruptible_timeout(*queue,
+			wait_for_tpm_stat_cond(chip, mask, check_cancel,
+					       &canceled),
+			timeout);
+		if (rc > 0) {
+			if (canceled)
+				return -ECANCELED;
+			return 0;
+		}
+		if (rc == -ERESTARTSYS && freezing(current)) {
+			clear_thread_flag(TIF_SIGPENDING);
+			goto again;
+		}
+	} else {
+		do {
+			tpm_msleep(TPM_TIMEOUT);
+			status = chip->ops->status(chip);
+			if ((status & mask) == mask)
+				return 0;
+		} while (time_before(jiffies, stop));
+	}
+	return -ETIME;
+}
+
+static u8 vtpm_status(struct tpm_chip *chip)
+{
+	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
+	switch (priv->shr->state) {
+	case VTPM_STATE_IDLE:
+		return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
+	case VTPM_STATE_FINISH:
+		return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
+	case VTPM_STATE_SUBMIT:
+	case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
+		return VTPM_STATUS_RUNNING;
+	default:
+		return 0;
+	}
+}
+
+static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+	return status & VTPM_STATUS_CANCELED;
+}
+
+static void vtpm_cancel(struct tpm_chip *chip)
+{
+	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
+	priv->shr->state = VTPM_STATE_CANCEL;
+	wmb();
+	notify_remote_via_evtchn(priv->evtchn);
+}
+
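+/* The shared page starts with the vtpm_shared_page header, followed by
+ * nr_extra_pages grant references (one u32 each); command data begins
+ * right after those.
+ */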
+static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
+{
+	return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
+}
+
+static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
+	struct vtpm_shared_page *shr = priv->shr;
+	unsigned int offset = shr_data_offset(shr);
+
+	u32 ordinal;
+	unsigned long duration;
+
+	if (offset > PAGE_SIZE)
+		return -EINVAL;
+
+	if (offset + count > PAGE_SIZE)
+		return -EINVAL;
+
+	/* Wait for completion of any existing command or cancellation */
+	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->timeout_c,
+			&priv->read_queue, true) < 0) {
+		vtpm_cancel(chip);
+		return -ETIME;
+	}
+
+	memcpy(offset + (u8 *)shr, buf, count);
+	shr->length = count;
+	barrier();
+	shr->state = VTPM_STATE_SUBMIT;
+	wmb();
+	notify_remote_via_evtchn(priv->evtchn);
+
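+	/* Wait for as long as the spec allows this particular ordinal */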
+	ordinal = be32_to_cpu(((struct tpm_input_header*)buf)->ordinal);
+	duration = tpm_calc_ordinal_duration(chip, ordinal);
+
+	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
+			&priv->read_queue, true) < 0) {
+		/* got a signal or timeout, try to cancel */
+		vtpm_cancel(chip);
+		return -ETIME;
+	}
+
+	return count;
+}
+
+static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
+	struct vtpm_shared_page *shr = priv->shr;
+	unsigned int offset = shr_data_offset(shr);
+	size_t length = shr->length;
+
+	if (shr->state == VTPM_STATE_IDLE)
+		return -ECANCELED;
+
+	/* In theory the wait at the end of _send makes this one unnecessary */
+	if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->timeout_c,
+			&priv->read_queue, true) < 0) {
+		vtpm_cancel(chip);
+		return -ETIME;
+	}
+
+	if (offset > PAGE_SIZE)
+		return -EIO;
+
+	if (offset + length > PAGE_SIZE)
+		length = PAGE_SIZE - offset;
+
+	if (length > count)
+		length = count;
+
+	memcpy(buf, offset + (u8 *)shr, length);
+
+	return length;
+}
+
+static const struct tpm_class_ops tpm_vtpm = {
+	.status = vtpm_status,
+	.recv = vtpm_recv,
+	.send = vtpm_send,
+	.cancel = vtpm_cancel,
+	.req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
+	.req_complete_val  = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
+	.req_canceled      = vtpm_req_canceled,
+};
+
+static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
+{
+	struct tpm_private *priv = dev_id;
+
+	switch (priv->shr->state) {
+	case VTPM_STATE_IDLE:
+	case VTPM_STATE_FINISH:
+		wake_up_interruptible(&priv->read_queue);
+		break;
+	case VTPM_STATE_SUBMIT:
+	case VTPM_STATE_CANCEL:
+	default:
+		break;
+	}
+	return IRQ_HANDLED;
+}
+
+static int setup_chip(struct device *dev, struct tpm_private *priv)
+{
+	struct tpm_chip *chip;
+
+	chip = tpmm_chip_alloc(dev, &tpm_vtpm);
+	if (IS_ERR(chip))
+		return PTR_ERR(chip);
+
+	init_waitqueue_head(&priv->read_queue);
+
+	priv->chip = chip;
+	dev_set_drvdata(&chip->dev, priv);
+
+	return 0;
+}
+
+/* caller must clean up in case of errors */
+static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
+{
+	struct xenbus_transaction xbt;
+	const char *message = NULL;
+	int rv;
+	grant_ref_t gref;
+
+	priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+	if (!priv->shr) {
+		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
+		return -ENOMEM;
+	}
+
+	rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
+	if (rv < 0)
+		return rv;
+
+	priv->ring_ref = gref;
+
+	rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
+	if (rv)
+		return rv;
+
+	rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
+				       "tpmif", priv);
+	if (rv <= 0) {
+		xenbus_dev_fatal(dev, rv, "allocating TPM irq");
+		return rv;
+	}
+	priv->irq = rv;
+
+ again:
+	rv = xenbus_transaction_start(&xbt);
+	if (rv) {
+		xenbus_dev_fatal(dev, rv, "starting transaction");
+		return rv;
+	}
+
+	rv = xenbus_printf(xbt, dev->nodename,
+			"ring-ref", "%u", priv->ring_ref);
+	if (rv) {
+		message = "writing ring-ref";
+		goto abort_transaction;
+	}
+
+	rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+			priv->evtchn);
+	if (rv) {
+		message = "writing event-channel";
+		goto abort_transaction;
+	}
+
+	rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
+	if (rv) {
+		message = "writing feature-protocol-v2";
+		goto abort_transaction;
+	}
+
+	rv = xenbus_transaction_end(xbt, 0);
+	if (rv == -EAGAIN)
+		goto again;
+	if (rv) {
+		xenbus_dev_fatal(dev, rv, "completing transaction");
+		return rv;
+	}
+
+	xenbus_switch_state(dev, XenbusStateInitialised);
+
+	return 0;
+
+ abort_transaction:
+	xenbus_transaction_end(xbt, 1);
+	if (message)
+		xenbus_dev_error(dev, rv, "%s", message);
+
+	return rv;
+}
+
+static void ring_free(struct tpm_private *priv)
+{
+	if (!priv)
+		return;
+
+	if (priv->ring_ref)
+		gnttab_end_foreign_access(priv->ring_ref, 0,
+				(unsigned long)priv->shr);
+	else
+		free_page((unsigned long)priv->shr);
+
+	if (priv->irq)
+		unbind_from_irqhandler(priv->irq, priv);
+
+	kfree(priv);
+}
+
+static int tpmfront_probe(struct xenbus_device *dev,
+		const struct xenbus_device_id *id)
+{
+	struct tpm_private *priv;
+	int rv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
+		return -ENOMEM;
+	}
+
+	rv = setup_chip(&dev->dev, priv);
+	if (rv) {
+		kfree(priv);
+		return rv;
+	}
+
+	rv = setup_ring(dev, priv);
+	if (rv) {
+		ring_free(priv);
+		return rv;
+	}
+
+	tpm_get_timeouts(priv->chip);
+
+	return tpm_chip_register(priv->chip);
+}
+
+static int tpmfront_remove(struct xenbus_device *dev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
+	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
+	tpm_chip_unregister(chip);
+	ring_free(priv);
+	dev_set_drvdata(&chip->dev, NULL);
+	return 0;
+}
+
+static int tpmfront_resume(struct xenbus_device *dev)
+{
+	/* A suspend/resume/migrate will interrupt a vTPM anyway */
+	tpmfront_remove(dev);
+	return tpmfront_probe(dev, NULL);
+}
+
+static void backend_changed(struct xenbus_device *dev,
+		enum xenbus_state backend_state)
+{
+	switch (backend_state) {
+	case XenbusStateInitialised:
+	case XenbusStateConnected:
+		if (dev->state == XenbusStateConnected)
+			break;
+
+		if (!xenbus_read_unsigned(dev->otherend, "feature-protocol-v2",
+					  0)) {
+			xenbus_dev_fatal(dev, -EINVAL,
+					"vTPM protocol 2 required");
+			return;
+		}
+		xenbus_switch_state(dev, XenbusStateConnected);
+		break;
+
+	case XenbusStateClosing:
+	case XenbusStateClosed:
+		device_unregister(&dev->dev);
+		xenbus_frontend_closed(dev);
+		break;
+	default:
+		break;
+	}
+}
+
+static const struct xenbus_device_id tpmfront_ids[] = {
+	{ "vtpm" },
+	{ "" }
+};
+MODULE_ALIAS("xen:vtpm");
+
+static struct xenbus_driver tpmfront_driver = {
+	.ids = tpmfront_ids,
+	.probe = tpmfront_probe,
+	.remove = tpmfront_remove,
+	.resume = tpmfront_resume,
+	.otherend_changed = backend_changed,
+};
+
+static int __init xen_tpmfront_init(void)
+{
+	if (!xen_domain())
+		return -ENODEV;
+
+	if (!xen_has_pv_devices())
+		return -ENODEV;
+
+	return xenbus_register_frontend(&tpmfront_driver);
+}
+module_init(xen_tpmfront_init);
+
+static void __exit xen_tpmfront_exit(void)
+{
+	xenbus_unregister_driver(&tpmfront_driver);
+}
+module_exit(xen_tpmfront_exit);
+
+MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
+MODULE_DESCRIPTION("Xen vTPM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
new file mode 100644
index 0000000..67549ce
--- /dev/null
+++ b/drivers/char/ttyprintk.c
@@ -0,0 +1,221 @@
+/*
+ *  linux/drivers/char/ttyprintk.c
+ *
+ *  Copyright (C) 2010  Samo Pogacnik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+/*
+ * This pseudo device allows the user to feed messages into printk. This
+ * makes it possible to store "console" messages inline with kernel
+ * messages, e.g. for a better analysis of the boot process.
+ */
+
+#include <linux/device.h>
+#include <linux/serial.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+
+struct ttyprintk_port {
+	struct tty_port port;
+	struct mutex port_write_mutex;
+};
+
+static struct ttyprintk_port tpk_port;
+
+/*
+ * Our simple preformatting supports transparent output of (time-stamped)
+ * printk messages (also suitable for a logging service):
+ * - any cr is replaced by nl
+ * - adds a ttyprintk source tag in front of each line
+ * - an overlong message is fragmented, with '\' + nl between the fragments
+ * - TPK_STR_SIZE isn't really the write_room limiting factor, because
+ *   it is emptied on the fly during preformatting.
+ */
+#define TPK_STR_SIZE 508 /* should be bigger than the max expected line length */
+#define TPK_MAX_ROOM 4096 /* we could assume 4K for instance */
+static int tpk_curr;
+
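+/* +4 leaves slack for the fragmentation '\' and the trailing '\0' */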
+static char tpk_buffer[TPK_STR_SIZE + 4];
+
+static void tpk_flush(void)
+{
+	if (tpk_curr > 0) {
+		tpk_buffer[tpk_curr] = '\0';
+		pr_info("[U] %s\n", tpk_buffer);
+		tpk_curr = 0;
+	}
+}
+
+static int tpk_printk(const unsigned char *buf, int count)
+{
+	int i = tpk_curr;
+
+	if (buf == NULL) {
+		tpk_flush();
+		return i;
+	}
+
+	for (i = 0; i < count; i++) {
+		if (tpk_curr >= TPK_STR_SIZE) {
+			/* end of tmp buffer reached: cut the message in two */
+			tpk_buffer[tpk_curr++] = '\\';
+			tpk_flush();
+		}
+
+		switch (buf[i]) {
+		case '\r':
+			tpk_flush();
+			if ((i + 1) < count && buf[i + 1] == '\n')
+				i++;
+			break;
+		case '\n':
+			tpk_flush();
+			break;
+		default:
+			tpk_buffer[tpk_curr++] = buf[i];
+			break;
+		}
+	}
+
+	return count;
+}
+
+/*
+ * TTY operations open function.
+ */
+static int tpk_open(struct tty_struct *tty, struct file *filp)
+{
+	tty->driver_data = &tpk_port;
+
+	return tty_port_open(&tpk_port.port, tty, filp);
+}
+
+/*
+ * TTY operations close function.
+ */
+static void tpk_close(struct tty_struct *tty, struct file *filp)
+{
+	struct ttyprintk_port *tpkp = tty->driver_data;
+
+	mutex_lock(&tpkp->port_write_mutex);
+	/* flush tpk_printk buffer */
+	tpk_printk(NULL, 0);
+	mutex_unlock(&tpkp->port_write_mutex);
+
+	tty_port_close(&tpkp->port, tty, filp);
+}
+
+/*
+ * TTY operations write function.
+ */
+static int tpk_write(struct tty_struct *tty,
+		const unsigned char *buf, int count)
+{
+	struct ttyprintk_port *tpkp = tty->driver_data;
+	int ret;
+
+	/* exclusive use of tpk_printk within this tty */
+	mutex_lock(&tpkp->port_write_mutex);
+	ret = tpk_printk(buf, count);
+	mutex_unlock(&tpkp->port_write_mutex);
+
+	return ret;
+}
+
+/*
+ * TTY operations write_room function.
+ */
+static int tpk_write_room(struct tty_struct *tty)
+{
+	return TPK_MAX_ROOM;
+}
+
+/*
+ * TTY operations ioctl function.
+ */
+static int tpk_ioctl(struct tty_struct *tty,
+			unsigned int cmd, unsigned long arg)
+{
+	struct ttyprintk_port *tpkp = tty->driver_data;
+
+	if (!tpkp)
+		return -EINVAL;
+
+	switch (cmd) {
+	/* Stop TIOCCONS */
+	case TIOCCONS:
+		return -EOPNOTSUPP;
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return 0;
+}
+
+static const struct tty_operations ttyprintk_ops = {
+	.open = tpk_open,
+	.close = tpk_close,
+	.write = tpk_write,
+	.write_room = tpk_write_room,
+	.ioctl = tpk_ioctl,
+};
+
+static const struct tty_port_operations null_ops = { };
+
+static struct tty_driver *ttyprintk_driver;
+
+static int __init ttyprintk_init(void)
+{
+	int ret = -ENOMEM;
+
+	mutex_init(&tpk_port.port_write_mutex);
+
+	ttyprintk_driver = tty_alloc_driver(1,
+			TTY_DRIVER_RESET_TERMIOS |
+			TTY_DRIVER_REAL_RAW |
+			TTY_DRIVER_UNNUMBERED_NODE);
+	if (IS_ERR(ttyprintk_driver))
+		return PTR_ERR(ttyprintk_driver);
+
+	tty_port_init(&tpk_port.port);
+	tpk_port.port.ops = &null_ops;
+
+	ttyprintk_driver->driver_name = "ttyprintk";
+	ttyprintk_driver->name = "ttyprintk";
+	ttyprintk_driver->major = TTYAUX_MAJOR;
+	ttyprintk_driver->minor_start = 3;
+	ttyprintk_driver->type = TTY_DRIVER_TYPE_CONSOLE;
+	ttyprintk_driver->init_termios = tty_std_termios;
+	ttyprintk_driver->init_termios.c_oflag = OPOST | OCRNL | ONOCR | ONLRET;
+	tty_set_operations(ttyprintk_driver, &ttyprintk_ops);
+	tty_port_link_device(&tpk_port.port, ttyprintk_driver, 0);
+
+	ret = tty_register_driver(ttyprintk_driver);
+	if (ret < 0) {
+		printk(KERN_ERR "Couldn't register ttyprintk driver\n");
+		goto error;
+	}
+
+	return 0;
+
+error:
+	put_tty_driver(ttyprintk_driver);
+	tty_port_destroy(&tpk_port.port);
+	return ret;
+}
+
+static void __exit ttyprintk_exit(void)
+{
+	tty_unregister_driver(ttyprintk_driver);
+	put_tty_driver(ttyprintk_driver);
+	tty_port_destroy(&tpk_port.port);
+}
+
+device_initcall(ttyprintk_init);
+module_exit(ttyprintk_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/uv_mmtimer.c b/drivers/char/uv_mmtimer.c
new file mode 100644
index 0000000..956ebe2
--- /dev/null
+++ b/drivers/char/uv_mmtimer.c
@@ -0,0 +1,220 @@
+/*
+ * Timer device implementation for SGI UV platform.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2009 Silicon Graphics, Inc.  All rights reserved.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/mmtimer.h>
+#include <linux/miscdevice.h>
+#include <linux/posix-timers.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+
+#include <asm/genapic.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
+
+MODULE_AUTHOR("Dimitri Sivanich <sivanich@sgi.com>");
+MODULE_DESCRIPTION("SGI UV Memory Mapped RTC Timer");
+MODULE_LICENSE("GPL");
+
+/* name of the device, usually in /dev */
+#define UV_MMTIMER_NAME "mmtimer"
+#define UV_MMTIMER_DESC "SGI UV Memory Mapped RTC Timer"
+#define UV_MMTIMER_VERSION "1.0"
+
+static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd,
+						unsigned long arg);
+static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma);
+
+/*
+ * Period in femtoseconds (10^-15 s)
+ */
+static unsigned long uv_mmtimer_femtoperiod;
+
+static const struct file_operations uv_mmtimer_fops = {
+	.owner = THIS_MODULE,
+	.mmap =	uv_mmtimer_mmap,
+	.unlocked_ioctl = uv_mmtimer_ioctl,
+	.llseek = noop_llseek,
+};
+
+/**
+ * uv_mmtimer_ioctl - ioctl interface for /dev/uv_mmtimer
+ * @file: file structure for the device
+ * @cmd: command to execute
+ * @arg: optional argument to command
+ *
+ * Executes the command specified by @cmd.  Returns 0 for success, < 0 for
+ * failure.
+ *
+ * Valid commands:
+ *
+ * %MMTIMER_GETOFFSET - Should return the offset (relative to the start
+ * of the page where the registers are mapped) for the counter in question.
+ *
+ * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15)
+ * seconds
+ *
+ * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address
+ * specified by @arg
+ *
+ * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter
+ *
+ * %MMTIMER_MMAPAVAIL - Returns 1 if registers can be mmap'd into userspace
+ *
+ * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it
+ * in the address specified by @arg.
+ */
+static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd,
+						unsigned long arg)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	case MMTIMER_GETOFFSET:	/* offset of the counter */
+		/*
+		 * Starting with HUB rev 2.0, the UV RTC register is
+		 * replicated across all cachelines of its own page.
+		 * This allows faster simultaneous reads from a given socket.
+		 *
+		 * The offset returned is in 64 bit units.
+		 */
+		if (uv_get_min_hub_revision_id() == 1)
+			ret = 0;
+		else
+			ret = ((uv_blade_processor_id() * L1_CACHE_BYTES) %
+					PAGE_SIZE) / 8;
+		break;
+
+	case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */
+		if (copy_to_user((unsigned long __user *)arg,
+				&uv_mmtimer_femtoperiod, sizeof(unsigned long)))
+			ret = -EFAULT;
+		break;
+
+	case MMTIMER_GETFREQ: /* frequency in Hz */
+		if (copy_to_user((unsigned long __user *)arg,
+				&sn_rtc_cycles_per_second,
+				sizeof(unsigned long)))
+			ret = -EFAULT;
+		break;
+
+	case MMTIMER_GETBITS: /* number of bits in the clock */
+		ret = hweight64(UVH_RTC_REAL_TIME_CLOCK_MASK);
+		break;
+
+	case MMTIMER_MMAPAVAIL:
+		ret = 1;
+		break;
+
+	case MMTIMER_GETCOUNTER:
+		if (copy_to_user((unsigned long __user *)arg,
+				(unsigned long *)uv_local_mmr_address(UVH_RTC),
+				sizeof(unsigned long)))
+			ret = -EFAULT;
+		break;
+	default:
+		ret = -ENOTTY;
+		break;
+	}
+	return ret;
+}
+
+/**
+ * uv_mmtimer_mmap - maps the clock's registers into userspace
+ * @file: file structure for the device
+ * @vma: VMA to map the registers into
+ *
+ * Calls remap_pfn_range() to map the clock's registers into
+ * the calling process' address space.
+ */
+static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long uv_mmtimer_addr;
+
+	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+		return -EINVAL;
+
+	if (vma->vm_flags & VM_WRITE)
+		return -EPERM;
+
+	if (PAGE_SIZE > (1 << 16))
+		return -ENOSYS;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	uv_mmtimer_addr = UV_LOCAL_MMR_BASE | UVH_RTC;
+	uv_mmtimer_addr &= ~(PAGE_SIZE - 1);
+	uv_mmtimer_addr &= 0xfffffffffffffffUL;
+
+	if (remap_pfn_range(vma, vma->vm_start, uv_mmtimer_addr >> PAGE_SHIFT,
+					PAGE_SIZE, vma->vm_page_prot)) {
+		printk(KERN_ERR "remap_pfn_range failed in uv_mmtimer_mmap\n");
+		return -EAGAIN;
+	}
+
+	return 0;
+}
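+
+/*
+ * Usage sketch (illustrative only): userspace typically pairs the
+ * MMTIMER_GETOFFSET ioctl with mmap() so the RTC can be polled
+ * without further syscalls.  Assuming the misc device node is
+ * /dev/mmtimer:
+ *
+ *	int fd = open("/dev/mmtimer", O_RDONLY);
+ *	int offset = ioctl(fd, MMTIMER_GETOFFSET, 0);
+ *	volatile unsigned long *rtc;
+ *
+ *	rtc = (unsigned long *)mmap(NULL, getpagesize(), PROT_READ,
+ *				    MAP_SHARED, fd, 0);
+ *	unsigned long now = rtc[offset];
+ *
+ * The offset returned by the ioctl is in 64-bit words, which is why
+ * it indexes an unsigned long array rather than a byte address.  Note
+ * that the mapping must be read-only and exactly one page long, per
+ * the checks in uv_mmtimer_mmap() above.
+ */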
+
+static struct miscdevice uv_mmtimer_miscdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = UV_MMTIMER_NAME,
+	.fops = &uv_mmtimer_fops
+};
+
+/**
+ * uv_mmtimer_init - device initialization routine
+ *
+ * Does initial setup for the uv_mmtimer device.
+ */
+static int __init uv_mmtimer_init(void)
+{
+	int ret;
+
+	if (!is_uv_system()) {
+		printk(KERN_ERR "%s: Hardware unsupported\n", UV_MMTIMER_NAME);
+		return -ENODEV;
+	}
+
+	/*
+	 * Sanity check the cycles/sec variable
+	 */
+	if (sn_rtc_cycles_per_second < 100000) {
+		printk(KERN_ERR "%s: unable to determine clock frequency\n",
+		       UV_MMTIMER_NAME);
+		return -ENODEV;
+	}
+
+	uv_mmtimer_femtoperiod = ((unsigned long)1E15 +
+				sn_rtc_cycles_per_second / 2) /
+				sn_rtc_cycles_per_second;
+
+	ret = misc_register(&uv_mmtimer_miscdev);
+	if (ret) {
+		printk(KERN_ERR "%s: failed to register device\n",
+		       UV_MMTIMER_NAME);
+		return ret;
+	}
+
+	printk(KERN_INFO "%s: v%s, %ld MHz\n", UV_MMTIMER_DESC,
+		UV_MMTIMER_VERSION,
+		sn_rtc_cycles_per_second/(unsigned long)1E6);
+
+	return 0;
+}
+
+module_init(uv_mmtimer_init);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
new file mode 100644
index 0000000..5b5b5d7
--- /dev/null
+++ b/drivers/char/virtio_console.c
@@ -0,0 +1,2312 @@
+/*
+ * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
+ * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
+ * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/splice.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/virtio.h>
+#include <linux/virtio_console.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include "../tty/hvc/hvc_console.h"
+
+#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
+
+/*
+ * This is a global struct for storing common data for all the devices
+ * this driver handles.
+ *
+ * Mainly, it has a linked list for all the consoles in one place so
+ * that callbacks from hvc for get_chars(), put_chars() work properly
+ * across multiple devices and multiple ports per device.
+ */
+struct ports_driver_data {
+	/* Used for registering chardevs */
+	struct class *class;
+
+	/* Used for exporting per-port information to debugfs */
+	struct dentry *debugfs_dir;
+
+	/* List of all the devices we're handling */
+	struct list_head portdevs;
+
+	/*
+	 * This is used to keep track of the number of hvc consoles
+	 * spawned by this driver.  This number is given as the first
+	 * argument to hvc_alloc().  To correctly map an initial
+	 * console spawned via hvc_instantiate to the console being
+	 * hooked up via hvc_alloc, we need to pass the same vtermno.
+	 *
+	 * We also just assume the first console being initialised was
+	 * the first one that got used as the initial console.
+	 */
+	unsigned int next_vtermno;
+
+	/* All the console devices handled by this driver */
+	struct list_head consoles;
+};
+static struct ports_driver_data pdrvdata;
+
+static DEFINE_SPINLOCK(pdrvdata_lock);
+static DECLARE_COMPLETION(early_console_added);
+
+/* This struct holds information that's relevant only for console ports */
+struct console {
+	/* We'll place all consoles in a list in the pdrvdata struct */
+	struct list_head list;
+
+	/* The hvc device associated with this console port */
+	struct hvc_struct *hvc;
+
+	/* The size of the console */
+	struct winsize ws;
+
+	/*
+	 * This is the number that we used to register with hvc in
+	 * hvc_instantiate() and hvc_alloc(); it is passed back to us
+	 * by the hvc callbacks so we can differentiate between the
+	 * console ports handled by this driver
+	 */
+	u32 vtermno;
+};
+
+struct port_buffer {
+	char *buf;
+
+	/* size of the buffer in *buf above */
+	size_t size;
+
+	/* used length of the buffer */
+	size_t len;
+	/* offset in the buf from which to consume data */
+	size_t offset;
+
+	/* DMA address of buffer */
+	dma_addr_t dma;
+
+	/* Device we got DMA memory from */
+	struct device *dev;
+
+	/* List of pending dma buffers to free */
+	struct list_head list;
+
+	/* If sgpages == 0 then buf is used */
+	unsigned int sgpages;
+
+	/* sg is used if sgpages > 0. sg must be the last field in this struct */
+	struct scatterlist sg[0];
+};
+
+/*
+ * This is a per-device struct that stores data common to all the
+ * ports for that device (vdev->priv).
+ */
+struct ports_device {
+	/* Next portdev in the list, head is in the pdrvdata struct */
+	struct list_head list;
+
+	/*
+	 * Workqueue handlers where we process deferred work after
+	 * notification
+	 */
+	struct work_struct control_work;
+	struct work_struct config_work;
+
+	struct list_head ports;
+
+	/* To protect the list of ports */
+	spinlock_t ports_lock;
+
+	/* To protect the vq operations for the control channel */
+	spinlock_t c_ivq_lock;
+	spinlock_t c_ovq_lock;
+
+	/* max. number of ports this device can hold */
+	u32 max_nr_ports;
+
+	/* The virtio device we're associated with */
+	struct virtio_device *vdev;
+
+	/*
+	 * A couple of virtqueues for the control channel: one for
+	 * guest->host transfers, one for host->guest transfers
+	 */
+	struct virtqueue *c_ivq, *c_ovq;
+
+	/*
+	 * A control packet buffer for guest->host requests, protected
+	 * by c_ovq_lock.
+	 */
+	struct virtio_console_control cpkt;
+
+	/* Array of per-port IO virtqueues */
+	struct virtqueue **in_vqs, **out_vqs;
+
+	/* Major number for this device.  Ports will be created as minors. */
+	int chr_major;
+};
+
+struct port_stats {
+	unsigned long bytes_sent, bytes_received, bytes_discarded;
+};
+
+/* This struct holds the per-port data */
+struct port {
+	/* Next port in the list, head is in the ports_device */
+	struct list_head list;
+
+	/* Pointer to the parent virtio_console device */
+	struct ports_device *portdev;
+
+	/* The current buffer from which data has to be fed to readers */
+	struct port_buffer *inbuf;
+
+	/*
+	 * To protect the operations on the in_vq associated with this
+	 * port.  Has to be a spinlock because it can be called from
+	 * interrupt context (get_char()).
+	 */
+	spinlock_t inbuf_lock;
+
+	/* Protect the operations on the out_vq. */
+	spinlock_t outvq_lock;
+
+	/* The IO vqs for this port */
+	struct virtqueue *in_vq, *out_vq;
+
+	/* File in the debugfs directory that exposes this port's information */
+	struct dentry *debugfs_file;
+
+	/*
+	 * Keep count of the bytes sent, received and discarded for
+	 * this port for accounting and debugging purposes.  These
+	 * counts are not reset across port open / close events.
+	 */
+	struct port_stats stats;
+
+	/*
+	 * The entries in this struct will be valid if this port is
+	 * hooked up to an hvc console
+	 */
+	struct console cons;
+
+	/* Each port associates with a separate char device */
+	struct cdev *cdev;
+	struct device *dev;
+
+	/* Reference-counting to handle port hot-unplugs and file operations */
+	struct kref kref;
+
+	/* A waitqueue for poll() or blocking read operations */
+	wait_queue_head_t waitqueue;
+
+	/* The 'name' of the port that we expose via sysfs properties */
+	char *name;
+
+	/* We can notify apps of host connect / disconnect events via SIGIO */
+	struct fasync_struct *async_queue;
+
+	/* The 'id' to identify the port with the Host */
+	u32 id;
+
+	bool outvq_full;
+
+	/* Is the host device open */
+	bool host_connected;
+
+	/* We should allow only one process to open a port */
+	bool guest_connected;
+};
+
+/* This is the very early arch-specified put chars function. */
+static int (*early_put_chars)(u32, const char *, int);
+
+static struct port *find_port_by_vtermno(u32 vtermno)
+{
+	struct port *port;
+	struct console *cons;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdrvdata_lock, flags);
+	list_for_each_entry(cons, &pdrvdata.consoles, list) {
+		if (cons->vtermno == vtermno) {
+			port = container_of(cons, struct port, cons);
+			goto out;
+		}
+	}
+	port = NULL;
+out:
+	spin_unlock_irqrestore(&pdrvdata_lock, flags);
+	return port;
+}
+
+static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
+						 dev_t dev)
+{
+	struct port *port;
+	unsigned long flags;
+
+	spin_lock_irqsave(&portdev->ports_lock, flags);
+	list_for_each_entry(port, &portdev->ports, list) {
+		if (port->cdev->dev == dev) {
+			kref_get(&port->kref);
+			goto out;
+		}
+	}
+	port = NULL;
+out:
+	spin_unlock_irqrestore(&portdev->ports_lock, flags);
+
+	return port;
+}
+
+static struct port *find_port_by_devt(dev_t dev)
+{
+	struct ports_device *portdev;
+	struct port *port;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdrvdata_lock, flags);
+	list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
+		port = find_port_by_devt_in_portdev(portdev, dev);
+		if (port)
+			goto out;
+	}
+	port = NULL;
+out:
+	spin_unlock_irqrestore(&pdrvdata_lock, flags);
+	return port;
+}
+
+static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
+{
+	struct port *port;
+	unsigned long flags;
+
+	spin_lock_irqsave(&portdev->ports_lock, flags);
+	list_for_each_entry(port, &portdev->ports, list)
+		if (port->id == id)
+			goto out;
+	port = NULL;
+out:
+	spin_unlock_irqrestore(&portdev->ports_lock, flags);
+
+	return port;
+}
+
+static struct port *find_port_by_vq(struct ports_device *portdev,
+				    struct virtqueue *vq)
+{
+	struct port *port;
+	unsigned long flags;
+
+	spin_lock_irqsave(&portdev->ports_lock, flags);
+	list_for_each_entry(port, &portdev->ports, list)
+		if (port->in_vq == vq || port->out_vq == vq)
+			goto out;
+	port = NULL;
+out:
+	spin_unlock_irqrestore(&portdev->ports_lock, flags);
+	return port;
+}
+
+static bool is_console_port(struct port *port)
+{
+	if (port->cons.hvc)
+		return true;
+	return false;
+}
+
+static bool is_rproc_serial(const struct virtio_device *vdev)
+{
+	return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL;
+}
+
+static inline bool use_multiport(struct ports_device *portdev)
+{
+	/*
+	 * This condition can be true when put_chars is called from
+	 * early_init
+	 */
+	if (!portdev->vdev)
+		return false;
+	return __virtio_test_bit(portdev->vdev, VIRTIO_CONSOLE_F_MULTIPORT);
+}
+
+static DEFINE_SPINLOCK(dma_bufs_lock);
+static LIST_HEAD(pending_free_dma_bufs);
+
+static void free_buf(struct port_buffer *buf, bool can_sleep)
+{
+	unsigned int i;
+
+	for (i = 0; i < buf->sgpages; i++) {
+		struct page *page = sg_page(&buf->sg[i]);
+		if (!page)
+			break;
+		put_page(page);
+	}
+
+	if (!buf->dev) {
+		kfree(buf->buf);
+	} else if (is_rproc_enabled) {
+		unsigned long flags;
+
+		/* dma_free_coherent requires interrupts to be enabled. */
+		if (!can_sleep) {
+			/* queue up dma-buffers to be freed later */
+			spin_lock_irqsave(&dma_bufs_lock, flags);
+			list_add_tail(&buf->list, &pending_free_dma_bufs);
+			spin_unlock_irqrestore(&dma_bufs_lock, flags);
+			return;
+		}
+		dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma);
+
+		/* Release device refcnt and allow it to be freed */
+		put_device(buf->dev);
+	}
+
+	kfree(buf);
+}
+
+static void reclaim_dma_bufs(void)
+{
+	unsigned long flags;
+	struct port_buffer *buf, *tmp;
+	LIST_HEAD(tmp_list);
+
+	if (list_empty(&pending_free_dma_bufs))
+		return;
+
+	/* Create a copy of the pending_free_dma_bufs while holding the lock */
+	spin_lock_irqsave(&dma_bufs_lock, flags);
+	list_cut_position(&tmp_list, &pending_free_dma_bufs,
+			  pending_free_dma_bufs.prev);
+	spin_unlock_irqrestore(&dma_bufs_lock, flags);
+
+	/* Release the dma buffers, without irqs enabled */
+	list_for_each_entry_safe(buf, tmp, &tmp_list, list) {
+		list_del(&buf->list);
+		free_buf(buf, true);
+	}
+}
+
+static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
+				     int pages)
+{
+	struct port_buffer *buf;
+
+	reclaim_dma_bufs();
+
+	/*
+	 * Allocate buffer and the sg list. The sg list array is allocated
+	 * directly after the port_buffer struct.
+	 */
+	buf = kmalloc(struct_size(buf, sg, pages), GFP_KERNEL);
+	if (!buf)
+		goto fail;
+
+	buf->sgpages = pages;
+	if (pages > 0) {
+		buf->dev = NULL;
+		buf->buf = NULL;
+		return buf;
+	}
+
+	if (is_rproc_serial(vdev)) {
+		/*
+		 * Allocate DMA memory from ancestor. When a virtio
+		 * device is created by remoteproc, the DMA memory is
+		 * associated with the grandparent device:
+		 * vdev => rproc => platform-dev.
+		 */
+		if (!vdev->dev.parent || !vdev->dev.parent->parent)
+			goto free_buf;
+		buf->dev = vdev->dev.parent->parent;
+
+		/* Increase device refcnt to avoid freeing it */
+		get_device(buf->dev);
+		buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma,
+					      GFP_KERNEL);
+	} else {
+		buf->dev = NULL;
+		buf->buf = kmalloc(buf_size, GFP_KERNEL);
+	}
+
+	if (!buf->buf)
+		goto free_buf;
+	buf->len = 0;
+	buf->offset = 0;
+	buf->size = buf_size;
+	return buf;
+
+free_buf:
+	kfree(buf);
+fail:
+	return NULL;
+}
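+
+/*
+ * For reference: the single kmalloc() above covers both the struct
+ * and its trailing scatterlist, since struct_size(buf, sg, pages)
+ * expands to sizeof(*buf) + pages * sizeof(struct scatterlist), with
+ * overflow checking.
+ */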
+
+/* Callers should take appropriate locks */
+static struct port_buffer *get_inbuf(struct port *port)
+{
+	struct port_buffer *buf;
+	unsigned int len;
+
+	if (port->inbuf)
+		return port->inbuf;
+
+	buf = virtqueue_get_buf(port->in_vq, &len);
+	if (buf) {
+		buf->len = len;
+		buf->offset = 0;
+		port->stats.bytes_received += len;
+	}
+	return buf;
+}
+
+/*
+ * Create a scatter-gather list representing our input buffer and put
+ * it in the queue.
+ *
+ * Callers should take appropriate locks.
+ */
+static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
+{
+	struct scatterlist sg[1];
+	int ret;
+
+	sg_init_one(sg, buf->buf, buf->size);
+
+	ret = virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC);
+	virtqueue_kick(vq);
+	if (!ret)
+		ret = vq->num_free;
+	return ret;
+}
+
+/*
+ * Discard any unread data this port has.  Callers should take
+ * appropriate locks.
+ */
+static void discard_port_data(struct port *port)
+{
+	struct port_buffer *buf;
+	unsigned int err;
+
+	if (!port->portdev) {
+		/* Device has been unplugged.  vqs are already gone. */
+		return;
+	}
+	buf = get_inbuf(port);
+
+	err = 0;
+	while (buf) {
+		port->stats.bytes_discarded += buf->len - buf->offset;
+		if (add_inbuf(port->in_vq, buf) < 0) {
+			err++;
+			free_buf(buf, false);
+		}
+		port->inbuf = NULL;
+		buf = get_inbuf(port);
+	}
+	if (err)
+		dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
+			 err);
+}
+
+static bool port_has_data(struct port *port)
+{
+	unsigned long flags;
+	bool ret;
+
+	ret = false;
+	spin_lock_irqsave(&port->inbuf_lock, flags);
+	port->inbuf = get_inbuf(port);
+	if (port->inbuf)
+		ret = true;
+
+	spin_unlock_irqrestore(&port->inbuf_lock, flags);
+	return ret;
+}
+
+static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
+				  unsigned int event, unsigned int value)
+{
+	struct scatterlist sg[1];
+	struct virtqueue *vq;
+	unsigned int len;
+
+	if (!use_multiport(portdev))
+		return 0;
+
+	vq = portdev->c_ovq;
+
+	spin_lock(&portdev->c_ovq_lock);
+
+	portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
+	portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event);
+	portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value);
+
+	sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control));
+
+	if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) {
+		virtqueue_kick(vq);
+		while (!virtqueue_get_buf(vq, &len)
+			&& !virtqueue_is_broken(vq))
+			cpu_relax();
+	}
+
+	spin_unlock(&portdev->c_ovq_lock);
+	return 0;
+}
+
+static ssize_t send_control_msg(struct port *port, unsigned int event,
+				unsigned int value)
+{
+	/* Did the port get unplugged before userspace closed it? */
+	if (port->portdev)
+		return __send_control_msg(port->portdev, port->id, event, value);
+	return 0;
+}
+
+/* Callers must take the port->outvq_lock */
+static void reclaim_consumed_buffers(struct port *port)
+{
+	struct port_buffer *buf;
+	unsigned int len;
+
+	if (!port->portdev) {
+		/* Device has been unplugged.  vqs are already gone. */
+		return;
+	}
+	while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
+		free_buf(buf, false);
+		port->outvq_full = false;
+	}
+}
+
+static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
+			      int nents, size_t in_count,
+			      void *data, bool nonblock)
+{
+	struct virtqueue *out_vq;
+	int err;
+	unsigned long flags;
+	unsigned int len;
+
+	out_vq = port->out_vq;
+
+	spin_lock_irqsave(&port->outvq_lock, flags);
+
+	reclaim_consumed_buffers(port);
+
+	err = virtqueue_add_outbuf(out_vq, sg, nents, data, GFP_ATOMIC);
+
+	/* Tell Host to go! */
+	virtqueue_kick(out_vq);
+
+	if (err) {
+		in_count = 0;
+		goto done;
+	}
+
+	if (out_vq->num_free == 0)
+		port->outvq_full = true;
+
+	if (nonblock)
+		goto done;
+
+	/*
+	 * Wait till the host acknowledges it pushed out the data we
+	 * sent.  This is done for data from the hvc_console; the tty
+	 * operations are performed with spinlocks held so we can't
+	 * sleep here.  An alternative would be to copy the data to a
+	 * buffer and relax the spinning requirement.  The downside is
+	 * we need to kmalloc a GFP_ATOMIC buffer each time the
+	 * console driver writes something out.
+	 */
+	while (!virtqueue_get_buf(out_vq, &len)
+		&& !virtqueue_is_broken(out_vq))
+		cpu_relax();
+done:
+	spin_unlock_irqrestore(&port->outvq_lock, flags);
+
+	port->stats.bytes_sent += in_count;
+	/*
+	 * We're expected to return the amount of data we wrote -- all
+	 * of it
+	 */
+	return in_count;
+}
+
+/*
+ * Give out the data that's requested from the buffer that we have
+ * queued up.
+ */
+static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
+			    size_t out_count, bool to_user)
+{
+	struct port_buffer *buf;
+	unsigned long flags;
+
+	if (!out_count || !port_has_data(port))
+		return 0;
+
+	buf = port->inbuf;
+	out_count = min(out_count, buf->len - buf->offset);
+
+	if (to_user) {
+		ssize_t ret;
+
+		ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
+		if (ret)
+			return -EFAULT;
+	} else {
+		memcpy((__force char *)out_buf, buf->buf + buf->offset,
+		       out_count);
+	}
+
+	buf->offset += out_count;
+
+	if (buf->offset == buf->len) {
+		/*
+		 * We're done using all the data in this buffer.
+		 * Re-queue so that the Host can send us more data.
+		 */
+		spin_lock_irqsave(&port->inbuf_lock, flags);
+		port->inbuf = NULL;
+
+		if (add_inbuf(port->in_vq, buf) < 0)
+			dev_warn(port->dev, "failed add_buf\n");
+
+		spin_unlock_irqrestore(&port->inbuf_lock, flags);
+	}
+	/* Return the number of bytes actually copied */
+	return out_count;
+}
+
+/* The condition that must be true for polling to end */
+static bool will_read_block(struct port *port)
+{
+	if (!port->guest_connected) {
+		/* Port got hot-unplugged. Let's exit. */
+		return false;
+	}
+	return !port_has_data(port) && port->host_connected;
+}
+
+static bool will_write_block(struct port *port)
+{
+	bool ret;
+
+	if (!port->guest_connected) {
+		/* Port got hot-unplugged. Let's exit. */
+		return false;
+	}
+	if (!port->host_connected)
+		return true;
+
+	spin_lock_irq(&port->outvq_lock);
+	/*
+	 * Check if the Host has consumed any buffers since we last
+	 * sent data (this is only applicable for nonblocking ports).
+	 */
+	reclaim_consumed_buffers(port);
+	ret = port->outvq_full;
+	spin_unlock_irq(&port->outvq_lock);
+
+	return ret;
+}
+
+static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
+			      size_t count, loff_t *offp)
+{
+	struct port *port;
+	ssize_t ret;
+
+	port = filp->private_data;
+
+	/* Port is hot-unplugged. */
+	if (!port->guest_connected)
+		return -ENODEV;
+
+	if (!port_has_data(port)) {
+		/*
+		 * If nothing's connected on the host just return 0 in
+		 * case of list_empty; this tells the userspace app
+		 * that there's no connection
+		 */
+		if (!port->host_connected)
+			return 0;
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		ret = wait_event_freezable(port->waitqueue,
+					   !will_read_block(port));
+		if (ret < 0)
+			return ret;
+	}
+	/* Port got hot-unplugged while we were waiting above. */
+	if (!port->guest_connected)
+		return -ENODEV;
+	/*
+	 * We could've received a disconnection message while we were
+	 * waiting for more data.
+	 *
+	 * This check is not combined with the if() statement above, as
+	 * we might receive some data and the host could also get
+	 * disconnected after we were woken up from our wait.  So we
+	 * really want to hand over whatever data we have and only then
+	 * check for host_connected.
+	 */
+	if (!port_has_data(port) && !port->host_connected)
+		return 0;
+
+	return fill_readbuf(port, ubuf, count, true);
+}
+
+static int wait_port_writable(struct port *port, bool nonblock)
+{
+	int ret;
+
+	if (will_write_block(port)) {
+		if (nonblock)
+			return -EAGAIN;
+
+		ret = wait_event_freezable(port->waitqueue,
+					   !will_write_block(port));
+		if (ret < 0)
+			return ret;
+	}
+	/* Port got hot-unplugged. */
+	if (!port->guest_connected)
+		return -ENODEV;
+
+	return 0;
+}
+
+static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
+			       size_t count, loff_t *offp)
+{
+	struct port *port;
+	struct port_buffer *buf;
+	ssize_t ret;
+	bool nonblock;
+	struct scatterlist sg[1];
+
+	/* Userspace could be out to fool us */
+	if (!count)
+		return 0;
+
+	port = filp->private_data;
+
+	nonblock = filp->f_flags & O_NONBLOCK;
+
+	ret = wait_port_writable(port, nonblock);
+	if (ret < 0)
+		return ret;
+
+	count = min((size_t)(32 * 1024), count);
+
+	buf = alloc_buf(port->portdev->vdev, count, 0);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = copy_from_user(buf->buf, ubuf, count);
+	if (ret) {
+		ret = -EFAULT;
+		goto free_buf;
+	}
+
+	/*
+	 * We now ask send_buf() to not spin for generic ports -- we
+	 * can re-use the same code path that non-blocking file
+	 * descriptors take for blocking file descriptors since the
+	 * wait is already done and we're certain the write will go
+	 * through to the host.
+	 */
+	nonblock = true;
+	sg_init_one(sg, buf->buf, count);
+	ret = __send_to_port(port, sg, 1, count, buf, nonblock);
+
+	if (nonblock && ret > 0)
+		goto out;
+
+free_buf:
+	free_buf(buf, true);
+out:
+	return ret;
+}
+
+struct sg_list {
+	unsigned int n;
+	unsigned int size;
+	size_t len;
+	struct scatterlist *sg;
+};
+
+static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
+			struct splice_desc *sd)
+{
+	struct sg_list *sgl = sd->u.data;
+	unsigned int offset, len;
+
+	if (sgl->n == sgl->size)
+		return 0;
+
+	/* Try to steal this page from the pipe */
+	if (pipe_buf_steal(pipe, buf) == 0) {
+		/* Get reference and unlock page for moving */
+		get_page(buf->page);
+		unlock_page(buf->page);
+
+		len = min(buf->len, sd->len);
+		sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset);
+	} else {
+		/* Fall back to copying the page */
+		struct page *page = alloc_page(GFP_KERNEL);
+		char *src;
+
+		if (!page)
+			return -ENOMEM;
+
+		offset = sd->pos & ~PAGE_MASK;
+
+		len = sd->len;
+		if (len + offset > PAGE_SIZE)
+			len = PAGE_SIZE - offset;
+
+		src = kmap_atomic(buf->page);
+		memcpy(page_address(page) + offset, src + buf->offset, len);
+		kunmap_atomic(src);
+
+		sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
+	}
+	sgl->n++;
+	sgl->len += len;
+
+	return len;
+}
+
+/* Faster zero-copy write by splicing */
+static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
+				      struct file *filp, loff_t *ppos,
+				      size_t len, unsigned int flags)
+{
+	struct port *port = filp->private_data;
+	struct sg_list sgl;
+	ssize_t ret;
+	struct port_buffer *buf;
+	struct splice_desc sd = {
+		.total_len = len,
+		.flags = flags,
+		.pos = *ppos,
+		.u.data = &sgl,
+	};
+
+	/*
+	 * Rproc_serial does not yet support splice. To support splice,
+	 * pipe_to_sg() must allocate dma-buffers and copy content from
+	 * regular pages to dma pages. And alloc_buf and free_buf must
+	 * support allocating and freeing such a list of dma-buffers.
+	 */
+	if (is_rproc_serial(port->out_vq->vdev))
+		return -EINVAL;
+
+	/*
+	 * pipe->nrbufs == 0 means there is no data to transfer,
+	 * so we just return 0 in that case.
+	 */
+	pipe_lock(pipe);
+	if (!pipe->nrbufs) {
+		ret = 0;
+		goto error_out;
+	}
+
+	ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
+	if (ret < 0)
+		goto error_out;
+
+	buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto error_out;
+	}
+
+	sgl.n = 0;
+	sgl.len = 0;
+	sgl.size = pipe->nrbufs;
+	sgl.sg = buf->sg;
+	sg_init_table(sgl.sg, sgl.size);
+	ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
+	pipe_unlock(pipe);
+	if (likely(ret > 0))
+		ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
+
+	if (unlikely(ret <= 0))
+		free_buf(buf, true);
+	return ret;
+
+error_out:
+	pipe_unlock(pipe);
+	return ret;
+}
+
+static __poll_t port_fops_poll(struct file *filp, poll_table *wait)
+{
+	struct port *port;
+	__poll_t ret;
+
+	port = filp->private_data;
+	poll_wait(filp, &port->waitqueue, wait);
+
+	if (!port->guest_connected) {
+		/* Port got unplugged */
+		return EPOLLHUP;
+	}
+	ret = 0;
+	if (!will_read_block(port))
+		ret |= EPOLLIN | EPOLLRDNORM;
+	if (!will_write_block(port))
+		ret |= EPOLLOUT;
+	if (!port->host_connected)
+		ret |= EPOLLHUP;
+
+	return ret;
+}
+
+static void remove_port(struct kref *kref);
+
+static int port_fops_release(struct inode *inode, struct file *filp)
+{
+	struct port *port;
+
+	port = filp->private_data;
+
+	/* Notify host of port being closed */
+	send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
+
+	spin_lock_irq(&port->inbuf_lock);
+	port->guest_connected = false;
+
+	discard_port_data(port);
+
+	spin_unlock_irq(&port->inbuf_lock);
+
+	spin_lock_irq(&port->outvq_lock);
+	reclaim_consumed_buffers(port);
+	spin_unlock_irq(&port->outvq_lock);
+
+	reclaim_dma_bufs();
+	/*
+	 * Locks aren't necessary here as a port can't be opened after
+	 * unplug, and if a port isn't unplugged, a kref would already
+	 * exist for the port.  Plus, taking ports_lock here would
+	 * create a dependency on other locks taken by functions
+	 * inside remove_port if we're the last holder of the port,
+	 * creating many problems.
+	 */
+	kref_put(&port->kref, remove_port);
+
+	return 0;
+}
+
+static int port_fops_open(struct inode *inode, struct file *filp)
+{
+	struct cdev *cdev = inode->i_cdev;
+	struct port *port;
+	int ret;
+
+	/* We get the port with a kref here */
+	port = find_port_by_devt(cdev->dev);
+	if (!port) {
+		/* Port was unplugged before we could proceed */
+		return -ENXIO;
+	}
+	filp->private_data = port;
+
+	/*
+	 * Don't allow opening of console port devices -- that's done
+	 * via /dev/hvc
+	 */
+	if (is_console_port(port)) {
+		ret = -ENXIO;
+		goto out;
+	}
+
+	/* Allow only one process to open a particular port at a time */
+	spin_lock_irq(&port->inbuf_lock);
+	if (port->guest_connected) {
+		spin_unlock_irq(&port->inbuf_lock);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	port->guest_connected = true;
+	spin_unlock_irq(&port->inbuf_lock);
+
+	spin_lock_irq(&port->outvq_lock);
+	/*
+	 * We might have missed reclaiming a few buffers in the window
+	 * between the port being closed previously and being opened
+	 * now.
+	 */
+	reclaim_consumed_buffers(port);
+	spin_unlock_irq(&port->outvq_lock);
+
+	nonseekable_open(inode, filp);
+
+	/* Notify host of port being opened */
+	send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
+
+	return 0;
+out:
+	kref_put(&port->kref, remove_port);
+	return ret;
+}
+
+static int port_fops_fasync(int fd, struct file *filp, int mode)
+{
+	struct port *port;
+
+	port = filp->private_data;
+	return fasync_helper(fd, filp, mode, &port->async_queue);
+}
+
+/*
+ * The file operations that we support: programs in the guest can open
+ * a console device, read from it, write to it, poll for data and
+ * close it.  The devices are at
+ *   /dev/vport<device number>p<port number>
+ */
+static const struct file_operations port_fops = {
+	.owner = THIS_MODULE,
+	.open  = port_fops_open,
+	.read  = port_fops_read,
+	.write = port_fops_write,
+	.splice_write = port_fops_splice_write,
+	.poll  = port_fops_poll,
+	.release = port_fops_release,
+	.fasync = port_fops_fasync,
+	.llseek = no_llseek,
+};
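+
+/*
+ * Usage sketch (illustrative only): a guest application exchanges
+ * data with the host over one of these ports with plain file I/O,
+ * e.g. on port 1 of device 0:
+ *
+ *	char buf[256];
+ *	int fd = open("/dev/vport0p1", O_RDWR);
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *
+ *	poll(&pfd, 1, -1);
+ *	if (pfd.revents & POLLIN)
+ *		read(fd, buf, sizeof(buf));
+ *
+ * A read() that returns 0 means the host side is not connected; a
+ * POLLHUP event means the port was unplugged (see port_fops_poll()
+ * and port_fops_read() above).
+ */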
+
+/*
+ * The put_chars() callback is pretty straightforward.
+ *
+ * We turn the characters into a scatter-gather list, add it to the
+ * output queue and then kick the Host.  Then we sit here waiting for
+ * it to finish: inefficient in theory, but in practice
+ * implementations will do it immediately.
+ */
+static int put_chars(u32 vtermno, const char *buf, int count)
+{
+	struct port *port;
+	struct scatterlist sg[1];
+	void *data;
+	int ret;
+
+	if (unlikely(early_put_chars))
+		return early_put_chars(vtermno, buf, count);
+
+	port = find_port_by_vtermno(vtermno);
+	if (!port)
+		return -EPIPE;
+
+	data = kmemdup(buf, count, GFP_ATOMIC);
+	if (!data)
+		return -ENOMEM;
+
+	sg_init_one(sg, data, count);
+	ret = __send_to_port(port, sg, 1, count, data, false);
+	kfree(data);
+	return ret;
+}
+
+/*
+ * get_chars() is the callback from the hvc_console infrastructure
+ * when an interrupt is received.
+ *
+ * We call out to fill_readbuf that gets us the required data from the
+ * buffers that are queued up.
+ */
+static int get_chars(u32 vtermno, char *buf, int count)
+{
+	struct port *port;
+
+	/* If we've not set up the port yet, we have no input to give. */
+	if (unlikely(early_put_chars))
+		return 0;
+
+	port = find_port_by_vtermno(vtermno);
+	if (!port)
+		return -EPIPE;
+
+	/* If we don't have an input queue yet, we can't get input. */
+	BUG_ON(!port->in_vq);
+
+	return fill_readbuf(port, (__force char __user *)buf, count, false);
+}
+
+static void resize_console(struct port *port)
+{
+	struct virtio_device *vdev;
+
+	/* The port could have been hot-unplugged */
+	if (!port || !is_console_port(port))
+		return;
+
+	vdev = port->portdev->vdev;
+
+	/* Don't test F_SIZE at all if we're rproc: not a valid feature! */
+	if (!is_rproc_serial(vdev) &&
+	    virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
+		hvc_resize(port->cons.hvc, port->cons.ws);
+}
+
+/* We set the configuration at this point, since we now have a tty */
+static int notifier_add_vio(struct hvc_struct *hp, int data)
+{
+	struct port *port;
+
+	port = find_port_by_vtermno(hp->vtermno);
+	if (!port)
+		return -EINVAL;
+
+	hp->irq_requested = 1;
+	resize_console(port);
+
+	return 0;
+}
+
+static void notifier_del_vio(struct hvc_struct *hp, int data)
+{
+	hp->irq_requested = 0;
+}
+
+/* The operations for console ports. */
+static const struct hv_ops hv_ops = {
+	.get_chars = get_chars,
+	.put_chars = put_chars,
+	.notifier_add = notifier_add_vio,
+	.notifier_del = notifier_del_vio,
+	.notifier_hangup = notifier_del_vio,
+};
+
+/*
+ * Console drivers are initialized very early so boot messages can go
+ * out, so we do things slightly differently from the generic virtio
+ * initialization of the net and block drivers.
+ *
+ * At this stage, the console is output-only.  It's too early to set
+ * up a virtqueue, so we let the drivers do some boutique early-output
+ * thing.
+ */
+int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
+{
+	early_put_chars = put_chars;
+	return hvc_instantiate(0, 0, &hv_ops);
+}
+
+static int init_port_console(struct port *port)
+{
+	int ret;
+
+	/*
+	 * The Host's telling us this port is a console port.  Hook it
+	 * up with an hvc console.
+	 *
+	 * To set up and manage our virtual console, we call
+	 * hvc_alloc().
+	 *
+	 * The first argument of hvc_alloc() is the virtual console
+	 * number.  The second argument is the parameter for the
+	 * notification mechanism (like irq number).  We currently
+	 * leave this as zero, virtqueues have implicit notifications.
+	 *
+	 * The third argument is a "struct hv_ops" containing the
+	 * put_chars() get_chars(), notifier_add() and notifier_del()
+	 * pointers.  The final argument is the output buffer size: we
+	 * can do any size, so we put PAGE_SIZE here.
+	 */
+	port->cons.vtermno = pdrvdata.next_vtermno;
+
+	port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
+	if (IS_ERR(port->cons.hvc)) {
+		ret = PTR_ERR(port->cons.hvc);
+		dev_err(port->dev,
+			"error %d allocating hvc for port\n", ret);
+		port->cons.hvc = NULL;
+		return ret;
+	}
+	spin_lock_irq(&pdrvdata_lock);
+	pdrvdata.next_vtermno++;
+	list_add_tail(&port->cons.list, &pdrvdata.consoles);
+	spin_unlock_irq(&pdrvdata_lock);
+	port->guest_connected = true;
+
+	/*
+	 * Start using the new console output if this is the first
+	 * console to come up.
+	 */
+	if (early_put_chars)
+		early_put_chars = NULL;
+
+	/* Notify host of port being opened */
+	send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
+
+	return 0;
+}
+
+static ssize_t show_port_name(struct device *dev,
+			      struct device_attribute *attr, char *buffer)
+{
+	struct port *port;
+
+	port = dev_get_drvdata(dev);
+
+	return sprintf(buffer, "%s\n", port->name);
+}
+
+static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);
+
+static struct attribute *port_sysfs_entries[] = {
+	&dev_attr_name.attr,
+	NULL
+};
+
+static const struct attribute_group port_attribute_group = {
+	.name = NULL,		/* put in device directory */
+	.attrs = port_sysfs_entries,
+};
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+	struct port *port = s->private;
+
+	seq_printf(s, "name: %s\n", port->name ? port->name : "");
+	seq_printf(s, "guest_connected: %d\n", port->guest_connected);
+	seq_printf(s, "host_connected: %d\n", port->host_connected);
+	seq_printf(s, "outvq_full: %d\n", port->outvq_full);
+	seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent);
+	seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received);
+	seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded);
+	seq_printf(s, "is_console: %s\n",
+		   is_console_port(port) ? "yes" : "no");
+	seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno);
+
+	return 0;
+}
+
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations port_debugfs_ops = {
+	.owner = THIS_MODULE,
+	.open = debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static void set_console_size(struct port *port, u16 rows, u16 cols)
+{
+	if (!port || !is_console_port(port))
+		return;
+
+	port->cons.ws.ws_row = rows;
+	port->cons.ws.ws_col = cols;
+}
+
+static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+{
+	struct port_buffer *buf;
+	unsigned int nr_added_bufs;
+	int ret;
+
+	nr_added_bufs = 0;
+	do {
+		buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
+		if (!buf)
+			break;
+
+		spin_lock_irq(lock);
+		ret = add_inbuf(vq, buf);
+		if (ret < 0) {
+			spin_unlock_irq(lock);
+			free_buf(buf, true);
+			break;
+		}
+		nr_added_bufs++;
+		spin_unlock_irq(lock);
+	} while (ret > 0);
+
+	return nr_added_bufs;
+}
+
+static void send_sigio_to_port(struct port *port)
+{
+	if (port->async_queue && port->guest_connected)
+		kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
+}
+
+static int add_port(struct ports_device *portdev, u32 id)
+{
+	char debugfs_name[16];
+	struct port *port;
+	dev_t devt;
+	unsigned int nr_added_bufs;
+	int err;
+
+	port = kmalloc(sizeof(*port), GFP_KERNEL);
+	if (!port) {
+		err = -ENOMEM;
+		goto fail;
+	}
+	kref_init(&port->kref);
+
+	port->portdev = portdev;
+	port->id = id;
+
+	port->name = NULL;
+	port->inbuf = NULL;
+	port->cons.hvc = NULL;
+	port->async_queue = NULL;
+
+	port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
+
+	port->host_connected = port->guest_connected = false;
+	port->stats = (struct port_stats) { 0 };
+
+	port->outvq_full = false;
+
+	port->in_vq = portdev->in_vqs[port->id];
+	port->out_vq = portdev->out_vqs[port->id];
+
+	port->cdev = cdev_alloc();
+	if (!port->cdev) {
+		dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
+		err = -ENOMEM;
+		goto free_port;
+	}
+	port->cdev->ops = &port_fops;
+
+	devt = MKDEV(portdev->chr_major, id);
+	err = cdev_add(port->cdev, devt, 1);
+	if (err < 0) {
+		dev_err(&port->portdev->vdev->dev,
+			"Error %d adding cdev for port %u\n", err, id);
+		goto free_cdev;
+	}
+	port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
+				  devt, port, "vport%up%u",
+				  port->portdev->vdev->index, id);
+	if (IS_ERR(port->dev)) {
+		err = PTR_ERR(port->dev);
+		dev_err(&port->portdev->vdev->dev,
+			"Error %d creating device for port %u\n",
+			err, id);
+		goto free_cdev;
+	}
+
+	spin_lock_init(&port->inbuf_lock);
+	spin_lock_init(&port->outvq_lock);
+	init_waitqueue_head(&port->waitqueue);
+
+	/* Fill the in_vq with buffers so the host can send us data. */
+	nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
+	if (!nr_added_bufs) {
+		dev_err(port->dev, "Error allocating inbufs\n");
+		err = -ENOMEM;
+		goto free_device;
+	}
+
+	if (is_rproc_serial(port->portdev->vdev))
+		/*
+		 * For rproc_serial assume remote processor is connected.
+		 * rproc_serial does not want the console port, only
+		 * the generic port implementation.
+		 */
+		port->host_connected = true;
+	else if (!use_multiport(port->portdev)) {
+		/*
+		 * If we're not using multiport support,
+		 * this has to be a console port.
+		 */
+		err = init_port_console(port);
+		if (err)
+			goto free_inbufs;
+	}
+
+	spin_lock_irq(&portdev->ports_lock);
+	list_add_tail(&port->list, &port->portdev->ports);
+	spin_unlock_irq(&portdev->ports_lock);
+
+	/*
+	 * Tell the Host we're set so that it can send us various
+	 * configuration parameters for this port (eg, port name,
+	 * caching, whether this is a console port, etc.)
+	 */
+	send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+
+	if (pdrvdata.debugfs_dir) {
+		/*
+		 * Finally, create the debugfs file that we can use to
+		 * inspect a port's state at any time
+		 */
+		snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
+			 port->portdev->vdev->index, id);
+		port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+							 pdrvdata.debugfs_dir,
+							 port,
+							 &port_debugfs_ops);
+	}
+	return 0;
+
+free_inbufs:
+free_device:
+	device_destroy(pdrvdata.class, port->dev->devt);
+free_cdev:
+	cdev_del(port->cdev);
+free_port:
+	kfree(port);
+fail:
+	/*
+	 * The host might want to notify management software about the
+	 * port add failure.
+	 */
+	__send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
+	return err;
+}
+
+/* No users remain, remove all port-specific data. */
+static void remove_port(struct kref *kref)
+{
+	struct port *port;
+
+	port = container_of(kref, struct port, kref);
+
+	kfree(port);
+}
+
+static void remove_port_data(struct port *port)
+{
+	spin_lock_irq(&port->inbuf_lock);
+	/* Remove unused data this port might have received. */
+	discard_port_data(port);
+	spin_unlock_irq(&port->inbuf_lock);
+
+	spin_lock_irq(&port->outvq_lock);
+	reclaim_consumed_buffers(port);
+	spin_unlock_irq(&port->outvq_lock);
+}
+
+/*
+ * Port got unplugged.  Remove port from portdev's list and drop the
+ * kref reference.  If no userspace has this port opened, this will
+ * result in the immediate removal of the port.
+ */
+static void unplug_port(struct port *port)
+{
+	spin_lock_irq(&port->portdev->ports_lock);
+	list_del(&port->list);
+	spin_unlock_irq(&port->portdev->ports_lock);
+
+	spin_lock_irq(&port->inbuf_lock);
+	if (port->guest_connected) {
+		/* Let the app know the port is going down. */
+		send_sigio_to_port(port);
+
+		/* Do this after sigio is actually sent */
+		port->guest_connected = false;
+		port->host_connected = false;
+
+		wake_up_interruptible(&port->waitqueue);
+	}
+	spin_unlock_irq(&port->inbuf_lock);
+
+	if (is_console_port(port)) {
+		spin_lock_irq(&pdrvdata_lock);
+		list_del(&port->cons.list);
+		spin_unlock_irq(&pdrvdata_lock);
+		hvc_remove(port->cons.hvc);
+	}
+
+	remove_port_data(port);
+
+	/*
+	 * We should just assume the device itself has gone off --
+	 * else a close on an open port later will try to send out a
+	 * control message.
+	 */
+	port->portdev = NULL;
+
+	sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
+	device_destroy(pdrvdata.class, port->dev->devt);
+	cdev_del(port->cdev);
+
+	debugfs_remove(port->debugfs_file);
+	kfree(port->name);
+
+	/*
+	 * Locks around here are not necessary - a port can't be
+	 * opened after we removed the port struct from ports_list
+	 * above.
+	 */
+	kref_put(&port->kref, remove_port);
+}
+
+/* Any private messages that the Host and Guest want to share */
+static void handle_control_message(struct virtio_device *vdev,
+				   struct ports_device *portdev,
+				   struct port_buffer *buf)
+{
+	struct virtio_console_control *cpkt;
+	struct port *port;
+	size_t name_size;
+	int err;
+
+	cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
+
+	port = find_port_by_id(portdev, virtio32_to_cpu(vdev, cpkt->id));
+	if (!port &&
+	    cpkt->event != cpu_to_virtio16(vdev, VIRTIO_CONSOLE_PORT_ADD)) {
+		/* No valid header at start of buffer.  Drop it. */
+		dev_dbg(&portdev->vdev->dev,
+			"Invalid index %u in control packet\n", cpkt->id);
+		return;
+	}
+
+	switch (virtio16_to_cpu(vdev, cpkt->event)) {
+	case VIRTIO_CONSOLE_PORT_ADD:
+		if (port) {
+			dev_dbg(&portdev->vdev->dev,
+				"Port %u already added\n", port->id);
+			send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+			break;
+		}
+		if (virtio32_to_cpu(vdev, cpkt->id) >=
+		    portdev->max_nr_ports) {
+			dev_warn(&portdev->vdev->dev,
+				"Request for adding port with "
+				"out-of-bound id %u, max. supported id: %u\n",
+				cpkt->id, portdev->max_nr_ports - 1);
+			break;
+		}
+		add_port(portdev, virtio32_to_cpu(vdev, cpkt->id));
+		break;
+	case VIRTIO_CONSOLE_PORT_REMOVE:
+		unplug_port(port);
+		break;
+	case VIRTIO_CONSOLE_CONSOLE_PORT:
+		if (!cpkt->value)
+			break;
+		if (is_console_port(port))
+			break;
+
+		init_port_console(port);
+		complete(&early_console_added);
+		/*
+		 * Could remove the port here in case init fails - but
+		 * have to notify the host first.
+		 */
+		break;
+	case VIRTIO_CONSOLE_RESIZE: {
+		struct {
+			__u16 rows;
+			__u16 cols;
+		} size;
+
+		if (!is_console_port(port))
+			break;
+
+		memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
+		       sizeof(size));
+		set_console_size(port, size.rows, size.cols);
+
+		port->cons.hvc->irq_requested = 1;
+		resize_console(port);
+		break;
+	}
+	case VIRTIO_CONSOLE_PORT_OPEN:
+		port->host_connected = virtio16_to_cpu(vdev, cpkt->value);
+		wake_up_interruptible(&port->waitqueue);
+		/*
+		 * If the host port got closed and the host had any
+		 * unconsumed buffers, we'll be able to reclaim them
+		 * now.
+		 */
+		spin_lock_irq(&port->outvq_lock);
+		reclaim_consumed_buffers(port);
+		spin_unlock_irq(&port->outvq_lock);
+
+		/*
+		 * If the guest is connected, it'll be interested in
+		 * knowing the host connection state changed.
+		 */
+		spin_lock_irq(&port->inbuf_lock);
+		send_sigio_to_port(port);
+		spin_unlock_irq(&port->inbuf_lock);
+		break;
+	case VIRTIO_CONSOLE_PORT_NAME:
+		/*
+		 * If we woke up after hibernation, we can get this
+		 * again.  Skip it in that case.
+		 */
+		if (port->name)
+			break;
+
+		/*
+		 * Skip the size of the header and the cpkt to get the size
+		 * of the name that was sent
+		 */
+		name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;
+
+		port->name = kmalloc(name_size, GFP_KERNEL);
+		if (!port->name) {
+			dev_err(port->dev,
+				"Not enough space to store port name\n");
+			break;
+		}
+		strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
+			name_size - 1);
+		port->name[name_size - 1] = 0;
+
+		/*
+		 * Since we only have one sysfs attribute, 'name',
+		 * create it only if we have a name for the port.
+		 */
+		err = sysfs_create_group(&port->dev->kobj,
+					 &port_attribute_group);
+		if (err) {
+			dev_err(port->dev,
+				"Error %d creating sysfs device attributes\n",
+				err);
+		} else {
+			/*
+			 * Generate a udev event so that appropriate
+			 * symlinks can be created based on udev
+			 * rules.
+			 */
+			kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
+		}
+		break;
+	}
+}
+
+static void control_work_handler(struct work_struct *work)
+{
+	struct ports_device *portdev;
+	struct virtqueue *vq;
+	struct port_buffer *buf;
+	unsigned int len;
+
+	portdev = container_of(work, struct ports_device, control_work);
+	vq = portdev->c_ivq;
+
+	spin_lock(&portdev->c_ivq_lock);
+	while ((buf = virtqueue_get_buf(vq, &len))) {
+		spin_unlock(&portdev->c_ivq_lock);
+
+		buf->len = len;
+		buf->offset = 0;
+
+		handle_control_message(vq->vdev, portdev, buf);
+
+		spin_lock(&portdev->c_ivq_lock);
+		if (add_inbuf(portdev->c_ivq, buf) < 0) {
+			dev_warn(&portdev->vdev->dev,
+				 "Error adding buffer to queue\n");
+			free_buf(buf, false);
+		}
+	}
+	spin_unlock(&portdev->c_ivq_lock);
+}
+
+static void flush_bufs(struct virtqueue *vq, bool can_sleep)
+{
+	struct port_buffer *buf;
+	unsigned int len;
+
+	while ((buf = virtqueue_get_buf(vq, &len)))
+		free_buf(buf, can_sleep);
+}
+
+static void out_intr(struct virtqueue *vq)
+{
+	struct port *port;
+
+	port = find_port_by_vq(vq->vdev->priv, vq);
+	if (!port) {
+		flush_bufs(vq, false);
+		return;
+	}
+
+	wake_up_interruptible(&port->waitqueue);
+}
+
+static void in_intr(struct virtqueue *vq)
+{
+	struct port *port;
+	unsigned long flags;
+
+	port = find_port_by_vq(vq->vdev->priv, vq);
+	if (!port) {
+		flush_bufs(vq, false);
+		return;
+	}
+
+	spin_lock_irqsave(&port->inbuf_lock, flags);
+	port->inbuf = get_inbuf(port);
+
+	/*
+	 * Normally a port should not accept data when it is closed.
+	 * For generic serial ports, the host won't (shouldn't) send
+	 * data till the guest is connected. But this condition can be
+	 * reached when a console port is not yet connected (no tty is
+	 * spawned) and the other side sends out data over the vring,
+	 * or when a remote device starts sending data before the ports
+	 * are opened.
+	 *
+	 * A generic serial port will discard data if not connected,
+	 * while console ports and rproc-serial ports accept data at
+	 * any time. rproc-serial is initialized with guest_connected
+	 * set to false because port_fops_open() expects this. Console
+	 * ports are hooked up with an HVC console and are initialized
+	 * with guest_connected set to true.
+	 */
+
+	if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))
+		discard_port_data(port);
+
+	/* Send a SIGIO indicating new data in case the process asked for it */
+	send_sigio_to_port(port);
+
+	spin_unlock_irqrestore(&port->inbuf_lock, flags);
+
+	wake_up_interruptible(&port->waitqueue);
+
+	if (is_console_port(port) && hvc_poll(port->cons.hvc))
+		hvc_kick();
+}
+
+static void control_intr(struct virtqueue *vq)
+{
+	struct ports_device *portdev;
+
+	portdev = vq->vdev->priv;
+	schedule_work(&portdev->control_work);
+}
+
+static void config_intr(struct virtio_device *vdev)
+{
+	struct ports_device *portdev;
+
+	portdev = vdev->priv;
+
+	if (!use_multiport(portdev))
+		schedule_work(&portdev->config_work);
+}
+
+static void config_work_handler(struct work_struct *work)
+{
+	struct ports_device *portdev;
+
+	portdev = container_of(work, struct ports_device, config_work);
+	if (!use_multiport(portdev)) {
+		struct virtio_device *vdev;
+		struct port *port;
+		u16 rows, cols;
+
+		vdev = portdev->vdev;
+		virtio_cread(vdev, struct virtio_console_config, cols, &cols);
+		virtio_cread(vdev, struct virtio_console_config, rows, &rows);
+
+		port = find_port_by_id(portdev, 0);
+		set_console_size(port, rows, cols);
+
+		/*
+		 * We'll use this way of resizing only for legacy
+		 * support.  For newer userspace
+		 * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages
+		 * to indicate console size changes so that it can be
+		 * done per-port.
+		 */
+		resize_console(port);
+	}
+}
+
+static int init_vqs(struct ports_device *portdev)
+{
+	vq_callback_t **io_callbacks;
+	char **io_names;
+	struct virtqueue **vqs;
+	u32 i, j, nr_ports, nr_queues;
+	int err;
+
+	nr_ports = portdev->max_nr_ports;
+	nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
+
+	vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL);
+	io_callbacks = kmalloc_array(nr_queues, sizeof(vq_callback_t *),
+				     GFP_KERNEL);
+	io_names = kmalloc_array(nr_queues, sizeof(char *), GFP_KERNEL);
+	portdev->in_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
+					GFP_KERNEL);
+	portdev->out_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
+					 GFP_KERNEL);
+	if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
+	    !portdev->out_vqs) {
+		err = -ENOMEM;
+		goto free;
+	}
+
+	/*
+	 * For backward compat (newer host but older guest), the host
+	 * spawns a console port first and also inits the vqs for port
+	 * 0 before others.
+	 */
+	j = 0;
+	io_callbacks[j] = in_intr;
+	io_callbacks[j + 1] = out_intr;
+	io_names[j] = "input";
+	io_names[j + 1] = "output";
+	j += 2;
+
+	if (use_multiport(portdev)) {
+		io_callbacks[j] = control_intr;
+		io_callbacks[j + 1] = NULL;
+		io_names[j] = "control-i";
+		io_names[j + 1] = "control-o";
+
+		for (i = 1; i < nr_ports; i++) {
+			j += 2;
+			io_callbacks[j] = in_intr;
+			io_callbacks[j + 1] = out_intr;
+			io_names[j] = "input";
+			io_names[j + 1] = "output";
+		}
+	}
+	/* Find the queues. */
+	err = virtio_find_vqs(portdev->vdev, nr_queues, vqs,
+			      io_callbacks,
+			      (const char **)io_names, NULL);
+	if (err)
+		goto free;
+
+	j = 0;
+	portdev->in_vqs[0] = vqs[0];
+	portdev->out_vqs[0] = vqs[1];
+	j += 2;
+	if (use_multiport(portdev)) {
+		portdev->c_ivq = vqs[j];
+		portdev->c_ovq = vqs[j + 1];
+
+		for (i = 1; i < nr_ports; i++) {
+			j += 2;
+			portdev->in_vqs[i] = vqs[j];
+			portdev->out_vqs[i] = vqs[j + 1];
+		}
+	}
+	kfree(io_names);
+	kfree(io_callbacks);
+	kfree(vqs);
+
+	return 0;
+
+free:
+	kfree(portdev->out_vqs);
+	kfree(portdev->in_vqs);
+	kfree(io_names);
+	kfree(io_callbacks);
+	kfree(vqs);
+
+	return err;
+}
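+
+/*
+ * Editor's note: a sketch (hypothetical helpers, not part of the
+ * driver) of the virtqueue index layout init_vqs() establishes:
+ * port 0 owns vqs 0/1, the control queues own vqs 2/3, and each
+ * further port N (N >= 1) owns vqs 2*N+2 and 2*N+3.
+ */
+static inline unsigned int example_in_vq_index(u32 port_id)
+{
+	return port_id == 0 ? 0 : 2 * port_id + 2;
+}
+
+static inline unsigned int example_out_vq_index(u32 port_id)
+{
+	return example_in_vq_index(port_id) + 1;
+}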
+
+static const struct file_operations portdev_fops = {
+	.owner = THIS_MODULE,
+};
+
+static void remove_vqs(struct ports_device *portdev)
+{
+	struct virtqueue *vq;
+
+	virtio_device_for_each_vq(portdev->vdev, vq) {
+		struct port_buffer *buf;
+
+		flush_bufs(vq, true);
+		while ((buf = virtqueue_detach_unused_buf(vq)))
+			free_buf(buf, true);
+	}
+	portdev->vdev->config->del_vqs(portdev->vdev);
+	kfree(portdev->in_vqs);
+	kfree(portdev->out_vqs);
+}
+
+static void virtcons_remove(struct virtio_device *vdev)
+{
+	struct ports_device *portdev;
+	struct port *port, *port2;
+
+	portdev = vdev->priv;
+
+	spin_lock_irq(&pdrvdata_lock);
+	list_del(&portdev->list);
+	spin_unlock_irq(&pdrvdata_lock);
+
+	/* Disable interrupts for vqs */
+	vdev->config->reset(vdev);
+	/* Finish up work that's lined up */
+	if (use_multiport(portdev))
+		cancel_work_sync(&portdev->control_work);
+	else
+		cancel_work_sync(&portdev->config_work);
+
+	list_for_each_entry_safe(port, port2, &portdev->ports, list)
+		unplug_port(port);
+
+	unregister_chrdev(portdev->chr_major, "virtio-portsdev");
+
+	/*
+	 * When yanking out a device, we immediately lose the
+	 * (device-side) queues.  So there's no point in keeping the
+	 * guest side around till we drop our final reference.  This
+	 * also means that any ports which are in an open state will
+	 * have to just stop using the port, as the vqs are going
+	 * away.
+	 */
+	remove_vqs(portdev);
+	kfree(portdev);
+}
+
+/*
+ * Once we're further in boot, we get probed like any other virtio
+ * device.
+ *
+ * If the host also supports multiple console ports, we check the
+ * config space to see how many ports the host has spawned.  We
+ * initialize each port found.
+ */
+static int virtcons_probe(struct virtio_device *vdev)
+{
+	struct ports_device *portdev;
+	int err;
+	bool multiport;
+	bool early = early_put_chars != NULL;
+
+	/* We only need a config space if features are offered */
+	if (!vdev->config->get &&
+	    (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)
+	     || virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT))) {
+		dev_err(&vdev->dev, "%s failure: config access disabled\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/* Ensure early_put_chars is read now */
+	barrier();
+
+	portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
+	if (!portdev) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	/* Attach this portdev to this virtio_device, and vice-versa. */
+	portdev->vdev = vdev;
+	vdev->priv = portdev;
+
+	portdev->chr_major = register_chrdev(0, "virtio-portsdev",
+					     &portdev_fops);
+	if (portdev->chr_major < 0) {
+		dev_err(&vdev->dev,
+			"Error %d registering chrdev for device %u\n",
+			portdev->chr_major, vdev->index);
+		err = portdev->chr_major;
+		goto free;
+	}
+
+	multiport = false;
+	portdev->max_nr_ports = 1;
+
+	/* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
+	if (!is_rproc_serial(vdev) &&
+	    virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
+				 struct virtio_console_config, max_nr_ports,
+				 &portdev->max_nr_ports) == 0) {
+		multiport = true;
+	}
+
+	err = init_vqs(portdev);
+	if (err < 0) {
+		dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
+		goto free_chrdev;
+	}
+
+	spin_lock_init(&portdev->ports_lock);
+	INIT_LIST_HEAD(&portdev->ports);
+	INIT_LIST_HEAD(&portdev->list);
+
+	virtio_device_ready(portdev->vdev);
+
+	INIT_WORK(&portdev->config_work, &config_work_handler);
+	INIT_WORK(&portdev->control_work, &control_work_handler);
+
+	if (multiport) {
+		unsigned int nr_added_bufs;
+
+		spin_lock_init(&portdev->c_ivq_lock);
+		spin_lock_init(&portdev->c_ovq_lock);
+
+		nr_added_bufs = fill_queue(portdev->c_ivq,
+					   &portdev->c_ivq_lock);
+		if (!nr_added_bufs) {
+			dev_err(&vdev->dev,
+				"Error allocating buffers for control queue\n");
+			/*
+			 * The host might want to notify mgmt sw about device
+			 * add failure.
+			 */
+			__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+					   VIRTIO_CONSOLE_DEVICE_READY, 0);
+			/* Device was functional: we need full cleanup. */
+			virtcons_remove(vdev);
+			return -ENOMEM;
+		}
+	} else {
+		/*
+		 * For backward compatibility: Create a console port
+		 * if we're running on older host.
+		 */
+		add_port(portdev, 0);
+	}
+
+	spin_lock_irq(&pdrvdata_lock);
+	list_add_tail(&portdev->list, &pdrvdata.portdevs);
+	spin_unlock_irq(&pdrvdata_lock);
+
+	__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+			   VIRTIO_CONSOLE_DEVICE_READY, 1);
+
+	/*
+	 * If there was an early virtio console, assume that there are no
+	 * other consoles. We need to wait until the hvc_alloc matches the
+	 * hvc_instantiate, otherwise tty_open will complain, resulting in
+	 * a "Warning: unable to open an initial console" boot failure.
+	 * Without multiport this is done in add_port above. With multiport
+	 * this might take some host<->guest communication - thus we have to
+	 * wait.
+	 */
+	if (multiport && early)
+		wait_for_completion(&early_console_added);
+
+	return 0;
+
+free_chrdev:
+	unregister_chrdev(portdev->chr_major, "virtio-portsdev");
+free:
+	kfree(portdev);
+fail:
+	return err;
+}
+
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static unsigned int features[] = {
+	VIRTIO_CONSOLE_F_SIZE,
+	VIRTIO_CONSOLE_F_MULTIPORT,
+};
+
+static struct virtio_device_id rproc_serial_id_table[] = {
+#if IS_ENABLED(CONFIG_REMOTEPROC)
+	{ VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
+#endif
+	{ 0 },
+};
+
+static unsigned int rproc_serial_features[] = {
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int virtcons_freeze(struct virtio_device *vdev)
+{
+	struct ports_device *portdev;
+	struct port *port;
+
+	portdev = vdev->priv;
+
+	vdev->config->reset(vdev);
+
+	if (use_multiport(portdev))
+		virtqueue_disable_cb(portdev->c_ivq);
+	cancel_work_sync(&portdev->control_work);
+	cancel_work_sync(&portdev->config_work);
+	/*
+	 * Once more: if control_work_handler() was running, it would
+	 * enable the cb as the last step.
+	 */
+	if (use_multiport(portdev))
+		virtqueue_disable_cb(portdev->c_ivq);
+
+	list_for_each_entry(port, &portdev->ports, list) {
+		virtqueue_disable_cb(port->in_vq);
+		virtqueue_disable_cb(port->out_vq);
+		/*
+		 * We'll ask the host later if the new invocation has
+		 * the port opened or closed.
+		 */
+		port->host_connected = false;
+		remove_port_data(port);
+	}
+	remove_vqs(portdev);
+
+	return 0;
+}
+
+static int virtcons_restore(struct virtio_device *vdev)
+{
+	struct ports_device *portdev;
+	struct port *port;
+	int ret;
+
+	portdev = vdev->priv;
+
+	ret = init_vqs(portdev);
+	if (ret)
+		return ret;
+
+	virtio_device_ready(portdev->vdev);
+
+	if (use_multiport(portdev))
+		fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
+
+	list_for_each_entry(port, &portdev->ports, list) {
+		port->in_vq = portdev->in_vqs[port->id];
+		port->out_vq = portdev->out_vqs[port->id];
+
+		fill_queue(port->in_vq, &port->inbuf_lock);
+
+		/* Get port open/close status on the host */
+		send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+
+		/*
+		 * If a port was open at the time of suspending, we
+		 * have to let the host know that it's still open.
+		 */
+		if (port->guest_connected)
+			send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
+	}
+	return 0;
+}
+#endif
+
+static struct virtio_driver virtio_console = {
+	.feature_table = features,
+	.feature_table_size = ARRAY_SIZE(features),
+	.driver.name =	KBUILD_MODNAME,
+	.driver.owner =	THIS_MODULE,
+	.id_table =	id_table,
+	.probe =	virtcons_probe,
+	.remove =	virtcons_remove,
+	.config_changed = config_intr,
+#ifdef CONFIG_PM_SLEEP
+	.freeze =	virtcons_freeze,
+	.restore =	virtcons_restore,
+#endif
+};
+
+static struct virtio_driver virtio_rproc_serial = {
+	.feature_table = rproc_serial_features,
+	.feature_table_size = ARRAY_SIZE(rproc_serial_features),
+	.driver.name =	"virtio_rproc_serial",
+	.driver.owner =	THIS_MODULE,
+	.id_table =	rproc_serial_id_table,
+	.probe =	virtcons_probe,
+	.remove =	virtcons_remove,
+};
+
+static int __init init(void)
+{
+	int err;
+
+	pdrvdata.class = class_create(THIS_MODULE, "virtio-ports");
+	if (IS_ERR(pdrvdata.class)) {
+		err = PTR_ERR(pdrvdata.class);
+		pr_err("Error %d creating virtio-ports class\n", err);
+		return err;
+	}
+
+	pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
+	if (!pdrvdata.debugfs_dir)
+		pr_warn("Error creating debugfs dir for virtio-ports\n");
+	INIT_LIST_HEAD(&pdrvdata.consoles);
+	INIT_LIST_HEAD(&pdrvdata.portdevs);
+
+	err = register_virtio_driver(&virtio_console);
+	if (err < 0) {
+		pr_err("Error %d registering virtio driver\n", err);
+		goto free;
+	}
+	err = register_virtio_driver(&virtio_rproc_serial);
+	if (err < 0) {
+		pr_err("Error %d registering virtio rproc serial driver\n",
+		       err);
+		goto unregister;
+	}
+	return 0;
+unregister:
+	unregister_virtio_driver(&virtio_console);
+free:
+	debugfs_remove_recursive(pdrvdata.debugfs_dir);
+	class_destroy(pdrvdata.class);
+	return err;
+}
+
+static void __exit fini(void)
+{
+	reclaim_dma_bufs();
+
+	unregister_virtio_driver(&virtio_console);
+	unregister_virtio_driver(&virtio_rproc_serial);
+
+	class_destroy(pdrvdata.class);
+	debugfs_remove_recursive(pdrvdata.debugfs_dir);
+}
+module_init(init);
+module_exit(fini);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio console driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/xilinx_hwicap/Makefile b/drivers/char/xilinx_hwicap/Makefile
new file mode 100644
index 0000000..5491cbc
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Xilinx OPB hwicap driver
+#
+
+obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap_m.o
+
+xilinx_hwicap_m-y := xilinx_hwicap.o fifo_icap.o buffer_icap.o
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c
new file mode 100644
index 0000000..35981ca
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/buffer_icap.c
@@ -0,0 +1,361 @@
+/*****************************************************************************
+ *
+ *     Author: Xilinx, Inc.
+ *
+ *     This program is free software; you can redistribute it and/or modify it
+ *     under the terms of the GNU General Public License as published by the
+ *     Free Software Foundation; either version 2 of the License, or (at your
+ *     option) any later version.
+ *
+ *     XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ *     AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ *     SOLUTIONS FOR XILINX DEVICES.  BY PROVIDING THIS DESIGN, CODE,
+ *     OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ *     APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ *     THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ *     AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ *     FOR YOUR IMPLEMENTATION.  XILINX EXPRESSLY DISCLAIMS ANY
+ *     WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ *     IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ *     REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ *     INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ *     FOR A PARTICULAR PURPOSE.
+ *
+ *     (c) Copyright 2003-2008 Xilinx Inc.
+ *     All rights reserved.
+ *
+ *     You should have received a copy of the GNU General Public License along
+ *     with this program; if not, write to the Free Software Foundation, Inc.,
+ *     675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#include "buffer_icap.h"
+
+/* Indicates how many bytes will fit in a buffer. (1 BRAM) */
+#define XHI_MAX_BUFFER_BYTES        2048
+#define XHI_MAX_BUFFER_INTS         (XHI_MAX_BUFFER_BYTES >> 2)
+
+/* File access and error constants */
+#define XHI_DEVICE_READ_ERROR       -1
+#define XHI_DEVICE_WRITE_ERROR      -2
+#define XHI_BUFFER_OVERFLOW_ERROR   -3
+
+#define XHI_DEVICE_READ             0x1
+#define XHI_DEVICE_WRITE            0x0
+
+/* Constants for checking transfer status */
+#define XHI_CYCLE_DONE              0
+#define XHI_CYCLE_EXECUTING         1
+
+/* buffer_icap register offsets */
+
+/* Size of transfer, read & write */
+#define XHI_SIZE_REG_OFFSET        0x800L
+/* offset into bram, read & write */
+#define XHI_BRAM_OFFSET_REG_OFFSET 0x804L
+/* Read not Configure, direction of transfer.  Write only */
+#define XHI_RNC_REG_OFFSET         0x808L
+/* Indicates transfer complete. Read only */
+#define XHI_STATUS_REG_OFFSET      0x80CL
+
+/* Constants for setting the RNC register */
+#define XHI_CONFIGURE              0x0UL
+#define XHI_READBACK               0x1UL
+
+/* Constants for the Done register */
+#define XHI_NOT_FINISHED           0x0UL
+#define XHI_FINISHED               0x1UL
+
+#define XHI_BUFFER_START 0
+
+/**
+ * buffer_icap_get_status - Get the contents of the status register.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * The status register contains the ICAP status and the done bit.
+ *
+ * D8 - cfgerr
+ * D7 - dalign
+ * D6 - rip
+ * D5 - in_abort_l
+ * D4 - Always 1
+ * D3 - Always 1
+ * D2 - Always 1
+ * D1 - Always 1
+ * D0 - Done bit
+ **/
+u32 buffer_icap_get_status(struct hwicap_drvdata *drvdata)
+{
+	return in_be32(drvdata->base_address + XHI_STATUS_REG_OFFSET);
+}
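+
+/*
+ * Editor's note: an illustrative decode of the status word documented
+ * above (hypothetical helpers, not part of the driver; only the bit
+ * positions listed in the comment are assumed).
+ */
+static inline bool example_status_done(u32 status)
+{
+	return status & 0x1;		/* D0: done bit */
+}
+
+static inline bool example_status_cfgerr(u32 status)
+{
+	return status & (1 << 8);	/* D8: configuration error */
+}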
+
+/**
+ * buffer_icap_get_bram - Reads data from the storage buffer bram.
+ * @base_address: contains the base address of the component.
+ * @offset: The word offset from which the data should be read.
+ *
+ * A bram is used as a configuration memory cache.  One frame of data can
+ * be stored in this "storage buffer".
+ **/
+static inline u32 buffer_icap_get_bram(void __iomem *base_address,
+		u32 offset)
+{
+	return in_be32(base_address + (offset << 2));
+}
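+
+/*
+ * Editor's note: the bram accessors take a *word* offset; "offset << 2"
+ * converts it to the byte offset that in_be32()/out_be32() expect, so a
+ * word offset of 5 touches byte offset 20.
+ */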
+
+/**
+ * buffer_icap_busy - Return true if the icap device is busy
+ * @base_address: is the base address of the device
+ *
+ * This queries the low order bit of the status register, which
+ * indicates whether the current configuration or readback operation
+ * has completed.
+ **/
+static inline bool buffer_icap_busy(void __iomem *base_address)
+{
+	u32 status = in_be32(base_address + XHI_STATUS_REG_OFFSET);
+	return (status & 1) == XHI_NOT_FINISHED;
+}
+
+/**
+ * buffer_icap_set_size - Set the size register.
+ * @base_address: is the base address of the device
+ * @data: The size in bytes.
+ *
+ * The size register holds the number of 8 bit bytes to transfer between
+ * bram and the icap (or icap to bram).
+ **/
+static inline void buffer_icap_set_size(void __iomem *base_address,
+		u32 data)
+{
+	out_be32(base_address + XHI_SIZE_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_offset - Set the bram offset register.
+ * @base_address: contains the base address of the device.
+ * @data: is the value to be written to the data register.
+ *
+ * The bram offset register holds the starting bram address to transfer
+ * data from during configuration or write data to during readback.
+ **/
+static inline void buffer_icap_set_offset(void __iomem *base_address,
+		u32 data)
+{
+	out_be32(base_address + XHI_BRAM_OFFSET_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_rnc - Set the RNC (Readback not Configure) register.
+ * @base_address: contains the base address of the device.
+ * @data: is the value to be written to the data register.
+ *
+ * The RNC register determines the direction of the data transfer.  It
+ * controls whether a configuration or readback take place.  Writing to
+ * this register initiates the transfer.  A value of 1 initiates a
+ * readback while writing a value of 0 initiates a configuration.
+ **/
+static inline void buffer_icap_set_rnc(void __iomem *base_address,
+		u32 data)
+{
+	out_be32(base_address + XHI_RNC_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_bram - Write data to the storage buffer bram.
+ * @base_address: contains the base address of the component.
+ * @offset: The word offset at which the data should be written.
+ * @data: The value to be written to the bram offset.
+ *
+ * A bram is used as a configuration memory cache.  One frame of data can
+ * be stored in this "storage buffer".
+ **/
+static inline void buffer_icap_set_bram(void __iomem *base_address,
+		u32 offset, u32 data)
+{
+	out_be32(base_address + (offset << 2), data);
+}
+
+/**
+ * buffer_icap_device_read - Transfer bytes from ICAP to the storage buffer.
+ * @drvdata: a pointer to the drvdata.
+ * @offset: The storage buffer start address.
+ * @count: The number of words (32 bit) to read from the
+ *           device (ICAP).
+ **/
+static int buffer_icap_device_read(struct hwicap_drvdata *drvdata,
+		u32 offset, u32 count)
+{
+
+	s32 retries = 0;
+	void __iomem *base_address = drvdata->base_address;
+
+	if (buffer_icap_busy(base_address))
+		return -EBUSY;
+
+	if ((offset + count) > XHI_MAX_BUFFER_INTS)
+		return -EINVAL;
+
+	/* setSize count*4 to get bytes. */
+	buffer_icap_set_size(base_address, (count << 2));
+	buffer_icap_set_offset(base_address, offset);
+	buffer_icap_set_rnc(base_address, XHI_READBACK);
+
+	while (buffer_icap_busy(base_address)) {
+		retries++;
+		if (retries > XHI_MAX_RETRIES)
+			return -EBUSY;
+	}
+	return 0;
+}
+
+/**
+ * buffer_icap_device_write - Transfer bytes from the storage buffer to the ICAP.
+ * @drvdata: a pointer to the drvdata.
+ * @offset: The storage buffer start address.
+ * @count: The number of words (32 bit) to write to the
+ *           device (ICAP).
+ **/
+static int buffer_icap_device_write(struct hwicap_drvdata *drvdata,
+		u32 offset, u32 count)
+{
+
+	s32 retries = 0;
+	void __iomem *base_address = drvdata->base_address;
+
+	if (buffer_icap_busy(base_address))
+		return -EBUSY;
+
+	if ((offset + count) > XHI_MAX_BUFFER_INTS)
+		return -EINVAL;
+
+	/* setSize count*4 to get bytes. */
+	buffer_icap_set_size(base_address, count << 2);
+	buffer_icap_set_offset(base_address, offset);
+	buffer_icap_set_rnc(base_address, XHI_CONFIGURE);
+
+	while (buffer_icap_busy(base_address)) {
+		retries++;
+		if (retries > XHI_MAX_RETRIES)
+			return -EBUSY;
+	}
+	return 0;
+}
+
+/**
+ * buffer_icap_reset - Reset the logic of the icap device.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * Writing to the status register resets the ICAP logic in an internal
+ * version of the core.  For the version of the core published in EDK,
+ * this is a noop.
+ **/
+void buffer_icap_reset(struct hwicap_drvdata *drvdata)
+{
+	out_be32(drvdata->base_address + XHI_STATUS_REG_OFFSET, 0xFEFE);
+}
+
+/**
+ * buffer_icap_set_configuration - Load a partial bitstream from system memory.
+ * @drvdata: a pointer to the drvdata.
+ * @data: Kernel address of the partial bitstream.
+ * @size: the size of the partial bitstream in 32 bit words.
+ **/
+int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+			     u32 size)
+{
+	int status;
+	s32 buffer_count = 0;
+	bool dirty = false;
+	u32 i;
+	void __iomem *base_address = drvdata->base_address;
+
+	/* Loop through all the data */
+	for (i = 0, buffer_count = 0; i < size; i++) {
+
+		/* Copy data to bram */
+		buffer_icap_set_bram(base_address, buffer_count, data[i]);
+		dirty = true;
+
+		if (buffer_count < XHI_MAX_BUFFER_INTS - 1) {
+			buffer_count++;
+			continue;
+		}
+
+		/* Write data to ICAP */
+		status = buffer_icap_device_write(
+				drvdata,
+				XHI_BUFFER_START,
+				XHI_MAX_BUFFER_INTS);
+		if (status != 0) {
+			/* abort. */
+			buffer_icap_reset(drvdata);
+			return status;
+		}
+
+		buffer_count = 0;
+		dirty = false;
+	}
+
+	/* Write unwritten data to ICAP */
+	if (dirty) {
+		/* Write data to ICAP */
+		status = buffer_icap_device_write(drvdata, XHI_BUFFER_START,
+					     buffer_count);
+		if (status != 0) {
+			/* abort. */
+			buffer_icap_reset(drvdata);
+		}
+		return status;
+	}
+
+	return 0;
+}
+
+/**
+ * buffer_icap_get_configuration - Read configuration data from the device.
+ * @drvdata: a pointer to the drvdata.
+ * @data: Address of the data representing the partial bitstream
+ * @size: the size of the partial bitstream in 32 bit words.
+ **/
+int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+			     u32 size)
+{
+	int status;
+	s32 buffer_count = 0;
+	u32 i;
+	void __iomem *base_address = drvdata->base_address;
+
+	/* Loop through all the data */
+	for (i = 0, buffer_count = XHI_MAX_BUFFER_INTS; i < size; i++) {
+		if (buffer_count == XHI_MAX_BUFFER_INTS) {
+			u32 words_remaining = size - i;
+			u32 words_to_read =
+				words_remaining < XHI_MAX_BUFFER_INTS ?
+				words_remaining : XHI_MAX_BUFFER_INTS;
+
+			/* Read data from ICAP */
+			status = buffer_icap_device_read(
+					drvdata,
+					XHI_BUFFER_START,
+					words_to_read);
+			if (status != 0) {
+				/* abort. */
+				buffer_icap_reset(drvdata);
+				return status;
+			}
+
+			buffer_count = 0;
+		}
+
+		/* Copy data from bram */
+		data[i] = buffer_icap_get_bram(base_address, buffer_count);
+		buffer_count++;
+	}
+
+	return 0;
+}
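+
+/*
+ * Editor's note: a worked example of the chunking above.  With
+ * XHI_MAX_BUFFER_INTS = 512 (2048 bytes / 4), a 1200-word readback
+ * issues three device reads of 512, 512 and 176 words, draining the
+ * bram into @data between reads.
+ */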
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.h b/drivers/char/xilinx_hwicap/buffer_icap.h
new file mode 100644
index 0000000..d4f419e
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/buffer_icap.h
@@ -0,0 +1,54 @@
+/*****************************************************************************
+ *
+ *     Author: Xilinx, Inc.
+ *
+ *     This program is free software; you can redistribute it and/or modify it
+ *     under the terms of the GNU General Public License as published by the
+ *     Free Software Foundation; either version 2 of the License, or (at your
+ *     option) any later version.
+ *
+ *     XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ *     AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ *     SOLUTIONS FOR XILINX DEVICES.  BY PROVIDING THIS DESIGN, CODE,
+ *     OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ *     APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ *     THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ *     AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ *     FOR YOUR IMPLEMENTATION.  XILINX EXPRESSLY DISCLAIMS ANY
+ *     WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ *     IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ *     REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ *     INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ *     FOR A PARTICULAR PURPOSE.
+ *
+ *     (c) Copyright 2003-2008 Xilinx Inc.
+ *     All rights reserved.
+ *
+ *     You should have received a copy of the GNU General Public License along
+ *     with this program; if not, write to the Free Software Foundation, Inc.,
+ *     675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_BUFFER_ICAP_H_	/* prevent circular inclusions */
+#define XILINX_BUFFER_ICAP_H_	/* by using protection macros */
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include "xilinx_hwicap.h"
+
+/* Loads a partial bitstream from system memory. */
+int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+			     u32 size);
+
+/* Reads a partial bitstream from the device into system memory. */
+int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+			     u32 size);
+
+u32 buffer_icap_get_status(struct hwicap_drvdata *drvdata);
+void buffer_icap_reset(struct hwicap_drvdata *drvdata);
+
+#endif
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c
new file mode 100644
index 0000000..02225eb
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/fifo_icap.c
@@ -0,0 +1,393 @@
+/*****************************************************************************
+ *
+ *     Author: Xilinx, Inc.
+ *
+ *     This program is free software; you can redistribute it and/or modify it
+ *     under the terms of the GNU General Public License as published by the
+ *     Free Software Foundation; either version 2 of the License, or (at your
+ *     option) any later version.
+ *
+ *     XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ *     AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ *     SOLUTIONS FOR XILINX DEVICES.  BY PROVIDING THIS DESIGN, CODE,
+ *     OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ *     APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ *     THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ *     AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ *     FOR YOUR IMPLEMENTATION.  XILINX EXPRESSLY DISCLAIMS ANY
+ *     WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ *     IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ *     REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ *     INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ *     FOR A PARTICULAR PURPOSE.
+ *
+ *     (c) Copyright 2007-2008 Xilinx Inc.
+ *     All rights reserved.
+ *
+ *     You should have received a copy of the GNU General Public License along
+ *     with this program; if not, write to the Free Software Foundation, Inc.,
+ *     675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#include "fifo_icap.h"
+
+/* Register offsets for the XHwIcap device. */
+#define XHI_GIER_OFFSET	0x1C  /* Device Global Interrupt Enable Reg */
+#define XHI_IPISR_OFFSET 0x20  /* Interrupt Status Register */
+#define XHI_IPIER_OFFSET 0x28  /* Interrupt Enable Register */
+#define XHI_WF_OFFSET 0x100 /* Write FIFO */
+#define XHI_RF_OFFSET 0x104 /* Read FIFO */
+#define XHI_SZ_OFFSET 0x108 /* Size Register */
+#define XHI_CR_OFFSET 0x10C /* Control Register */
+#define XHI_SR_OFFSET 0x110 /* Status Register */
+#define XHI_WFV_OFFSET 0x114 /* Write FIFO Vacancy Register */
+#define XHI_RFO_OFFSET 0x118 /* Read FIFO Occupancy Register */
+
+/* Device Global Interrupt Enable Register (GIER) bit definitions */
+
+#define XHI_GIER_GIE_MASK 0x80000000 /* Global Interrupt enable Mask */
+
+/**
+ * HwIcap Device Interrupt Status/Enable Registers
+ *
+ * Interrupt Status Register (IPISR) : This register holds the
+ * interrupt status flags for the device. These bits are toggle on
+ * write.
+ *
+ * Interrupt Enable Register (IPIER) : This register is used to enable
+ * interrupt sources for the device.
+ * Writing a '1' to a bit enables the corresponding interrupt.
+ * Writing a '0' to a bit disables the corresponding interrupt.
+ *
+ * IPISR/IPIER registers have the same bit definitions and are only defined
+ * once.
+ */
+#define XHI_IPIXR_RFULL_MASK 0x00000008 /* Read FIFO Full */
+#define XHI_IPIXR_WEMPTY_MASK 0x00000004 /* Write FIFO Empty */
+#define XHI_IPIXR_RDP_MASK 0x00000002 /* Read FIFO half full */
+#define XHI_IPIXR_WRP_MASK 0x00000001 /* Write FIFO half full */
+#define XHI_IPIXR_ALL_MASK 0x0000000F /* Mask of all interrupts */
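+
+/*
+ * Editor's note: "toggle on write" means a pending IPISR bit is
+ * cleared by writing a '1' back to it.  A hypothetical ack sketch
+ * (the driver as shown runs polled and never enables these
+ * interrupts):
+ */
+static inline void example_ack_ipisr(void __iomem *base_address)
+{
+	u32 pending = in_be32(base_address + XHI_IPISR_OFFSET);
+
+	out_be32(base_address + XHI_IPISR_OFFSET, pending);
+}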
+
+/* Control Register (CR) */
+#define XHI_CR_SW_RESET_MASK 0x00000008 /* SW Reset Mask */
+#define XHI_CR_FIFO_CLR_MASK 0x00000004 /* FIFO Clear Mask */
+#define XHI_CR_READ_MASK 0x00000002 /* Read from ICAP to FIFO */
+#define XHI_CR_WRITE_MASK 0x00000001 /* Write from FIFO to ICAP */
+
+
+#define XHI_WFO_MAX_VACANCY 1024 /* Max Write FIFO Vacancy, in words */
+#define XHI_RFO_MAX_OCCUPANCY 256 /* Max Read FIFO Occupancy, in words */
+/* The maximum amount we can request from fifo_icap_get_configuration
+   at once, in words. */
+#define XHI_MAX_READ_TRANSACTION_WORDS 0xFFF
+
+
+/**
+ * fifo_icap_fifo_write - Write data to the write FIFO.
+ * @drvdata: a pointer to the drvdata.
+ * @data: the 32-bit value to be written to the FIFO.
+ *
+ * This function will silently fail if the fifo is full.
+ **/
+static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata,
+		u32 data)
+{
+	dev_dbg(drvdata->dev, "fifo_write: %x\n", data);
+	out_be32(drvdata->base_address + XHI_WF_OFFSET, data);
+}
+
+/**
+ * fifo_icap_fifo_read - Read data from the Read FIFO.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * This function will silently fail if the fifo is empty.
+ **/
+static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata)
+{
+	u32 data = in_be32(drvdata->base_address + XHI_RF_OFFSET);
+	dev_dbg(drvdata->dev, "fifo_read: %x\n", data);
+	return data;
+}
+
+/**
+ * fifo_icap_set_read_size - Set the size register.
+ * @drvdata: a pointer to the drvdata.
+ * @data: the size of the following read transaction, in words.
+ **/
+static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata,
+		u32 data)
+{
+	out_be32(drvdata->base_address + XHI_SZ_OFFSET, data);
+}
+
+/**
+ * fifo_icap_start_config - Initiate a configuration (write) to the device.
+ * @drvdata: a pointer to the drvdata.
+ **/
+static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata)
+{
+	out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_WRITE_MASK);
+	dev_dbg(drvdata->dev, "configuration started\n");
+}
+
+/**
+ * fifo_icap_start_readback - Initiate a readback from the device.
+ * @drvdata: a pointer to the drvdata.
+ **/
+static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata)
+{
+	out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_READ_MASK);
+	dev_dbg(drvdata->dev, "readback started\n");
+}
+
+/**
+ * fifo_icap_get_status - Get the contents of the status register.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * The status register contains the ICAP status and the done bit.
+ *
+ * D8 - cfgerr
+ * D7 - dalign
+ * D6 - rip
+ * D5 - in_abort_l
+ * D4 - Always 1
+ * D3 - Always 1
+ * D2 - Always 1
+ * D1 - Always 1
+ * D0 - Done bit
+ **/
+u32 fifo_icap_get_status(struct hwicap_drvdata *drvdata)
+{
+	u32 status = in_be32(drvdata->base_address + XHI_SR_OFFSET);
+	dev_dbg(drvdata->dev, "Getting status = %x\n", status);
+	return status;
+}
+
+/**
+ * fifo_icap_busy - Return true if the ICAP is still processing a transaction.
+ * @drvdata: a pointer to the drvdata.
+ **/
+static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata)
+{
+	u32 status = in_be32(drvdata->base_address + XHI_SR_OFFSET);
+	return (status & XHI_SR_DONE_MASK) ? 0 : 1;
+}
+
+/**
+ * fifo_icap_write_fifo_vacancy - Query the write fifo available space.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * Return the number of words that can be safely pushed into the write fifo.
+ **/
+static inline u32 fifo_icap_write_fifo_vacancy(
+		struct hwicap_drvdata *drvdata)
+{
+	return in_be32(drvdata->base_address + XHI_WFV_OFFSET);
+}
+
+/**
+ * fifo_icap_read_fifo_occupancy - Query the read fifo available data.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * Return the number of words that can be safely read from the read fifo.
+ **/
+static inline u32 fifo_icap_read_fifo_occupancy(
+		struct hwicap_drvdata *drvdata)
+{
+	return in_be32(drvdata->base_address + XHI_RFO_OFFSET);
+}
+
+/**
+ * fifo_icap_set_configuration - Send configuration data to the ICAP.
+ * @drvdata: a pointer to the drvdata.
+ * @frame_buffer: a pointer to the data to be written to the
+ *		ICAP device.
+ * @num_words: the number of words (32 bit) to write to the ICAP
+ *		device.
+ *
+ * This function writes the given user data to the Write FIFO in
+ * polled mode and starts the transfer of the data to
+ * the ICAP device.
+ **/
+int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata,
+		u32 *frame_buffer, u32 num_words)
+{
+
+	u32 write_fifo_vacancy = 0;
+	u32 retries = 0;
+	u32 remaining_words;
+
+	dev_dbg(drvdata->dev, "fifo_set_configuration\n");
+
+	/*
+	 * Check if the ICAP device is Busy with the last Read/Write
+	 */
+	if (fifo_icap_busy(drvdata))
+		return -EBUSY;
+
+	/*
+	 * Set up the buffer pointer and the words to be transferred.
+	 */
+	remaining_words = num_words;
+
+	while (remaining_words > 0) {
+		/*
+		 * Wait until there is room in the write FIFO.
+		 */
+		while (write_fifo_vacancy == 0) {
+			write_fifo_vacancy =
+				fifo_icap_write_fifo_vacancy(drvdata);
+			retries++;
+			if (retries > XHI_MAX_RETRIES)
+				return -EIO;
+		}
+
+		/*
+		 * Write data into the Write FIFO.
+		 */
+		while ((write_fifo_vacancy != 0) &&
+				(remaining_words > 0)) {
+			fifo_icap_fifo_write(drvdata, *frame_buffer);
+
+			remaining_words--;
+			write_fifo_vacancy--;
+			frame_buffer++;
+		}
+		/* Start pushing whatever is in the FIFO into the ICAP. */
+		fifo_icap_start_config(drvdata);
+	}
+
+	/* Wait until the write has finished. */
+	while (fifo_icap_busy(drvdata)) {
+		retries++;
+		if (retries > XHI_MAX_RETRIES)
+			break;
+	}
+
+	dev_dbg(drvdata->dev, "done fifo_set_configuration\n");
+
+	/*
+	 * If the requested number of words have not been read from
+	 * the device then indicate failure.
+	 */
+	if (remaining_words != 0)
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * fifo_icap_get_configuration - Read configuration data from the device.
+ * @drvdata: a pointer to the drvdata.
+ * @frame_buffer: Address of the buffer to receive the partial bitstream
+ * @num_words: the size of the partial bitstream in 32 bit words.
+ *
+ * This function reads the specified number of words from the ICAP device in
+ * the polled mode.
+ */
+int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata,
+		u32 *frame_buffer, u32 num_words)
+{
+
+	u32 read_fifo_occupancy = 0;
+	u32 retries = 0;
+	u32 *data = frame_buffer;
+	u32 remaining_words;
+	u32 words_to_read;
+
+	dev_dbg(drvdata->dev, "fifo_get_configuration\n");
+
+	/*
+	 * Check if the ICAP device is Busy with the last Write/Read
+	 */
+	if (fifo_icap_busy(drvdata))
+		return -EBUSY;
+
+	remaining_words = num_words;
+
+	while (remaining_words > 0) {
+		words_to_read = remaining_words;
+		/* The hardware has a limit on the number of words
+		   that can be read at one time.  */
+		if (words_to_read > XHI_MAX_READ_TRANSACTION_WORDS)
+			words_to_read = XHI_MAX_READ_TRANSACTION_WORDS;
+
+		remaining_words -= words_to_read;
+
+		fifo_icap_set_read_size(drvdata, words_to_read);
+		fifo_icap_start_readback(drvdata);
+
+		while (words_to_read > 0) {
+			/* Wait until we have some data in the fifo. */
+			while (read_fifo_occupancy == 0) {
+				read_fifo_occupancy =
+					fifo_icap_read_fifo_occupancy(drvdata);
+				retries++;
+				if (retries > XHI_MAX_RETRIES)
+					return -EIO;
+			}
+
+			if (read_fifo_occupancy > words_to_read)
+				read_fifo_occupancy = words_to_read;
+
+			words_to_read -= read_fifo_occupancy;
+
+			/* Read the data from the Read FIFO. */
+			while (read_fifo_occupancy != 0) {
+				*data++ = fifo_icap_fifo_read(drvdata);
+				read_fifo_occupancy--;
+			}
+		}
+	}
+
+	dev_dbg(drvdata->dev, "done fifo_get_configuration\n");
+
+	return 0;
+}
+
+/**
+ * fifo_icap_reset - Reset the logic of the icap device.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * This function forces the software reset of the complete HWICAP device.
+ * All the registers will return to the default value and the FIFO is also
+ * flushed as a part of this software reset.
+ */
+void fifo_icap_reset(struct hwicap_drvdata *drvdata)
+{
+	u32 reg_data;
+	/*
+	 * Reset the device by setting/clearing the RESET bit in the
+	 * Control Register.
+	 */
+	reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET);
+
+	out_be32(drvdata->base_address + XHI_CR_OFFSET,
+				reg_data | XHI_CR_SW_RESET_MASK);
+
+	out_be32(drvdata->base_address + XHI_CR_OFFSET,
+				reg_data & (~XHI_CR_SW_RESET_MASK));
+
+}
+
+/**
+ * fifo_icap_flush_fifo - This function flushes the FIFOs in the device.
+ * @drvdata: a pointer to the drvdata.
+ */
+void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata)
+{
+	u32 reg_data;
+	/*
+	 * Flush the FIFO by setting/clearing the FIFO Clear bit in the
+	 * Control Register.
+	 */
+	reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET);
+
+	out_be32(drvdata->base_address + XHI_CR_OFFSET,
+				reg_data | XHI_CR_FIFO_CLR_MASK);
+
+	out_be32(drvdata->base_address + XHI_CR_OFFSET,
+				reg_data & (~XHI_CR_FIFO_CLR_MASK));
+}
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.h b/drivers/char/xilinx_hwicap/fifo_icap.h
new file mode 100644
index 0000000..4c9dd9a
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/fifo_icap.h
@@ -0,0 +1,59 @@
+/*****************************************************************************
+ *
+ *     Author: Xilinx, Inc.
+ *
+ *     This program is free software; you can redistribute it and/or modify it
+ *     under the terms of the GNU General Public License as published by the
+ *     Free Software Foundation; either version 2 of the License, or (at your
+ *     option) any later version.
+ *
+ *     XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ *     AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ *     SOLUTIONS FOR XILINX DEVICES.  BY PROVIDING THIS DESIGN, CODE,
+ *     OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ *     APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ *     THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ *     AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ *     FOR YOUR IMPLEMENTATION.  XILINX EXPRESSLY DISCLAIMS ANY
+ *     WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ *     IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ *     REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ *     INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ *     FOR A PARTICULAR PURPOSE.
+ *
+ *     (c) Copyright 2007-2008 Xilinx Inc.
+ *     All rights reserved.
+ *
+ *     You should have received a copy of the GNU General Public License along
+ *     with this program; if not, write to the Free Software Foundation, Inc.,
+ *     675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_FIFO_ICAP_H_	/* prevent circular inclusions */
+#define XILINX_FIFO_ICAP_H_	/* by using protection macros */
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include "xilinx_hwicap.h"
+
+/* Reads integers from the device into the storage buffer. */
+int fifo_icap_get_configuration(
+		struct hwicap_drvdata *drvdata,
+		u32 *frame_buffer,
+		u32 num_words);
+
+/* Writes integers to the device from the storage buffer. */
+int fifo_icap_set_configuration(
+		struct hwicap_drvdata *drvdata,
+		u32 *frame_buffer,
+		u32 num_words);
+
+u32 fifo_icap_get_status(struct hwicap_drvdata *drvdata);
+void fifo_icap_reset(struct hwicap_drvdata *drvdata);
+void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata);
+
+#endif
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
new file mode 100644
index 0000000..067396b
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -0,0 +1,897 @@
+/*****************************************************************************
+ *
+ *     Author: Xilinx, Inc.
+ *
+ *     This program is free software; you can redistribute it and/or modify it
+ *     under the terms of the GNU General Public License as published by the
+ *     Free Software Foundation; either version 2 of the License, or (at your
+ *     option) any later version.
+ *
+ *     XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ *     AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ *     SOLUTIONS FOR XILINX DEVICES.  BY PROVIDING THIS DESIGN, CODE,
+ *     OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ *     APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ *     THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ *     AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ *     FOR YOUR IMPLEMENTATION.  XILINX EXPRESSLY DISCLAIMS ANY
+ *     WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ *     IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ *     REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ *     INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ *     FOR A PARTICULAR PURPOSE.
+ *
+ *     (c) Copyright 2002 Xilinx Inc., Systems Engineering Group
+ *     (c) Copyright 2004 Xilinx Inc., Systems Engineering Group
+ *     (c) Copyright 2007-2008 Xilinx Inc.
+ *     All rights reserved.
+ *
+ *     You should have received a copy of the GNU General Public License along
+ *     with this program; if not, write to the Free Software Foundation, Inc.,
+ *     675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+/*
+ * This is the code behind /dev/icap* -- it allows a user-space
+ * application to use the Xilinx ICAP subsystem.
+ *
+ * The following operations are possible:
+ *
+ * open         open the port and initialize for access.
+ * release      release port
+ * write        Write a bitstream to the configuration processor.
+ * read         Read a data stream from the configuration processor.
+ *
+ * After being opened, the port is initialized and accessed to avoid a
+ * corrupted first read which may occur with some hardware.  The port
+ * is left in a desynched state, requiring that a synch sequence be
+ * transmitted before any valid configuration data.  A user will have
+ * exclusive access to the device while it remains open, and the state
+ * of the ICAP cannot be guaranteed after the device is closed.  Note
+ * that a complete reset of the core and the state of the ICAP cannot
+ * be performed on many versions of the cores, hence users of this
+ * device should avoid making inconsistent accesses to the device.  In
+ * particular, accessing the read interface, without first generating
+ * a write containing a readback packet can leave the ICAP in an
+ * inaccessible state.
+ *
+ * Note that in order to use the read interface, it is first necessary
+ * to write a request packet to the write interface; i.e., it is not
+ * possible to simply read back the bitstream (or any configuration
+ * bits) from a device without specifically requesting them first.
+ * The code to craft such packets is intended to be part of the
+ * user-space application code that uses this device.  The simplest
+ * way to use this interface is simply:
+ *
+ * cp foo.bit /dev/icap0
+ *
+ * Note that unless foo.bit is an appropriately constructed partial
+ * bitstream, this has a high likelihood of overwriting the design
+ * currently programmed in the FPGA.
+ */
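+
+/*
+ * Editor's note: a minimal user-space sketch of the flow described
+ * above (illustrative only; error handling and the construction of a
+ * valid partial bitstream are omitted):
+ *
+ *	int fd = open("/dev/icap0", O_WRONLY);
+ *	write(fd, bitstream, bitstream_len);	// must start with a sync sequence
+ *	close(fd);				// flushes the tail and desyncs the ICAP
+ */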
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/sysctl.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_OF
+/* For open firmware. */
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#endif
+
+#include "xilinx_hwicap.h"
+#include "buffer_icap.h"
+#include "fifo_icap.h"
+
+#define DRIVER_NAME "icap"
+
+#define HWICAP_REGS   (0x10000)
+
+#define XHWICAP_MAJOR 259
+#define XHWICAP_MINOR 0
+#define HWICAP_DEVICES 1
+
+static DEFINE_MUTEX(hwicap_mutex);
+
+/* An array, which is set to true when the device is registered. */
+static bool probed_devices[HWICAP_DEVICES];
+static struct mutex icap_sem;
+
+static struct class *icap_class;
+
+#define UNIMPLEMENTED 0xFFFF
+
+static const struct config_registers v2_config_registers = {
+	.CRC = 0,
+	.FAR = 1,
+	.FDRI = 2,
+	.FDRO = 3,
+	.CMD = 4,
+	.CTL = 5,
+	.MASK = 6,
+	.STAT = 7,
+	.LOUT = 8,
+	.COR = 9,
+	.MFWR = 10,
+	.FLR = 11,
+	.KEY = 12,
+	.CBC = 13,
+	.IDCODE = 14,
+	.AXSS = UNIMPLEMENTED,
+	.C0R_1 = UNIMPLEMENTED,
+	.CSOB = UNIMPLEMENTED,
+	.WBSTAR = UNIMPLEMENTED,
+	.TIMER = UNIMPLEMENTED,
+	.BOOTSTS = UNIMPLEMENTED,
+	.CTL_1 = UNIMPLEMENTED,
+};
+
+static const struct config_registers v4_config_registers = {
+	.CRC = 0,
+	.FAR = 1,
+	.FDRI = 2,
+	.FDRO = 3,
+	.CMD = 4,
+	.CTL = 5,
+	.MASK = 6,
+	.STAT = 7,
+	.LOUT = 8,
+	.COR = 9,
+	.MFWR = 10,
+	.FLR = UNIMPLEMENTED,
+	.KEY = UNIMPLEMENTED,
+	.CBC = 11,
+	.IDCODE = 12,
+	.AXSS = 13,
+	.C0R_1 = UNIMPLEMENTED,
+	.CSOB = UNIMPLEMENTED,
+	.WBSTAR = UNIMPLEMENTED,
+	.TIMER = UNIMPLEMENTED,
+	.BOOTSTS = UNIMPLEMENTED,
+	.CTL_1 = UNIMPLEMENTED,
+};
+
+static const struct config_registers v5_config_registers = {
+	.CRC = 0,
+	.FAR = 1,
+	.FDRI = 2,
+	.FDRO = 3,
+	.CMD = 4,
+	.CTL = 5,
+	.MASK = 6,
+	.STAT = 7,
+	.LOUT = 8,
+	.COR = 9,
+	.MFWR = 10,
+	.FLR = UNIMPLEMENTED,
+	.KEY = UNIMPLEMENTED,
+	.CBC = 11,
+	.IDCODE = 12,
+	.AXSS = 13,
+	.C0R_1 = 14,
+	.CSOB = 15,
+	.WBSTAR = 16,
+	.TIMER = 17,
+	.BOOTSTS = 18,
+	.CTL_1 = 19,
+};
+
+static const struct config_registers v6_config_registers = {
+	.CRC = 0,
+	.FAR = 1,
+	.FDRI = 2,
+	.FDRO = 3,
+	.CMD = 4,
+	.CTL = 5,
+	.MASK = 6,
+	.STAT = 7,
+	.LOUT = 8,
+	.COR = 9,
+	.MFWR = 10,
+	.FLR = UNIMPLEMENTED,
+	.KEY = UNIMPLEMENTED,
+	.CBC = 11,
+	.IDCODE = 12,
+	.AXSS = 13,
+	.C0R_1 = 14,
+	.CSOB = 15,
+	.WBSTAR = 16,
+	.TIMER = 17,
+	.BOOTSTS = 22,
+	.CTL_1 = 24,
+};
+
+/**
+ * hwicap_command_desync - Send a DESYNC command to the ICAP port.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * Returns: '0' on success and failure value on error
+ *
+ * This command desynchronizes the ICAP.  After this command, a
+ * bitstream containing a NULL packet, followed by a SYNCH packet, is
+ * required before the ICAP will recognize commands.
+ */
+static int hwicap_command_desync(struct hwicap_drvdata *drvdata)
+{
+	u32 buffer[4];
+	u32 index = 0;
+
+	/*
+	 * Create the data to be written to the ICAP.
+	 */
+	buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1;
+	buffer[index++] = XHI_CMD_DESYNCH;
+	buffer[index++] = XHI_NOOP_PACKET;
+	buffer[index++] = XHI_NOOP_PACKET;
+
+	/*
+	 * Write the data to the FIFO and initiate the transfer of data present
+	 * in the FIFO to the ICAP device.
+	 */
+	return drvdata->config->set_configuration(drvdata,
+			&buffer[0], index);
+}
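+
+/*
+ * Editor's note: hwicap_type_1_write() (from xilinx_hwicap.h) builds a
+ * type-1 packet header addressing the given configuration register;
+ * the "| 1" above fills in the word count, announcing that exactly one
+ * data word (here, the DESYNC command) follows.
+ */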
+
+/**
+ * hwicap_get_configuration_register - Query a configuration register.
+ * @drvdata: a pointer to the drvdata.
+ * @reg: a constant which represents the configuration
+ * register value to be returned.
+ * Examples: XHI_IDCODE, XHI_FLR.
+ * @reg_data: returns the value of the register.
+ *
+ * Returns: '0' on success and failure value on error
+ *
+ * Sends a query packet to the ICAP and then receives the response.
+ * The icap is left in Synched state.
+ */
+static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
+		u32 reg, u32 *reg_data)
+{
+	int status;
+	u32 buffer[6];
+	u32 index = 0;
+
+	/*
+	 * Create the data to be written to the ICAP.
+	 */
+	buffer[index++] = XHI_DUMMY_PACKET;
+	buffer[index++] = XHI_NOOP_PACKET;
+	buffer[index++] = XHI_SYNC_PACKET;
+	buffer[index++] = XHI_NOOP_PACKET;
+	buffer[index++] = XHI_NOOP_PACKET;
+
+	/*
+	 * Write the data to the FIFO and initiate the transfer of data present
+	 * in the FIFO to the ICAP device.
+	 */
+	status = drvdata->config->set_configuration(drvdata,
+						    &buffer[0], index);
+	if (status)
+		return status;
+
+	/* If the syncword was not found, then we need to start over. */
+	status = drvdata->config->get_status(drvdata);
+	if ((status & XHI_SR_DALIGN_MASK) != XHI_SR_DALIGN_MASK)
+		return -EIO;
+
+	index = 0;
+	buffer[index++] = hwicap_type_1_read(reg) | 1;
+	buffer[index++] = XHI_NOOP_PACKET;
+	buffer[index++] = XHI_NOOP_PACKET;
+
+	/*
+	 * Write the data to the FIFO and initiate the transfer of data present
+	 * in the FIFO to the ICAP device.
+	 */
+	status = drvdata->config->set_configuration(drvdata,
+			&buffer[0], index);
+	if (status)
+		return status;
+
+	/*
+	 * Read the configuration register
+	 */
+	status = drvdata->config->get_configuration(drvdata, reg_data, 1);
+	if (status)
+		return status;
+
+	return 0;
+}
+
+static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
+{
+	int status;
+	u32 idcode;
+
+	dev_dbg(drvdata->dev, "initializing\n");
+
+	/* Abort any current transaction, to make sure we have the
+	 * ICAP in a good state.
+	 */
+	dev_dbg(drvdata->dev, "Reset...\n");
+	drvdata->config->reset(drvdata);
+
+	dev_dbg(drvdata->dev, "Desync...\n");
+	status = hwicap_command_desync(drvdata);
+	if (status)
+		return status;
+
+	/* Attempt to read the IDCODE from ICAP.  This
+	 * may not be returned correctly, due to the design of the
+	 * hardware.
+	 */
+	dev_dbg(drvdata->dev, "Reading IDCODE...\n");
+	status = hwicap_get_configuration_register(
+			drvdata, drvdata->config_regs->IDCODE, &idcode);
+	dev_dbg(drvdata->dev, "IDCODE = %x\n", idcode);
+	if (status)
+		return status;
+
+	dev_dbg(drvdata->dev, "Desync...\n");
+	status = hwicap_command_desync(drvdata);
+	if (status)
+		return status;
+
+	return 0;
+}
+
+static ssize_t
+hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+	struct hwicap_drvdata *drvdata = file->private_data;
+	ssize_t bytes_to_read = 0;
+	u32 *kbuf;
+	u32 words;
+	u32 bytes_remaining;
+	int status;
+
+	status = mutex_lock_interruptible(&drvdata->sem);
+	if (status)
+		return status;
+
+	if (drvdata->read_buffer_in_use) {
+		/* If there are leftover bytes in the buffer, just */
+		/* return them and don't try to read more from the */
+		/* ICAP device. */
+		bytes_to_read =
+			(count < drvdata->read_buffer_in_use) ? count :
+			drvdata->read_buffer_in_use;
+
+		/* Return the data currently in the read buffer. */
+		if (copy_to_user(buf, drvdata->read_buffer, bytes_to_read)) {
+			status = -EFAULT;
+			goto error;
+		}
+		drvdata->read_buffer_in_use -= bytes_to_read;
+		memmove(drvdata->read_buffer,
+		       drvdata->read_buffer + bytes_to_read,
+		       4 - bytes_to_read);
+	} else {
+		/* Get new data from the ICAP, and return what was requested. */
+		kbuf = (u32 *) get_zeroed_page(GFP_KERNEL);
+		if (!kbuf) {
+			status = -ENOMEM;
+			goto error;
+		}
+
+		/* The ICAP device is only able to read complete */
+		/* words.  If a number of bytes that do not correspond */
+		/* to complete words is requested, then we read enough */
+		/* words to get the required number of bytes, and then */
+		/* save the remaining bytes for the next read. */
+
+		/* Determine the number of words to read, rounding up */
+		/* if necessary. */
+		words = ((count + 3) >> 2);
+		bytes_to_read = words << 2;
+
+		if (bytes_to_read > PAGE_SIZE)
+			bytes_to_read = PAGE_SIZE;
+
+		/* Ensure we only read a complete number of words. */
+		bytes_remaining = bytes_to_read & 3;
+		bytes_to_read &= ~3;
+		words = bytes_to_read >> 2;
+
+		status = drvdata->config->get_configuration(drvdata,
+				kbuf, words);
+
+		/* If we didn't read correctly, then bail out. */
+		if (status) {
+			free_page((unsigned long)kbuf);
+			goto error;
+		}
+
+		/* If we fail to return the data to the user, then bail out. */
+		if (copy_to_user(buf, kbuf, bytes_to_read)) {
+			free_page((unsigned long)kbuf);
+			status = -EFAULT;
+			goto error;
+		}
+		memcpy(drvdata->read_buffer,
+		       kbuf,
+		       bytes_remaining);
+		drvdata->read_buffer_in_use = bytes_remaining;
+		free_page((unsigned long)kbuf);
+	}
+	status = bytes_to_read;
+ error:
+	mutex_unlock(&drvdata->sem);
+	return status;
+}
+
+static ssize_t
+hwicap_write(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct hwicap_drvdata *drvdata = file->private_data;
+	ssize_t written = 0;
+	ssize_t left = count;
+	u32 *kbuf;
+	ssize_t len;
+	ssize_t status;
+
+	status = mutex_lock_interruptible(&drvdata->sem);
+	if (status)
+		return status;
+
+	left += drvdata->write_buffer_in_use;
+
+	/* Only write multiples of 4 bytes. */
+	if (left < 4) {
+		status = 0;
+		goto error;
+	}
+
+	kbuf = (u32 *) __get_free_page(GFP_KERNEL);
+	if (!kbuf) {
+		status = -ENOMEM;
+		goto error;
+	}
+
+	while (left > 3) {
+		/* only write multiples of 4 bytes, so there might */
+		/* be as many as 3 bytes left (at the end). */
+		len = left;
+
+		if (len > PAGE_SIZE)
+			len = PAGE_SIZE;
+		len &= ~3;
+
+		if (drvdata->write_buffer_in_use) {
+			memcpy(kbuf, drvdata->write_buffer,
+					drvdata->write_buffer_in_use);
+			if (copy_from_user(
+			    (((char *)kbuf) + drvdata->write_buffer_in_use),
+			    buf + written,
+			    len - (drvdata->write_buffer_in_use))) {
+				free_page((unsigned long)kbuf);
+				status = -EFAULT;
+				goto error;
+			}
+		} else {
+			if (copy_from_user(kbuf, buf + written, len)) {
+				free_page((unsigned long)kbuf);
+				status = -EFAULT;
+				goto error;
+			}
+		}
+
+		status = drvdata->config->set_configuration(drvdata,
+				kbuf, len >> 2);
+
+		if (status) {
+			free_page((unsigned long)kbuf);
+			status = -EFAULT;
+			goto error;
+		}
+		if (drvdata->write_buffer_in_use) {
+			len -= drvdata->write_buffer_in_use;
+			left -= drvdata->write_buffer_in_use;
+			drvdata->write_buffer_in_use = 0;
+		}
+		written += len;
+		left -= len;
+	}
+	if ((left > 0) && (left < 4)) {
+		if (!copy_from_user(drvdata->write_buffer,
+						buf + written, left)) {
+			drvdata->write_buffer_in_use = left;
+			written += left;
+			left = 0;
+		}
+	}
+
+	free_page((unsigned long)kbuf);
+	status = written;
+ error:
+	mutex_unlock(&drvdata->sem);
+	return status;
+}
+
+static int hwicap_open(struct inode *inode, struct file *file)
+{
+	struct hwicap_drvdata *drvdata;
+	int status;
+
+	mutex_lock(&hwicap_mutex);
+	drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev);
+
+	status = mutex_lock_interruptible(&drvdata->sem);
+	if (status)
+		goto out;
+
+	if (drvdata->is_open) {
+		status = -EBUSY;
+		goto error;
+	}
+
+	status = hwicap_initialize_hwicap(drvdata);
+	if (status) {
+		dev_err(drvdata->dev, "Failed to open file\n");
+		goto error;
+	}
+
+	file->private_data = drvdata;
+	drvdata->write_buffer_in_use = 0;
+	drvdata->read_buffer_in_use = 0;
+	drvdata->is_open = 1;
+
+ error:
+	mutex_unlock(&drvdata->sem);
+ out:
+	mutex_unlock(&hwicap_mutex);
+	return status;
+}
+
+static int hwicap_release(struct inode *inode, struct file *file)
+{
+	struct hwicap_drvdata *drvdata = file->private_data;
+	int i;
+	int status = 0;
+
+	mutex_lock(&drvdata->sem);
+
+	if (drvdata->write_buffer_in_use) {
+		/* Flush write buffer. */
+		for (i = drvdata->write_buffer_in_use; i < 4; i++)
+			drvdata->write_buffer[i] = 0;
+
+		status = drvdata->config->set_configuration(drvdata,
+				(u32 *) drvdata->write_buffer, 1);
+		if (status)
+			goto error;
+	}
+
+	status = hwicap_command_desync(drvdata);
+	if (status)
+		goto error;
+
+ error:
+	drvdata->is_open = 0;
+	mutex_unlock(&drvdata->sem);
+	return status;
+}
+
+static const struct file_operations hwicap_fops = {
+	.owner = THIS_MODULE,
+	.write = hwicap_write,
+	.read = hwicap_read,
+	.open = hwicap_open,
+	.release = hwicap_release,
+	.llseek = noop_llseek,
+};
+
+static int hwicap_setup(struct device *dev, int id,
+		const struct resource *regs_res,
+		const struct hwicap_driver_config *config,
+		const struct config_registers *config_regs)
+{
+	dev_t devt;
+	struct hwicap_drvdata *drvdata = NULL;
+	int retval = 0;
+
+	dev_info(dev, "Xilinx icap port driver\n");
+
+	mutex_lock(&icap_sem);
+
+	if (id < 0) {
+		for (id = 0; id < HWICAP_DEVICES; id++)
+			if (!probed_devices[id])
+				break;
+	}
+	if (id < 0 || id >= HWICAP_DEVICES) {
+		mutex_unlock(&icap_sem);
+		dev_err(dev, "%s%i too large\n", DRIVER_NAME, id);
+		return -EINVAL;
+	}
+	if (probed_devices[id]) {
+		mutex_unlock(&icap_sem);
+		dev_err(dev, "cannot assign to %s%i; it is already in use\n",
+			DRIVER_NAME, id);
+		return -EBUSY;
+	}
+
+	probed_devices[id] = 1;
+	mutex_unlock(&icap_sem);
+
+	devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR + id);
+
+	drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
+	if (!drvdata) {
+		retval = -ENOMEM;
+		goto failed0;
+	}
+	dev_set_drvdata(dev, drvdata);
+
+	if (!regs_res) {
+		dev_err(dev, "Couldn't get registers resource\n");
+		retval = -EFAULT;
+		goto failed1;
+	}
+
+	drvdata->mem_start = regs_res->start;
+	drvdata->mem_end = regs_res->end;
+	drvdata->mem_size = resource_size(regs_res);
+
+	if (!request_mem_region(drvdata->mem_start,
+					drvdata->mem_size, DRIVER_NAME)) {
+		dev_err(dev, "Couldn't lock memory region at %Lx\n",
+			(unsigned long long) regs_res->start);
+		retval = -EBUSY;
+		goto failed1;
+	}
+
+	drvdata->devt = devt;
+	drvdata->dev = dev;
+	drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size);
+	if (!drvdata->base_address) {
+		dev_err(dev, "ioremap() failed\n");
+		retval = -ENOMEM;
+		goto failed2;
+	}
+
+	drvdata->config = config;
+	drvdata->config_regs = config_regs;
+
+	mutex_init(&drvdata->sem);
+	drvdata->is_open = 0;
+
+	dev_info(dev, "ioremap %llx to %p with size %llx\n",
+		 (unsigned long long) drvdata->mem_start,
+		 drvdata->base_address,
+		 (unsigned long long) drvdata->mem_size);
+
+	cdev_init(&drvdata->cdev, &hwicap_fops);
+	drvdata->cdev.owner = THIS_MODULE;
+	retval = cdev_add(&drvdata->cdev, devt, 1);
+	if (retval) {
+		dev_err(dev, "cdev_add() failed\n");
+		goto failed3;
+	}
+
+	device_create(icap_class, dev, devt, NULL, "%s%d", DRIVER_NAME, id);
+	return 0;		/* success */
+
+ failed3:
+	iounmap(drvdata->base_address);
+
+ failed2:
+	release_mem_region(regs_res->start, drvdata->mem_size);
+
+ failed1:
+	kfree(drvdata);
+
+ failed0:
+	mutex_lock(&icap_sem);
+	probed_devices[id] = 0;
+	mutex_unlock(&icap_sem);
+
+	return retval;
+}
+
+static struct hwicap_driver_config buffer_icap_config = {
+	.get_configuration = buffer_icap_get_configuration,
+	.set_configuration = buffer_icap_set_configuration,
+	.get_status = buffer_icap_get_status,
+	.reset = buffer_icap_reset,
+};
+
+static struct hwicap_driver_config fifo_icap_config = {
+	.get_configuration = fifo_icap_get_configuration,
+	.set_configuration = fifo_icap_set_configuration,
+	.get_status = fifo_icap_get_status,
+	.reset = fifo_icap_reset,
+};
+
+static int hwicap_remove(struct device *dev)
+{
+	struct hwicap_drvdata *drvdata;
+
+	drvdata = dev_get_drvdata(dev);
+
+	if (!drvdata)
+		return 0;
+
+	device_destroy(icap_class, drvdata->devt);
+	cdev_del(&drvdata->cdev);
+	iounmap(drvdata->base_address);
+	release_mem_region(drvdata->mem_start, drvdata->mem_size);
+	kfree(drvdata);
+
+	mutex_lock(&icap_sem);
+	probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0;
+	mutex_unlock(&icap_sem);
+	return 0;		/* success */
+}
+
+#ifdef CONFIG_OF
+static int hwicap_of_probe(struct platform_device *op,
+				     const struct hwicap_driver_config *config)
+{
+	struct resource res;
+	const unsigned int *id;
+	const char *family;
+	int rc;
+	const struct config_registers *regs;
+
+
+	rc = of_address_to_resource(op->dev.of_node, 0, &res);
+	if (rc) {
+		dev_err(&op->dev, "invalid address\n");
+		return rc;
+	}
+
+	id = of_get_property(op->dev.of_node, "port-number", NULL);
+
+	/* If the family is not specified, assume V4, which is the most
+	 * likely case.
+	 */
+	regs = &v4_config_registers;
+	family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
+
+	if (family) {
+		if (!strcmp(family, "virtex2p"))
+			regs = &v2_config_registers;
+		else if (!strcmp(family, "virtex4"))
+			regs = &v4_config_registers;
+		else if (!strcmp(family, "virtex5"))
+			regs = &v5_config_registers;
+		else if (!strcmp(family, "virtex6"))
+			regs = &v6_config_registers;
+	}
+	return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
+			regs);
+}
+#else
+static inline int hwicap_of_probe(struct platform_device *op,
+				  const struct hwicap_driver_config *config)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+static const struct of_device_id hwicap_of_match[];
+static int hwicap_drv_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	struct resource *res;
+	const struct config_registers *regs;
+	const char *family;
+
+	match = of_match_device(hwicap_of_match, &pdev->dev);
+	if (match)
+		return hwicap_of_probe(pdev, match->data);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	/* If the family is not specified, assume V4, which is the most
+	 * likely case.
+	 */
+	regs = &v4_config_registers;
+	family = pdev->dev.platform_data;
+
+	if (family) {
+		if (!strcmp(family, "virtex2p"))
+			regs = &v2_config_registers;
+		else if (!strcmp(family, "virtex4"))
+			regs = &v4_config_registers;
+		else if (!strcmp(family, "virtex5"))
+			regs = &v5_config_registers;
+		else if (!strcmp(family, "virtex6"))
+			regs = &v6_config_registers;
+	}
+
+	return hwicap_setup(&pdev->dev, pdev->id, res,
+			&buffer_icap_config, regs);
+}
+
+static int hwicap_drv_remove(struct platform_device *pdev)
+{
+	return hwicap_remove(&pdev->dev);
+}
+
+#ifdef CONFIG_OF
+/* Match table for device tree binding */
+static const struct of_device_id hwicap_of_match[] = {
+	{ .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config},
+	{ .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config},
+	{},
+};
+MODULE_DEVICE_TABLE(of, hwicap_of_match);
+#else
+#define hwicap_of_match NULL
+#endif
+
+static struct platform_driver hwicap_platform_driver = {
+	.probe = hwicap_drv_probe,
+	.remove = hwicap_drv_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = hwicap_of_match,
+	},
+};
+
+static int __init hwicap_module_init(void)
+{
+	dev_t devt;
+	int retval;
+
+	icap_class = class_create(THIS_MODULE, "xilinx_config");
+	mutex_init(&icap_sem);
+
+	devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR);
+	retval = register_chrdev_region(devt,
+					HWICAP_DEVICES,
+					DRIVER_NAME);
+	if (retval < 0)
+		return retval;
+
+	retval = platform_driver_register(&hwicap_platform_driver);
+	if (retval)
+		goto failed;
+
+	return retval;
+
+ failed:
+	unregister_chrdev_region(devt, HWICAP_DEVICES);
+
+	return retval;
+}
+
+static void __exit hwicap_module_cleanup(void)
+{
+	dev_t devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR);
+
+	class_destroy(icap_class);
+
+	platform_driver_unregister(&hwicap_platform_driver);
+
+	unregister_chrdev_region(devt, HWICAP_DEVICES);
+}
+
+module_init(hwicap_module_init);
+module_exit(hwicap_module_cleanup);
+
+MODULE_AUTHOR("Xilinx, Inc; Xilinx Research Labs Group");
+MODULE_DESCRIPTION("Xilinx ICAP Port Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
new file mode 100644
index 0000000..6b963d1
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
@@ -0,0 +1,224 @@
+/*****************************************************************************
+ *
+ *     Author: Xilinx, Inc.
+ *
+ *     This program is free software; you can redistribute it and/or modify it
+ *     under the terms of the GNU General Public License as published by the
+ *     Free Software Foundation; either version 2 of the License, or (at your
+ *     option) any later version.
+ *
+ *     XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ *     AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ *     SOLUTIONS FOR XILINX DEVICES.  BY PROVIDING THIS DESIGN, CODE,
+ *     OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ *     APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ *     THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ *     AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ *     FOR YOUR IMPLEMENTATION.  XILINX EXPRESSLY DISCLAIMS ANY
+ *     WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ *     IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ *     REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ *     INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ *     FOR A PARTICULAR PURPOSE.
+ *
+ *     (c) Copyright 2003-2007 Xilinx Inc.
+ *     All rights reserved.
+ *
+ *     You should have received a copy of the GNU General Public License along
+ *     with this program; if not, write to the Free Software Foundation, Inc.,
+ *     675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_HWICAP_H_	/* prevent circular inclusions */
+#define XILINX_HWICAP_H_	/* by using protection macros */
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+
+#include <linux/io.h>
+
+struct hwicap_drvdata {
+	u32 write_buffer_in_use;  /* Always in [0,3] */
+	u8 write_buffer[4];
+	u32 read_buffer_in_use;	  /* Always in [0,3] */
+	u8 read_buffer[4];
+	resource_size_t mem_start;/* phys. address of the control registers */
+	resource_size_t mem_end;  /* phys. address of the control registers */
+	resource_size_t mem_size;
+	void __iomem *base_address;/* virt. address of the control registers */
+
+	struct device *dev;
+	struct cdev cdev;	/* Char device structure */
+	dev_t devt;
+
+	const struct hwicap_driver_config *config;
+	const struct config_registers *config_regs;
+	void *private_data;
+	bool is_open;
+	struct mutex sem;
+};
+
+struct hwicap_driver_config {
+	/* Read configuration data given by size into the data buffer.
+	 * Return 0 if successful.
+	 */
+	int (*get_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
+			u32 size);
+	/* Write configuration data given by size from the data buffer.
+	 * Return 0 if successful.
+	 */
+	int (*set_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
+			u32 size);
+	/* Get the status register, bit pattern given by:
+	 * D8 - 0 = configuration error
+	 * D7 - 1 = alignment found
+	 * D6 - 1 = readback in progress
+	 * D5 - 0 = abort in progress
+	 * D4 - Always 1
+	 * D3 - Always 1
+	 * D2 - Always 1
+	 * D1 - Always 1
+	 * D0 - 1 = operation completed
+	 */
+	u32 (*get_status)(struct hwicap_drvdata *drvdata);
+	/* Reset the hw */
+	void (*reset)(struct hwicap_drvdata *drvdata);
+};
+
+/* Number of times to poll the done register. This has to be large
+ * enough to allow an entire configuration to complete. If an entire
+ * page (4kb) is configured at once, that could take up to 4k cycles
+ * with a byte-wide icap interface. In most cases, this driver is
+ * used with a much smaller fifo, but this should be sufficient in the
+ * worst case.
+ */
+#define XHI_MAX_RETRIES     5000
+
+/************ Constant Definitions *************/
+
+#define XHI_PAD_FRAMES              0x1
+
+/* Mask for calculating configuration packet headers */
+#define XHI_WORD_COUNT_MASK_TYPE_1  0x7FFUL
+#define XHI_WORD_COUNT_MASK_TYPE_2  0x1FFFFFUL
+#define XHI_TYPE_MASK               0x7
+#define XHI_REGISTER_MASK           0xF
+#define XHI_OP_MASK                 0x3
+
+#define XHI_TYPE_SHIFT              29
+#define XHI_REGISTER_SHIFT          13
+#define XHI_OP_SHIFT                27
+
+#define XHI_TYPE_1                  1
+#define XHI_TYPE_2                  2
+#define XHI_OP_WRITE                2
+#define XHI_OP_READ                 1
+
+/* Address Block Types */
+#define XHI_FAR_CLB_BLOCK           0
+#define XHI_FAR_BRAM_BLOCK          1
+#define XHI_FAR_BRAM_INT_BLOCK      2
+
+struct config_registers {
+	u32 CRC;
+	u32 FAR;
+	u32 FDRI;
+	u32 FDRO;
+	u32 CMD;
+	u32 CTL;
+	u32 MASK;
+	u32 STAT;
+	u32 LOUT;
+	u32 COR;
+	u32 MFWR;
+	u32 FLR;
+	u32 KEY;
+	u32 CBC;
+	u32 IDCODE;
+	u32 AXSS;
+	u32 C0R_1;
+	u32 CSOB;
+	u32 WBSTAR;
+	u32 TIMER;
+	u32 BOOTSTS;
+	u32 CTL_1;
+};
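+
+/*
+ * There is one instance of this struct per supported device family
+ * (the v2/v4/v5/v6_config_registers tables selected in xilinx_hwicap.c);
+ * each field holds that family's index for the named configuration
+ * register.
+ */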
+
+/* Configuration Commands */
+#define XHI_CMD_NULL                0
+#define XHI_CMD_WCFG                1
+#define XHI_CMD_MFW                 2
+#define XHI_CMD_DGHIGH              3
+#define XHI_CMD_RCFG                4
+#define XHI_CMD_START               5
+#define XHI_CMD_RCAP                6
+#define XHI_CMD_RCRC                7
+#define XHI_CMD_AGHIGH              8
+#define XHI_CMD_SWITCH              9
+#define XHI_CMD_GRESTORE            10
+#define XHI_CMD_SHUTDOWN            11
+#define XHI_CMD_GCAPTURE            12
+#define XHI_CMD_DESYNCH             13
+#define XHI_CMD_IPROG               15 /* Only in Virtex5 */
+#define XHI_CMD_CRCC                16 /* Only in Virtex5 */
+#define XHI_CMD_LTIMER              17 /* Only in Virtex5 */
+
+/* Packet constants */
+#define XHI_SYNC_PACKET             0xAA995566UL
+#define XHI_DUMMY_PACKET            0xFFFFFFFFUL
+#define XHI_NOOP_PACKET             (XHI_TYPE_1 << XHI_TYPE_SHIFT)
+#define XHI_TYPE_2_READ ((XHI_TYPE_2 << XHI_TYPE_SHIFT) | \
+			(XHI_OP_READ << XHI_OP_SHIFT))
+
+#define XHI_TYPE_2_WRITE ((XHI_TYPE_2 << XHI_TYPE_SHIFT) | \
+			(XHI_OP_WRITE << XHI_OP_SHIFT))
+
+#define XHI_TYPE2_CNT_MASK          0x07FFFFFF
+
+#define XHI_TYPE_1_PACKET_MAX_WORDS 2047UL
+#define XHI_TYPE_1_HEADER_BYTES     4
+#define XHI_TYPE_2_HEADER_BYTES     8
+
+/* Constant to use for CRC check when CRC has been disabled */
+#define XHI_DISABLED_AUTO_CRC       0x0000DEFCUL
+
+/* Meanings of the bits returned by get_status */
+#define XHI_SR_CFGERR_N_MASK 0x00000100 /* Config Error Mask */
+#define XHI_SR_DALIGN_MASK 0x00000080 /* Data Alignment Mask */
+#define XHI_SR_RIP_MASK 0x00000040 /* Read back Mask */
+#define XHI_SR_IN_ABORT_N_MASK 0x00000020 /* Select Map Abort Mask */
+#define XHI_SR_DONE_MASK 0x00000001 /* Done bit Mask  */
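+
+/*
+ * Illustrative sketch (an assumption, not driver code): polling for
+ * completion with the masks above and the get_status() callback from
+ * struct hwicap_driver_config, bounded by XHI_MAX_RETRIES:
+ *
+ *	int retries = 0;
+ *
+ *	while (!(drvdata->config->get_status(drvdata) & XHI_SR_DONE_MASK))
+ *		if (++retries > XHI_MAX_RETRIES)
+ *			return -EIO;
+ */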
+
+/**
+ * hwicap_type_1_read - Generates a Type 1 read packet header.
+ * @reg: is the address of the register to be read back.
+ *
+ * Generates a Type 1 read packet header, which is used to indirectly
+ * read registers in the configuration logic.  This packet must then
+ * be sent through the icap device, and a return packet received with
+ * the information.
+ *
+ * Return: Type 1 read packet header.
+ */
+static inline u32 hwicap_type_1_read(u32 reg)
+{
+	return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
+		(reg << XHI_REGISTER_SHIFT) |
+		(XHI_OP_READ << XHI_OP_SHIFT);
+}
+
+/**
+ * hwicap_type_1_write - Generates a Type 1 write packet header
+ * @reg: is the address of the register to be written to.
+ *
+ * Return: Type 1 write packet header
+ */
+static inline u32 hwicap_type_1_write(u32 reg)
+{
+	return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
+		(reg << XHI_REGISTER_SHIFT) |
+		(XHI_OP_WRITE << XHI_OP_SHIFT);
+}
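+
+/*
+ * Usage sketch (an assumption based on how the driver builds packets):
+ * reading back a configuration register means sending its Type 1 read
+ * header with a word count in the low bits, e.g. one word of the IDCODE
+ * register of the active family:
+ *
+ *	u32 packet = hwicap_type_1_read(drvdata->config_regs->IDCODE) | 1;
+ *
+ * The count field is bounded by XHI_WORD_COUNT_MASK_TYPE_1.
+ */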
+
+#endif
diff --git a/drivers/char/xillybus/Kconfig b/drivers/char/xillybus/Kconfig
new file mode 100644
index 0000000..a1f16df
--- /dev/null
+++ b/drivers/char/xillybus/Kconfig
@@ -0,0 +1,33 @@
+#
+# Xillybus devices
+#
+
+config XILLYBUS
+	tristate "Xillybus generic FPGA interface"
+	depends on PCI || OF
+	select CRC32
+	help
+	  Xillybus is a generic interface for peripherals designed on
+	  programmable logic (FPGA). The driver probes the hardware for
+	  its capabilities, and creates device files accordingly.
+
+	  If unsure, say N.
+
+if XILLYBUS
+
+config XILLYBUS_PCIE
+	tristate "Xillybus over PCIe"
+	depends on PCI_MSI
+	help
+	  Set to M if you want Xillybus to use PCI Express for communicating
+	  with the FPGA.
+
+config XILLYBUS_OF
+	tristate "Xillybus over Device Tree"
+	depends on OF && HAS_DMA
+	help
+	  Set to M if you want Xillybus to find its resources from the
+	  Open Firmware Flattened Device Tree. If the target is an embedded
+	  system, say M.
+
+endif # if XILLYBUS
diff --git a/drivers/char/xillybus/Makefile b/drivers/char/xillybus/Makefile
new file mode 100644
index 0000000..b68b7eb
--- /dev/null
+++ b/drivers/char/xillybus/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Xillybus driver
+#
+
+obj-$(CONFIG_XILLYBUS)		+= xillybus_core.o
+obj-$(CONFIG_XILLYBUS_PCIE)	+= xillybus_pcie.o
+obj-$(CONFIG_XILLYBUS_OF)	+= xillybus_of.o
diff --git a/drivers/char/xillybus/xillybus.h b/drivers/char/xillybus/xillybus.h
new file mode 100644
index 0000000..b9a9eb6
--- /dev/null
+++ b/drivers/char/xillybus/xillybus.h
@@ -0,0 +1,160 @@
+/*
+ * linux/drivers/char/xillybus/xillybus.h
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Header file for the Xillybus FPGA/host framework.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#ifndef __XILLYBUS_H
+#define __XILLYBUS_H
+
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/cdev.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+struct xilly_endpoint_hardware;
+
+struct xilly_buffer {
+	void *addr;
+	dma_addr_t dma_addr;
+	int end_offset; /* Counting elements, not bytes */
+};
+
+struct xilly_idt_handle {
+	unsigned char *chandesc;
+	unsigned char *idt;
+	int entries;
+};
+
+/*
+ * Read-write confusion: wr_* and rd_* notation sticks to FPGA view, so
+ * wr_* buffers are those consumed by read(), since the FPGA writes to them
+ * and vice versa.
+ */
+
+struct xilly_channel {
+	struct xilly_endpoint *endpoint;
+	int chan_num;
+	int log2_element_size;
+	int seekable;
+
+	struct xilly_buffer **wr_buffers; /* FPGA writes, driver reads! */
+	int num_wr_buffers;
+	unsigned int wr_buf_size; /* In bytes */
+	int wr_fpga_buf_idx;
+	int wr_host_buf_idx;
+	int wr_host_buf_pos;
+	int wr_empty;
+	int wr_ready; /* Significant only when wr_empty == 1 */
+	int wr_sleepy;
+	int wr_eof;
+	int wr_hangup;
+	spinlock_t wr_spinlock;
+	struct mutex wr_mutex;
+	wait_queue_head_t wr_wait;
+	wait_queue_head_t wr_ready_wait;
+	int wr_ref_count;
+	int wr_synchronous;
+	int wr_allow_partial;
+	int wr_exclusive_open;
+	int wr_supports_nonempty;
+
+	struct xilly_buffer **rd_buffers; /* FPGA reads, driver writes! */
+	int num_rd_buffers;
+	unsigned int rd_buf_size; /* In bytes */
+	int rd_fpga_buf_idx;
+	int rd_host_buf_pos;
+	int rd_host_buf_idx;
+	int rd_full;
+	spinlock_t rd_spinlock;
+	struct mutex rd_mutex;
+	wait_queue_head_t rd_wait;
+	int rd_ref_count;
+	int rd_allow_partial;
+	int rd_synchronous;
+	int rd_exclusive_open;
+	struct delayed_work rd_workitem;
+	unsigned char rd_leftovers[4];
+};
+
+struct xilly_endpoint {
+	/*
+	 * One of pdev and dev is always NULL, and the other is a valid
+	 * pointer, depending on the type of device
+	 */
+	struct pci_dev *pdev;
+	struct device *dev;
+	struct xilly_endpoint_hardware *ephw;
+
+	struct list_head ep_list;
+	int dma_using_dac; /* =1 if 64-bit DMA is used, =0 otherwise. */
+	void __iomem *registers;
+	int fatal_error;
+
+	struct mutex register_mutex;
+	wait_queue_head_t ep_wait;
+
+	/* Channels and message handling */
+	struct cdev cdev;
+
+	int major;
+	int lowest_minor; /* Highest minor = lowest_minor + num_channels - 1 */
+
+	int num_channels; /* EXCLUDING message buffer */
+	struct xilly_channel **channels;
+	int msg_counter;
+	int failed_messages;
+	int idtlen;
+
+	u32 *msgbuf_addr;
+	dma_addr_t msgbuf_dma_addr;
+	unsigned int msg_buf_size;
+};
+
+struct xilly_endpoint_hardware {
+	struct module *owner;
+	void (*hw_sync_sgl_for_cpu)(struct xilly_endpoint *,
+				    dma_addr_t,
+				    size_t,
+				    int);
+	void (*hw_sync_sgl_for_device)(struct xilly_endpoint *,
+				       dma_addr_t,
+				       size_t,
+				       int);
+	int (*map_single)(struct xilly_endpoint *,
+			  void *,
+			  size_t,
+			  int,
+			  dma_addr_t *);
+};
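+
+/*
+ * One of these is supplied by each bus-specific front end (xillybus_pcie
+ * and xillybus_of in the Makefile), which keeps this core bus-agnostic.
+ */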
+
+struct xilly_mapping {
+	void *device;
+	dma_addr_t dma_addr;
+	size_t size;
+	int direction;
+};
+
+irqreturn_t xillybus_isr(int irq, void *data);
+
+struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
+					      struct device *dev,
+					      struct xilly_endpoint_hardware
+					      *ephw);
+
+int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint);
+
+void xillybus_endpoint_remove(struct xilly_endpoint *endpoint);
+
+#endif /* __XILLYBUS_H */
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
new file mode 100644
index 0000000..a11af94
--- /dev/null
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -0,0 +1,2107 @@
+/*
+ * linux/drivers/char/xillybus/xillybus_core.c
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus FPGA/host framework.
+ *
+ * This driver interfaces with a special IP core in an FPGA, setting up
+ * a pipe between a hardware FIFO in the programmable logic and a device
+ * file in the host. The number of such pipes and their attributes are
+ * set up on the logic. This driver detects these automatically and
+ * creates the device files accordingly.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/crc32.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "xillybus.h"
+
+MODULE_DESCRIPTION("Xillybus core functions");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_VERSION("1.07");
+MODULE_ALIAS("xillybus_core");
+MODULE_LICENSE("GPL v2");
+
+/* General timeout is 100 ms, rx timeout is 10 ms */
+#define XILLY_RX_TIMEOUT (10*HZ/1000)
+#define XILLY_TIMEOUT (100*HZ/1000)
+
+#define fpga_msg_ctrl_reg              0x0008
+#define fpga_dma_control_reg           0x0020
+#define fpga_dma_bufno_reg             0x0024
+#define fpga_dma_bufaddr_lowaddr_reg   0x0028
+#define fpga_dma_bufaddr_highaddr_reg  0x002c
+#define fpga_buf_ctrl_reg              0x0030
+#define fpga_buf_offset_reg            0x0034
+#define fpga_endian_reg                0x0040
+
+#define XILLYMSG_OPCODE_RELEASEBUF 1
+#define XILLYMSG_OPCODE_QUIESCEACK 2
+#define XILLYMSG_OPCODE_FIFOEOF 3
+#define XILLYMSG_OPCODE_FATAL_ERROR 4
+#define XILLYMSG_OPCODE_NONEMPTY 5
+
+static const char xillyname[] = "xillybus";
+
+static struct class *xillybus_class;
+
+/*
+ * ep_list_lock is the last lock to be taken; no other lock requests are
+ * allowed while holding it. It merely protects list_of_endpoints, and not
+ * the endpoints listed in it.
+ */
+
+static LIST_HEAD(list_of_endpoints);
+static struct mutex ep_list_lock;
+static struct workqueue_struct *xillybus_wq;
+
+/*
+ * Locking scheme: Mutexes protect invocations of character device methods.
+ * If both locks are taken, wr_mutex is taken first, rd_mutex second.
+ *
+ * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the
+ * buffers' end_offset fields against changes made by IRQ handler (and in
+ * theory, other file request handlers, but the mutex handles that). Nothing
+ * else.
+ * These spinlocks are held for short direct memory manipulations.
+ * Needless to say, no mutex locking is allowed when a spinlock is held.
+ *
+ * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset.
+ *
+ * register_mutex is endpoint-specific, and is held when non-atomic
+ * register operations are performed. wr_mutex and rd_mutex may be
+ * held when register_mutex is taken, but none of the spinlocks. Note that
+ * register_mutex doesn't protect against sporadic buf_ctrl_reg writes
+ * which are unrelated to buf_offset_reg, since they are harmless.
+ *
+ * Blocking on the wait queues is allowed with mutexes held, but not with
+ * spinlocks.
+ *
+ * Only interruptible blocking is allowed on mutexes and wait queues.
+ *
+ * All in all, the locking order goes (with skips allowed, of course):
+ * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock
+ */
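+
+/*
+ * For illustration only (no real code path takes all five locks), a
+ * nesting that respects the order above would look like:
+ *
+ *	mutex_lock(&channel->wr_mutex);
+ *	mutex_lock(&channel->rd_mutex);
+ *	mutex_lock(&endpoint->register_mutex);
+ *	spin_lock(&channel->wr_spinlock);
+ *	spin_lock(&channel->rd_spinlock);
+ */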
+
+static void malformed_message(struct xilly_endpoint *endpoint, u32 *buf)
+{
+	int opcode;
+	int msg_channel, msg_bufno, msg_data, msg_dir;
+
+	opcode = (buf[0] >> 24) & 0xff;
+	msg_dir = buf[0] & 1;
+	msg_channel = (buf[0] >> 1) & 0x7ff;
+	msg_bufno = (buf[0] >> 12) & 0x3ff;
+	msg_data = buf[1] & 0xfffffff;
+
+	dev_warn(endpoint->dev,
+		 "Malformed message (skipping): opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n",
+		 opcode, msg_channel, msg_dir, msg_bufno, msg_data);
+}
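+
+/*
+ * Message word layout, as decoded above and in xillybus_isr() below
+ * (derived from the shifts and masks in this file):
+ *
+ *	buf[0]: bit  0     - direction
+ *	        bits 11:1  - channel number
+ *	        bits 21:12 - buffer number
+ *	        bit  22    - last message flag
+ *	        bits 31:24 - opcode
+ *	buf[1]: bits 27:0  - data
+ *	        bits 31:28 - message counter
+ */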
+
+/*
+ * xillybus_isr assumes the interrupt is allocated exclusively to it,
+ * which is the natural case with MSI and several other hardware-oriented
+ * interrupts. Sharing is not allowed.
+ */
+
+irqreturn_t xillybus_isr(int irq, void *data)
+{
+	struct xilly_endpoint *ep = data;
+	u32 *buf;
+	unsigned int buf_size;
+	int i;
+	int opcode;
+	unsigned int msg_channel, msg_bufno, msg_data, msg_dir;
+	struct xilly_channel *channel;
+
+	buf = ep->msgbuf_addr;
+	buf_size = ep->msg_buf_size/sizeof(u32);
+
+	ep->ephw->hw_sync_sgl_for_cpu(ep,
+				      ep->msgbuf_dma_addr,
+				      ep->msg_buf_size,
+				      DMA_FROM_DEVICE);
+
+	for (i = 0; i < buf_size; i += 2) {
+		if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) {
+			malformed_message(ep, &buf[i]);
+			dev_warn(ep->dev,
+				 "Sending a NACK on counter %x (instead of %x) on entry %d\n",
+				 ((buf[i+1] >> 28) & 0xf),
+				 ep->msg_counter,
+				 i/2);
+
+			if (++ep->failed_messages > 10) {
+				dev_err(ep->dev,
+					"Lost sync with interrupt messages. Stopping.\n");
+			} else {
+				ep->ephw->hw_sync_sgl_for_device(
+					ep,
+					ep->msgbuf_dma_addr,
+					ep->msg_buf_size,
+					DMA_FROM_DEVICE);
+
+				iowrite32(0x01,  /* Message NACK */
+					  ep->registers + fpga_msg_ctrl_reg);
+			}
+			return IRQ_HANDLED;
+		} else if (buf[i] & (1 << 22)) /* Last message */
+			break;
+	}
+
+	if (i >= buf_size) {
+		dev_err(ep->dev, "Bad interrupt message. Stopping.\n");
+		return IRQ_HANDLED;
+	}
+
+	buf_size = i + 2;
+
+	for (i = 0; i < buf_size; i += 2) { /* Scan through messages */
+		opcode = (buf[i] >> 24) & 0xff;
+
+		msg_dir = buf[i] & 1;
+		msg_channel = (buf[i] >> 1) & 0x7ff;
+		msg_bufno = (buf[i] >> 12) & 0x3ff;
+		msg_data = buf[i+1] & 0xfffffff;
+
+		switch (opcode) {
+		case XILLYMSG_OPCODE_RELEASEBUF:
+			if ((msg_channel > ep->num_channels) ||
+			    (msg_channel == 0)) {
+				malformed_message(ep, &buf[i]);
+				break;
+			}
+
+			channel = ep->channels[msg_channel];
+
+			if (msg_dir) { /* Write channel */
+				if (msg_bufno >= channel->num_wr_buffers) {
+					malformed_message(ep, &buf[i]);
+					break;
+				}
+				spin_lock(&channel->wr_spinlock);
+				channel->wr_buffers[msg_bufno]->end_offset =
+					msg_data;
+				channel->wr_fpga_buf_idx = msg_bufno;
+				channel->wr_empty = 0;
+				channel->wr_sleepy = 0;
+				spin_unlock(&channel->wr_spinlock);
+
+				wake_up_interruptible(&channel->wr_wait);
+
+			} else {
+				/* Read channel */
+
+				if (msg_bufno >= channel->num_rd_buffers) {
+					malformed_message(ep, &buf[i]);
+					break;
+				}
+
+				spin_lock(&channel->rd_spinlock);
+				channel->rd_fpga_buf_idx = msg_bufno;
+				channel->rd_full = 0;
+				spin_unlock(&channel->rd_spinlock);
+
+				wake_up_interruptible(&channel->rd_wait);
+				if (!channel->rd_synchronous)
+					queue_delayed_work(
+						xillybus_wq,
+						&channel->rd_workitem,
+						XILLY_RX_TIMEOUT);
+			}
+
+			break;
+		case XILLYMSG_OPCODE_NONEMPTY:
+			if ((msg_channel > ep->num_channels) ||
+			    (msg_channel == 0) || (!msg_dir) ||
+			    !ep->channels[msg_channel]->wr_supports_nonempty) {
+				malformed_message(ep, &buf[i]);
+				break;
+			}
+
+			channel = ep->channels[msg_channel];
+
+			if (msg_bufno >= channel->num_wr_buffers) {
+				malformed_message(ep, &buf[i]);
+				break;
+			}
+			spin_lock(&channel->wr_spinlock);
+			if (msg_bufno == channel->wr_host_buf_idx)
+				channel->wr_ready = 1;
+			spin_unlock(&channel->wr_spinlock);
+
+			wake_up_interruptible(&channel->wr_ready_wait);
+
+			break;
+		case XILLYMSG_OPCODE_QUIESCEACK:
+			ep->idtlen = msg_data;
+			wake_up_interruptible(&ep->ep_wait);
+
+			break;
+		case XILLYMSG_OPCODE_FIFOEOF:
+			if ((msg_channel > ep->num_channels) ||
+			    (msg_channel == 0) || (!msg_dir) ||
+			    !ep->channels[msg_channel]->num_wr_buffers) {
+				malformed_message(ep, &buf[i]);
+				break;
+			}
+			channel = ep->channels[msg_channel];
+			spin_lock(&channel->wr_spinlock);
+			channel->wr_eof = msg_bufno;
+			channel->wr_sleepy = 0;
+
+			channel->wr_hangup = channel->wr_empty &&
+				(channel->wr_host_buf_idx == msg_bufno);
+
+			spin_unlock(&channel->wr_spinlock);
+
+			wake_up_interruptible(&channel->wr_wait);
+
+			break;
+		case XILLYMSG_OPCODE_FATAL_ERROR:
+			ep->fatal_error = 1;
+			wake_up_interruptible(&ep->ep_wait); /* For select() */
+			dev_err(ep->dev,
+				"FPGA reported a fatal error. This means that the low-level communication with the device has failed. This hardware problem is most likely unrelated to Xillybus (neither kernel module nor FPGA core), but reports are still welcome. All I/O is aborted.\n");
+			break;
+		default:
+			malformed_message(ep, &buf[i]);
+			break;
+		}
+	}
+
+	ep->ephw->hw_sync_sgl_for_device(ep,
+					 ep->msgbuf_dma_addr,
+					 ep->msg_buf_size,
+					 DMA_FROM_DEVICE);
+
+	ep->msg_counter = (ep->msg_counter + 1) & 0xf;
+	ep->failed_messages = 0;
+	iowrite32(0x03, ep->registers + fpga_msg_ctrl_reg); /* Message ACK */
+
+	return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(xillybus_isr);
+
+/*
+ * A few trivial memory management functions.
+ * NOTE: These functions are used only on probe and remove, and therefore
+ * no locks are applied!
+ */
+
+static void xillybus_autoflush(struct work_struct *work);
+
+struct xilly_alloc_state {
+	void *salami;
+	int left_of_salami;
+	int nbuffer;
+	enum dma_data_direction direction;
+	u32 regdirection;
+};
+
+static int xilly_get_dma_buffers(struct xilly_endpoint *ep,
+				 struct xilly_alloc_state *s,
+				 struct xilly_buffer **buffers,
+				 int bufnum, int bytebufsize)
+{
+	int i, rc;
+	dma_addr_t dma_addr;
+	struct device *dev = ep->dev;
+	struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */
+
+	if (buffers) { /* Not the message buffer */
+		this_buffer = devm_kcalloc(dev, bufnum,
+					   sizeof(struct xilly_buffer),
+					   GFP_KERNEL);
+		if (!this_buffer)
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < bufnum; i++) {
+		/*
+		 * Buffers are expected in descending size order, so there
+		 * is either enough space for this buffer or none at all.
+		 */
+
+		if ((s->left_of_salami < bytebufsize) &&
+		    (s->left_of_salami > 0)) {
+			dev_err(ep->dev,
+				"Corrupt buffer allocation in IDT. Aborting.\n");
+			return -ENODEV;
+		}
+
+		if (s->left_of_salami == 0) {
+			int allocorder, allocsize;
+
+			allocsize = PAGE_SIZE;
+			allocorder = 0;
+			while (bytebufsize > allocsize) {
+				allocsize *= 2;
+				allocorder++;
+			}
+
+			s->salami = (void *) devm_get_free_pages(
+				dev,
+				GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO,
+				allocorder);
+			if (!s->salami)
+				return -ENOMEM;
+
+			s->left_of_salami = allocsize;
+		}
+
+		rc = ep->ephw->map_single(ep, s->salami,
+					  bytebufsize, s->direction,
+					  &dma_addr);
+		if (rc)
+			return rc;
+
+		iowrite32((u32) (dma_addr & 0xffffffff),
+			  ep->registers + fpga_dma_bufaddr_lowaddr_reg);
+		iowrite32(((u32) ((((u64) dma_addr) >> 32) & 0xffffffff)),
+			  ep->registers + fpga_dma_bufaddr_highaddr_reg);
+
+		if (buffers) { /* Not the message buffer */
+			this_buffer->addr = s->salami;
+			this_buffer->dma_addr = dma_addr;
+			buffers[i] = this_buffer++;
+
+			iowrite32(s->regdirection | s->nbuffer++,
+				  ep->registers + fpga_dma_bufno_reg);
+		} else {
+			ep->msgbuf_addr = s->salami;
+			ep->msgbuf_dma_addr = dma_addr;
+			ep->msg_buf_size = bytebufsize;
+
+			iowrite32(s->regdirection,
+				  ep->registers + fpga_dma_bufno_reg);
+		}
+
+		s->left_of_salami -= bytebufsize;
+		s->salami += bytebufsize;
+	}
+	return 0;
+}
+
+static int xilly_setupchannels(struct xilly_endpoint *ep,
+			       unsigned char *chandesc,
+			       int entries)
+{
+	struct device *dev = ep->dev;
+	int i, entry, rc;
+	struct xilly_channel *channel;
+	int channelnum, bufnum, bufsize, format, is_writebuf;
+	int bytebufsize;
+	int synchronous, allowpartial, exclusive_open, seekable;
+	int supports_nonempty;
+	int msg_buf_done = 0;
+
+	struct xilly_alloc_state rd_alloc = {
+		.salami = NULL,
+		.left_of_salami = 0,
+		.nbuffer = 1,
+		.direction = DMA_TO_DEVICE,
+		.regdirection = 0,
+	};
+
+	struct xilly_alloc_state wr_alloc = {
+		.salami = NULL,
+		.left_of_salami = 0,
+		.nbuffer = 1,
+		.direction = DMA_FROM_DEVICE,
+		.regdirection = 0x80000000,
+	};
+
+	channel = devm_kcalloc(dev, ep->num_channels,
+			       sizeof(struct xilly_channel), GFP_KERNEL);
+	if (!channel)
+		return -ENOMEM;
+
+	ep->channels = devm_kcalloc(dev, ep->num_channels + 1,
+				    sizeof(struct xilly_channel *),
+				    GFP_KERNEL);
+	if (!ep->channels)
+		return -ENOMEM;
+
+	ep->channels[0] = NULL; /* Channel 0 is message buf. */
+
+	/* Initialize all channels with defaults */
+
+	for (i = 1; i <= ep->num_channels; i++) {
+		channel->wr_buffers = NULL;
+		channel->rd_buffers = NULL;
+		channel->num_wr_buffers = 0;
+		channel->num_rd_buffers = 0;
+		channel->wr_fpga_buf_idx = -1;
+		channel->wr_host_buf_idx = 0;
+		channel->wr_host_buf_pos = 0;
+		channel->wr_empty = 1;
+		channel->wr_ready = 0;
+		channel->wr_sleepy = 1;
+		channel->rd_fpga_buf_idx = 0;
+		channel->rd_host_buf_idx = 0;
+		channel->rd_host_buf_pos = 0;
+		channel->rd_full = 0;
+		channel->wr_ref_count = 0;
+		channel->rd_ref_count = 0;
+
+		spin_lock_init(&channel->wr_spinlock);
+		spin_lock_init(&channel->rd_spinlock);
+		mutex_init(&channel->wr_mutex);
+		mutex_init(&channel->rd_mutex);
+		init_waitqueue_head(&channel->rd_wait);
+		init_waitqueue_head(&channel->wr_wait);
+		init_waitqueue_head(&channel->wr_ready_wait);
+
+		INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush);
+
+		channel->endpoint = ep;
+		channel->chan_num = i;
+
+		channel->log2_element_size = 0;
+
+		ep->channels[i] = channel++;
+	}
+
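+	/*
+	 * Each entry in chandesc is a 4-byte channel descriptor, decoded
+	 * field by field below:
+	 *	byte 0: bit 0 - write-buffer flag, bits 7:1 - channel (low)
+	 *	byte 1: bits 3:0 - channel (high), 5:4 - format,
+	 *		6 - allow partial, 7 - synchronous
+	 *	byte 2: bits 4:0 - log2(buffer size), 5 - supports nonempty,
+	 *		6 - seekable, 7 - exclusive open
+	 *	byte 3: bits 3:0 - log2(number of buffers)
+	 */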
+	for (entry = 0; entry < entries; entry++, chandesc += 4) {
+		struct xilly_buffer **buffers = NULL;
+
+		is_writebuf = chandesc[0] & 0x01;
+		channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7);
+		format = (chandesc[1] >> 4) & 0x03;
+		allowpartial = (chandesc[1] >> 6) & 0x01;
+		synchronous = (chandesc[1] >> 7) & 0x01;
+		bufsize = 1 << (chandesc[2] & 0x1f);
+		bufnum = 1 << (chandesc[3] & 0x0f);
+		exclusive_open = (chandesc[2] >> 7) & 0x01;
+		seekable = (chandesc[2] >> 6) & 0x01;
+		supports_nonempty = (chandesc[2] >> 5) & 0x01;
+
+		if ((channelnum > ep->num_channels) ||
+		    ((channelnum == 0) && !is_writebuf)) {
+			dev_err(ep->dev,
+				"IDT requests channel out of range. Aborting.\n");
+			return -ENODEV;
+		}
+
+		channel = ep->channels[channelnum]; /* NULL for msg channel */
+
+		if (!is_writebuf || channelnum > 0) {
+			channel->log2_element_size = ((format > 2) ?
+						      2 : format);
+
+			bytebufsize = bufsize *
+				(1 << channel->log2_element_size);
+
+			buffers = devm_kcalloc(dev, bufnum,
+					       sizeof(struct xilly_buffer *),
+					       GFP_KERNEL);
+			if (!buffers)
+				return -ENOMEM;
+		} else {
+			bytebufsize = bufsize << 2;
+		}
+
+		if (!is_writebuf) {
+			channel->num_rd_buffers = bufnum;
+			channel->rd_buf_size = bytebufsize;
+			channel->rd_allow_partial = allowpartial;
+			channel->rd_synchronous = synchronous;
+			channel->rd_exclusive_open = exclusive_open;
+			channel->seekable = seekable;
+
+			channel->rd_buffers = buffers;
+			rc = xilly_get_dma_buffers(ep, &rd_alloc, buffers,
+						   bufnum, bytebufsize);
+		} else if (channelnum > 0) {
+			channel->num_wr_buffers = bufnum;
+			channel->wr_buf_size = bytebufsize;
+
+			channel->seekable = seekable;
+			channel->wr_supports_nonempty = supports_nonempty;
+
+			channel->wr_allow_partial = allowpartial;
+			channel->wr_synchronous = synchronous;
+			channel->wr_exclusive_open = exclusive_open;
+
+			channel->wr_buffers = buffers;
+			rc = xilly_get_dma_buffers(ep, &wr_alloc, buffers,
+						   bufnum, bytebufsize);
+		} else {
+			rc = xilly_get_dma_buffers(ep, &wr_alloc, NULL,
+						   bufnum, bytebufsize);
+			msg_buf_done++;
+		}
+
+		if (rc)
+			return -ENOMEM;
+	}
+
+	if (!msg_buf_done) {
+		dev_err(ep->dev,
+			"Corrupt IDT: No message buffer. Aborting.\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
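+/*
+ * IDT layout, as scanned below (a sketch inferred from this function; the
+ * format itself is defined by the FPGA IP core): a version byte, then a
+ * list of NUL-terminated device name strings ended by an empty string,
+ * then 4-byte channel descriptors, followed by a trailing checksum.
+ */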
+static int xilly_scan_idt(struct xilly_endpoint *endpoint,
+			  struct xilly_idt_handle *idt_handle)
+{
+	int count = 0;
+	unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr;
+	unsigned char *end_of_idt = idt + endpoint->idtlen - 4;
+	unsigned char *scan;
+	int len;
+
+	scan = idt;
+	idt_handle->idt = idt;
+
+	scan++; /* Skip version number */
+
+	while ((scan <= end_of_idt) && *scan) {
+		while ((scan <= end_of_idt) && *scan++)
+			/* Do nothing, just scan through the string */;
+		count++;
+	}
+
+	scan++;
+
+	if (scan > end_of_idt) {
+		dev_err(endpoint->dev,
+			"IDT device name list overflow. Aborting.\n");
+		return -ENODEV;
+	}
+	idt_handle->chandesc = scan;
+
+	len = endpoint->idtlen - (3 + ((int) (scan - idt)));
+
+	if (len & 0x03) {
+		dev_err(endpoint->dev,
+			"Corrupt IDT device name list. Aborting.\n");
+		return -ENODEV;
+	}
+
+	idt_handle->entries = len >> 2;
+	endpoint->num_channels = count;
+
+	return 0;
+}
+
+static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
+{
+	struct xilly_channel *channel;
+	unsigned char *version;
+	long t;
+
+	channel = endpoint->channels[1]; /* This should be generated ad-hoc */
+
+	channel->wr_sleepy = 1;
+
+	iowrite32(1 |
+		  (3 << 24), /* Opcode 3 for channel 0 = Send IDT */
+		  endpoint->registers + fpga_buf_ctrl_reg);
+
+	t = wait_event_interruptible_timeout(channel->wr_wait,
+					     (!channel->wr_sleepy),
+					     XILLY_TIMEOUT);
+
+	if (t <= 0) {
+		dev_err(endpoint->dev, "Failed to obtain IDT. Aborting.\n");
+
+		if (endpoint->fatal_error)
+			return -EIO;
+
+		return -ENODEV;
+	}
+
+	endpoint->ephw->hw_sync_sgl_for_cpu(
+		channel->endpoint,
+		channel->wr_buffers[0]->dma_addr,
+		channel->wr_buf_size,
+		DMA_FROM_DEVICE);
+
+	if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
+		dev_err(endpoint->dev,
+			"IDT length mismatch (%d != %d). Aborting.\n",
+			channel->wr_buffers[0]->end_offset, endpoint->idtlen);
+		return -ENODEV;
+	}
+
+	if (crc32_le(~0, channel->wr_buffers[0]->addr,
+		     endpoint->idtlen+1) != 0) {
+		dev_err(endpoint->dev, "IDT failed CRC check. Aborting.\n");
+		return -ENODEV;
+	}
+
+	version = channel->wr_buffers[0]->addr;
+
+	/* Check version number. Reject anything above 0x82. */
+	if (*version > 0x82) {
+		dev_err(endpoint->dev,
+			"No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgrade. Aborting.\n",
+			*version);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
+			     size_t count, loff_t *f_pos)
+{
+	ssize_t rc;
+	unsigned long flags;
+	int bytes_done = 0;
+	int no_time_left = 0;
+	long deadline, left_to_sleep;
+	struct xilly_channel *channel = filp->private_data;
+
+	int empty, reached_eof, exhausted, ready;
+	/* Initializations are there only to silence warnings */
+
+	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
+	int waiting_bufidx;
+
+	if (channel->endpoint->fatal_error)
+		return -EIO;
+
+	deadline = jiffies + 1 + XILLY_RX_TIMEOUT;
+
+	rc = mutex_lock_interruptible(&channel->wr_mutex);
+	if (rc)
+		return rc;
+
+	while (1) { /* Note that we may drop mutex within this loop */
+		int bytes_to_do = count - bytes_done;
+
+		spin_lock_irqsave(&channel->wr_spinlock, flags);
+
+		empty = channel->wr_empty;
+		ready = !empty || channel->wr_ready;
+
+		if (!empty) {
+			bufidx = channel->wr_host_buf_idx;
+			bufpos = channel->wr_host_buf_pos;
+			howmany = ((channel->wr_buffers[bufidx]->end_offset
+				    + 1) << channel->log2_element_size)
+				- bufpos;
+
+			/* Update wr_host_* to its post-operation state */
+			if (howmany > bytes_to_do) {
+				bufferdone = 0;
+
+				howmany = bytes_to_do;
+				channel->wr_host_buf_pos += howmany;
+			} else {
+				bufferdone = 1;
+
+				channel->wr_host_buf_pos = 0;
+
+				if (bufidx == channel->wr_fpga_buf_idx) {
+					channel->wr_empty = 1;
+					channel->wr_sleepy = 1;
+					channel->wr_ready = 0;
+				}
+
+				if (bufidx >= (channel->num_wr_buffers - 1))
+					channel->wr_host_buf_idx = 0;
+				else
+					channel->wr_host_buf_idx++;
+			}
+		}
+
+		/*
+		 * Marking our situation after the possible changes above,
+		 * for use after releasing the spinlock.
+		 *
+		 * empty = empty before change
+		 * exhausted = empty after possible change
+		 */
+
+		reached_eof = channel->wr_empty &&
+			(channel->wr_host_buf_idx == channel->wr_eof);
+		channel->wr_hangup = reached_eof;
+		exhausted = channel->wr_empty;
+		waiting_bufidx = channel->wr_host_buf_idx;
+
+		spin_unlock_irqrestore(&channel->wr_spinlock, flags);
+
+		if (!empty) { /* Go on, now without the spinlock */
+
+			if (bufpos == 0) /* Position zero means it's virgin */
+				channel->endpoint->ephw->hw_sync_sgl_for_cpu(
+					channel->endpoint,
+					channel->wr_buffers[bufidx]->dma_addr,
+					channel->wr_buf_size,
+					DMA_FROM_DEVICE);
+
+			if (copy_to_user(
+				    userbuf,
+				    channel->wr_buffers[bufidx]->addr
+				    + bufpos, howmany))
+				rc = -EFAULT;
+
+			userbuf += howmany;
+			bytes_done += howmany;
+
+			if (bufferdone) {
+				channel->endpoint->ephw->hw_sync_sgl_for_device(
+					channel->endpoint,
+					channel->wr_buffers[bufidx]->dma_addr,
+					channel->wr_buf_size,
+					DMA_FROM_DEVICE);
+
+				/*
+				 * Tell FPGA the buffer is done with. It's an
+				 * atomic operation to the FPGA, so what
+				 * happens with other channels doesn't matter,
+				 * and the certain channel is protected with
+				 * the channel-specific mutex.
+				 */
+
+				iowrite32(1 | (channel->chan_num << 1) |
+					  (bufidx << 12),
+					  channel->endpoint->registers +
+					  fpga_buf_ctrl_reg);
+			}
+
+			if (rc) {
+				mutex_unlock(&channel->wr_mutex);
+				return rc;
+			}
+		}
+
+		/* This includes a zero-count return = EOF */
+		if ((bytes_done >= count) || reached_eof)
+			break;
+
+		if (!exhausted)
+			continue; /* More in RAM buffer(s)? Just go on. */
+
+		if ((bytes_done > 0) &&
+		    (no_time_left ||
+		     (channel->wr_synchronous && channel->wr_allow_partial)))
+			break;
+
+		/*
+		 * Nonblocking read: The "ready" flag tells us that the FPGA
+		 * has data to send. In non-blocking mode, if it isn't on,
+		 * just return. But if it is, we jump directly to the point
+		 * where we ask for the FPGA to send all it has, and wait
+		 * until that data arrives. So in a sense, we *do* block in
+		 * nonblocking mode, but only for a very short time.
+		 */
+
+		if (!no_time_left && (filp->f_flags & O_NONBLOCK)) {
+			if (bytes_done > 0)
+				break;
+
+			if (ready)
+				goto desperate;
+
+			rc = -EAGAIN;
+			break;
+		}
+
+		if (!no_time_left || (bytes_done > 0)) {
+			/*
+			 * Note that in case of an element-misaligned read
+			 * request, offsetlimit will include the last element,
+			 * which will be partially read from.
+			 */
+			int offsetlimit = ((count - bytes_done) - 1) >>
+				channel->log2_element_size;
+			int buf_elements = channel->wr_buf_size >>
+				channel->log2_element_size;
+
+			/*
+			 * In synchronous mode, always send an offset limit.
+			 * Just don't send a value too big.
+			 */
+
+			if (channel->wr_synchronous) {
+				/* Don't request more than one buffer */
+				if (channel->wr_allow_partial &&
+				    (offsetlimit >= buf_elements))
+					offsetlimit = buf_elements - 1;
+
+				/* Don't request more than all buffers */
+				if (!channel->wr_allow_partial &&
+				    (offsetlimit >=
+				     (buf_elements * channel->num_wr_buffers)))
+					offsetlimit = buf_elements *
+						channel->num_wr_buffers - 1;
+			}
+
+			/*
+			 * In asynchronous mode, force early flush of a buffer
+			 * only if that will allow returning a full count. The
+			 * "offsetlimit < ( ... )" rather than "<=" excludes
+			 * requesting a full buffer, which would obviously
+			 * cause a buffer transmission anyhow.
+			 */
+
+			if (channel->wr_synchronous ||
+			    (offsetlimit < (buf_elements - 1))) {
+				mutex_lock(&channel->endpoint->register_mutex);
+
+				iowrite32(offsetlimit,
+					  channel->endpoint->registers +
+					  fpga_buf_offset_reg);
+
+				iowrite32(1 | (channel->chan_num << 1) |
+					  (2 << 24) |  /* 2 = offset limit */
+					  (waiting_bufidx << 12),
+					  channel->endpoint->registers +
+					  fpga_buf_ctrl_reg);
+
+				mutex_unlock(&channel->endpoint->
+					     register_mutex);
+			}
+		}
+
+		/*
+		 * If partial completion is disallowed, there is no point in
+		 * timeout sleeping. Neither if no_time_left is set and
+		 * there's no data.
+		 */
+
+		if (!channel->wr_allow_partial ||
+		    (no_time_left && (bytes_done == 0))) {
+			/*
+			 * This do-loop will run more than once if another
+			 * thread reasserted wr_sleepy before we got the mutex
+			 * back, so we try again.
+			 */
+
+			do {
+				mutex_unlock(&channel->wr_mutex);
+
+				if (wait_event_interruptible(
+					    channel->wr_wait,
+					    (!channel->wr_sleepy)))
+					goto interrupted;
+
+				if (mutex_lock_interruptible(
+					    &channel->wr_mutex))
+					goto interrupted;
+			} while (channel->wr_sleepy);
+
+			continue;
+
+interrupted: /* Mutex is not held if got here */
+			if (channel->endpoint->fatal_error)
+				return -EIO;
+			if (bytes_done)
+				return bytes_done;
+			if (filp->f_flags & O_NONBLOCK)
+				return -EAGAIN; /* Don't admit snoozing */
+			return -EINTR;
+		}
+
+		left_to_sleep = deadline - ((long) jiffies);
+
+		/*
+		 * If our time is out, skip the waiting. We may miss wr_sleepy
+		 * being deasserted but hey, almost missing the train is like
+		 * missing it.
+		 */
+
+		if (left_to_sleep > 0) {
+			left_to_sleep =
+				wait_event_interruptible_timeout(
+					channel->wr_wait,
+					(!channel->wr_sleepy),
+					left_to_sleep);
+
+			if (left_to_sleep > 0) /* wr_sleepy deasserted */
+				continue;
+
+			if (left_to_sleep < 0) { /* Interrupt */
+				mutex_unlock(&channel->wr_mutex);
+				if (channel->endpoint->fatal_error)
+					return -EIO;
+				if (bytes_done)
+					return bytes_done;
+				return -EINTR;
+			}
+		}
+
+desperate:
+		no_time_left = 1; /* We're out of sleeping time. Desperate! */
+
+		if (bytes_done == 0) {
+			/*
+			 * Reaching here means that we allow partial return,
+			 * that we've run out of time, and that we have
+			 * nothing to return.
+			 * So tell the FPGA to send anything it has or gets.
+			 */
+
+			iowrite32(1 | (channel->chan_num << 1) |
+				  (3 << 24) |  /* Opcode 3, flush it all! */
+				  (waiting_bufidx << 12),
+				  channel->endpoint->registers +
+				  fpga_buf_ctrl_reg);
+		}
+
+		/*
+		 * Reaching here means that we *do* have data in the buffer,
+		 * but the "partial" flag disallows returning less than
+		 * required. And we don't have as much. So loop again,
+		 * which is likely to end up blocking indefinitely until
+		 * enough data has arrived.
+		 */
+	}
+
+	mutex_unlock(&channel->wr_mutex);
+
+	if (channel->endpoint->fatal_error)
+		return -EIO;
+
+	if (rc)
+		return rc;
+
+	return bytes_done;
+}
+
+/*
+ * The timeout argument takes values as follows:
+ *  >0 : Flush with timeout
+ * ==0 : Flush, and wait indefinitely for the flush to complete
+ *  <0 : Autoflush: Flush only if there's a single buffer occupied
+ */
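+
+/*
+ * In this file, xillybus_flush() passes HZ (a one-second timeout),
+ * xillybus_write() passes 0 for synchronous channels, and the autoflush
+ * work item passes -1.
+ */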
+
+static int xillybus_myflush(struct xilly_channel *channel, long timeout)
+{
+	int rc;
+	unsigned long flags;
+
+	int end_offset_plus1;
+	int bufidx, bufidx_minus1;
+	int i;
+	int empty;
+	int new_rd_host_buf_pos;
+
+	if (channel->endpoint->fatal_error)
+		return -EIO;
+	rc = mutex_lock_interruptible(&channel->rd_mutex);
+	if (rc)
+		return rc;
+
+	/*
+	 * Don't flush a closed channel. This can happen when the work queued
+	 * autoflush thread fires off after the file has closed. This is not
+	 * an error, just something to dismiss.
+	 */
+
+	if (!channel->rd_ref_count)
+		goto done;
+
+	bufidx = channel->rd_host_buf_idx;
+
+	bufidx_minus1 = (bufidx == 0) ?
+		channel->num_rd_buffers - 1 :
+		bufidx - 1;
+
+	end_offset_plus1 = channel->rd_host_buf_pos >>
+		channel->log2_element_size;
+
+	new_rd_host_buf_pos = channel->rd_host_buf_pos -
+		(end_offset_plus1 << channel->log2_element_size);
+
+	/* Submit the current buffer if it's nonempty */
+	if (end_offset_plus1) {
+		unsigned char *tail = channel->rd_buffers[bufidx]->addr +
+			(end_offset_plus1 << channel->log2_element_size);
+
+		/* Copy unflushed data, so we can put it in the next buffer */
+		for (i = 0; i < new_rd_host_buf_pos; i++)
+			channel->rd_leftovers[i] = *tail++;
+
+		spin_lock_irqsave(&channel->rd_spinlock, flags);
+
+		/* Autoflush only if a single buffer is occupied */
+
+		if ((timeout < 0) &&
+		    (channel->rd_full ||
+		     (bufidx_minus1 != channel->rd_fpga_buf_idx))) {
+			spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+			/*
+			 * A new work item may be queued by the ISR exactly
+			 * now, since the execution of a work item allows the
+			 * queuing of a new one while it's running.
+			 */
+			goto done;
+		}
+
+		/* The 4th element is never needed for data, so it's a flag */
+		channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0);
+
+		/* Set up rd_full to reflect a certain moment's state */
+
+		if (bufidx == channel->rd_fpga_buf_idx)
+			channel->rd_full = 1;
+		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+		if (bufidx >= (channel->num_rd_buffers - 1))
+			channel->rd_host_buf_idx = 0;
+		else
+			channel->rd_host_buf_idx++;
+
+		channel->endpoint->ephw->hw_sync_sgl_for_device(
+			channel->endpoint,
+			channel->rd_buffers[bufidx]->dma_addr,
+			channel->rd_buf_size,
+			DMA_TO_DEVICE);
+
+		mutex_lock(&channel->endpoint->register_mutex);
+
+		iowrite32(end_offset_plus1 - 1,
+			  channel->endpoint->registers + fpga_buf_offset_reg);
+
+		iowrite32((channel->chan_num << 1) | /* Channel ID */
+			  (2 << 24) |  /* Opcode 2, submit buffer */
+			  (bufidx << 12),
+			  channel->endpoint->registers + fpga_buf_ctrl_reg);
+
+		mutex_unlock(&channel->endpoint->register_mutex);
+	} else if (bufidx == 0) {
+		bufidx = channel->num_rd_buffers - 1;
+	} else {
+		bufidx--;
+	}
+
+	channel->rd_host_buf_pos = new_rd_host_buf_pos;
+
+	if (timeout < 0)
+		goto done; /* Autoflush */
+
+	/*
+	 * bufidx is now the last buffer written to (or equal to
+	 * rd_fpga_buf_idx if buffer was never written to), and
+	 * channel->rd_host_buf_idx the one after it.
+	 *
+	 * If bufidx == channel->rd_fpga_buf_idx we're either empty or full.
+	 */
+
+	while (1) { /* Loop waiting for draining of buffers */
+		spin_lock_irqsave(&channel->rd_spinlock, flags);
+
+		if (bufidx != channel->rd_fpga_buf_idx)
+			channel->rd_full = 1; /*
+					       * Not really full,
+					       * but needs waiting.
+					       */
+
+		empty = !channel->rd_full;
+
+		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+		if (empty)
+			break;
+
+		/*
+		 * Indefinite sleep with mutex taken. With data waiting for
+		 * flushing user should not be surprised if open() for write
+		 * sleeps.
+		 */
+		if (timeout == 0)
+			wait_event_interruptible(channel->rd_wait,
+						 (!channel->rd_full));
+
+		else if (wait_event_interruptible_timeout(
+				 channel->rd_wait,
+				 (!channel->rd_full),
+				 timeout) == 0) {
+			dev_warn(channel->endpoint->dev,
+				 "Timed out while flushing. Output data may be lost.\n");
+
+			rc = -ETIMEDOUT;
+			break;
+		}
+
+		if (channel->rd_full) {
+			rc = -EINTR;
+			break;
+		}
+	}
+
+done:
+	mutex_unlock(&channel->rd_mutex);
+
+	if (channel->endpoint->fatal_error)
+		return -EIO;
+
+	return rc;
+}
+
+static int xillybus_flush(struct file *filp, fl_owner_t id)
+{
+	if (!(filp->f_mode & FMODE_WRITE))
+		return 0;
+
+	return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */
+}
+
+static void xillybus_autoflush(struct work_struct *work)
+{
+	struct delayed_work *workitem = container_of(
+		work, struct delayed_work, work);
+	struct xilly_channel *channel = container_of(
+		workitem, struct xilly_channel, rd_workitem);
+	int rc;
+
+	rc = xillybus_myflush(channel, -1);
+	if (rc == -EINTR)
+		dev_warn(channel->endpoint->dev,
+			 "Autoflush failed because work queue thread got a signal.\n");
+	else if (rc)
+		dev_err(channel->endpoint->dev,
+			"Autoflush failed under weird circumstances.\n");
+}
+
+static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
+			      size_t count, loff_t *f_pos)
+{
+	ssize_t rc;
+	unsigned long flags;
+	int bytes_done = 0;
+	struct xilly_channel *channel = filp->private_data;
+
+	int full, exhausted;
+	/* Initializations are there only to silence warnings */
+
+	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
+	int end_offset_plus1 = 0;
+
+	if (channel->endpoint->fatal_error)
+		return -EIO;
+
+	rc = mutex_lock_interruptible(&channel->rd_mutex);
+	if (rc)
+		return rc;
+
+	while (1) {
+		int bytes_to_do = count - bytes_done;
+
+		spin_lock_irqsave(&channel->rd_spinlock, flags);
+
+		full = channel->rd_full;
+
+		if (!full) {
+			bufidx = channel->rd_host_buf_idx;
+			bufpos = channel->rd_host_buf_pos;
+			howmany = channel->rd_buf_size - bufpos;
+
+			/*
+			 * Update rd_host_* to its state after this operation.
+			 * count=0 means committing the buffer immediately,
+			 * which is like flushing, but does not necessarily block.
+			 */
+
+			if ((howmany > bytes_to_do) &&
+			    (count ||
+			     ((bufpos >> channel->log2_element_size) == 0))) {
+				bufferdone = 0;
+
+				howmany = bytes_to_do;
+				channel->rd_host_buf_pos += howmany;
+			} else {
+				bufferdone = 1;
+
+				if (count) {
+					end_offset_plus1 =
+						channel->rd_buf_size >>
+						channel->log2_element_size;
+					channel->rd_host_buf_pos = 0;
+				} else {
+					unsigned char *tail;
+					int i;
+
+					howmany = 0;
+
+					end_offset_plus1 = bufpos >>
+						channel->log2_element_size;
+
+					channel->rd_host_buf_pos -=
+						end_offset_plus1 <<
+						channel->log2_element_size;
+
+					tail = channel->
+						rd_buffers[bufidx]->addr +
+						(end_offset_plus1 <<
+						 channel->log2_element_size);
+
+					for (i = 0;
+					     i < channel->rd_host_buf_pos;
+					     i++)
+						channel->rd_leftovers[i] =
+							*tail++;
+				}
+
+				if (bufidx == channel->rd_fpga_buf_idx)
+					channel->rd_full = 1;
+
+				if (bufidx >= (channel->num_rd_buffers - 1))
+					channel->rd_host_buf_idx = 0;
+				else
+					channel->rd_host_buf_idx++;
+			}
+		}
+
+		/*
+		 * Marking our situation after the possible changes above,
+		 * for use after releasing the spinlock.
+		 *
+		 * full = full before change
+		 * exhausted = full after possible change
+		 */
+
+		exhausted = channel->rd_full;
+
+		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+		if (!full) { /* Go on, now without the spinlock */
+			unsigned char *head =
+				channel->rd_buffers[bufidx]->addr;
+			int i;
+
+			if ((bufpos == 0) || /* Zero means it's virgin */
+			    (channel->rd_leftovers[3] != 0)) {
+				channel->endpoint->ephw->hw_sync_sgl_for_cpu(
+					channel->endpoint,
+					channel->rd_buffers[bufidx]->dma_addr,
+					channel->rd_buf_size,
+					DMA_TO_DEVICE);
+
+				/* Virgin, but leftovers are due */
+				for (i = 0; i < bufpos; i++)
+					*head++ = channel->rd_leftovers[i];
+
+				channel->rd_leftovers[3] = 0; /* Clear flag */
+			}
+
+			if (copy_from_user(
+				    channel->rd_buffers[bufidx]->addr + bufpos,
+				    userbuf, howmany))
+				rc = -EFAULT;
+
+			userbuf += howmany;
+			bytes_done += howmany;
+
+			if (bufferdone) {
+				channel->endpoint->ephw->hw_sync_sgl_for_device(
+					channel->endpoint,
+					channel->rd_buffers[bufidx]->dma_addr,
+					channel->rd_buf_size,
+					DMA_TO_DEVICE);
+
+				mutex_lock(&channel->endpoint->register_mutex);
+
+				iowrite32(end_offset_plus1 - 1,
+					  channel->endpoint->registers +
+					  fpga_buf_offset_reg);
+
+				iowrite32((channel->chan_num << 1) |
+					  (2 << 24) |  /* 2 = submit buffer */
+					  (bufidx << 12),
+					  channel->endpoint->registers +
+					  fpga_buf_ctrl_reg);
+
+				mutex_unlock(&channel->endpoint->
+					     register_mutex);
+
+				channel->rd_leftovers[3] =
+					(channel->rd_host_buf_pos != 0);
+			}
+
+			if (rc) {
+				mutex_unlock(&channel->rd_mutex);
+
+				if (channel->endpoint->fatal_error)
+					return -EIO;
+
+				if (!channel->rd_synchronous)
+					queue_delayed_work(
+						xillybus_wq,
+						&channel->rd_workitem,
+						XILLY_RX_TIMEOUT);
+
+				return rc;
+			}
+		}
+
+		if (bytes_done >= count)
+			break;
+
+		if (!exhausted)
+			continue; /* If there's more space, just go on */
+
+		if ((bytes_done > 0) && channel->rd_allow_partial)
+			break;
+
+		/*
+		 * Indefinite sleep with mutex taken. With data waiting for
+		 * flushing, the user should not be surprised if open() for
+		 * write sleeps.
+		 */
+
+		if (filp->f_flags & O_NONBLOCK) {
+			rc = -EAGAIN;
+			break;
+		}
+
+		if (wait_event_interruptible(channel->rd_wait,
+					     (!channel->rd_full))) {
+			mutex_unlock(&channel->rd_mutex);
+
+			if (channel->endpoint->fatal_error)
+				return -EIO;
+
+			if (bytes_done)
+				return bytes_done;
+			return -EINTR;
+		}
+	}
+
+	mutex_unlock(&channel->rd_mutex);
+
+	if (!channel->rd_synchronous)
+		queue_delayed_work(xillybus_wq,
+				   &channel->rd_workitem,
+				   XILLY_RX_TIMEOUT);
+
+	if (channel->endpoint->fatal_error)
+		return -EIO;
+
+	if (rc)
+		return rc;
+
+	if ((channel->rd_synchronous) && (bytes_done > 0)) {
+		rc = xillybus_myflush(filp->private_data, 0); /* No timeout */
+
+		if (rc && (rc != -EINTR))
+			return rc;
+	}
+
+	return bytes_done;
+}
+
+static int xillybus_open(struct inode *inode, struct file *filp)
+{
+	int rc = 0;
+	unsigned long flags;
+	int minor = iminor(inode);
+	int major = imajor(inode);
+	struct xilly_endpoint *ep_iter, *endpoint = NULL;
+	struct xilly_channel *channel;
+
+	mutex_lock(&ep_list_lock);
+
+	list_for_each_entry(ep_iter, &list_of_endpoints, ep_list) {
+		if ((ep_iter->major == major) &&
+		    (minor >= ep_iter->lowest_minor) &&
+		    (minor < (ep_iter->lowest_minor +
+			      ep_iter->num_channels))) {
+			endpoint = ep_iter;
+			break;
+		}
+	}
+	mutex_unlock(&ep_list_lock);
+
+	if (!endpoint) {
+		pr_err("xillybus: open() failed to find a device for major=%d and minor=%d\n",
+		       major, minor);
+		return -ENODEV;
+	}
+
+	if (endpoint->fatal_error)
+		return -EIO;
+
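+	/*
+	 * Entry 0 of channels[] is apparently reserved for the driver's
+	 * internal message channel, so user-visible minors map to the
+	 * entries starting at index 1.
+	 */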
+	channel = endpoint->channels[1 + minor - endpoint->lowest_minor];
+	filp->private_data = channel;
+
+	/*
+	 * It gets complicated because:
+	 * 1. We don't want to take a mutex we don't have to.
+	 * 2. We don't want to open one direction if the other will fail.
+	 */
+
+	if ((filp->f_mode & FMODE_READ) && (!channel->num_wr_buffers))
+		return -ENODEV;
+
+	if ((filp->f_mode & FMODE_WRITE) && (!channel->num_rd_buffers))
+		return -ENODEV;
+
+	if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) &&
+	    (channel->wr_synchronous || !channel->wr_allow_partial ||
+	     !channel->wr_supports_nonempty)) {
+		dev_err(endpoint->dev,
+			"open() failed: O_NONBLOCK not allowed for read on this device\n");
+		return -ENODEV;
+	}
+
+	if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) &&
+	    (channel->rd_synchronous || !channel->rd_allow_partial)) {
+		dev_err(endpoint->dev,
+			"open() failed: O_NONBLOCK not allowed for write on this device\n");
+		return -ENODEV;
+	}
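+	/*
+	 * As an illustration (the actual device names depend on the IDT;
+	 * this one is made up), the checks above reject something like
+	 *
+	 *   fd = open("/dev/xillybus_audio", O_WRONLY | O_NONBLOCK);
+	 *
+	 * on a synchronous write channel, rather than let write() block
+	 * later on.
+	 */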
+
+	/*
+	 * Note: open() may block on getting mutexes despite O_NONBLOCK.
+	 * This shouldn't occur normally, since multiple opens of the same
+	 * device file are almost always prohibited anyhow
+	 * (*_exclusive_open is normally set in real-life systems).
+	 */
+
+	if (filp->f_mode & FMODE_READ) {
+		rc = mutex_lock_interruptible(&channel->wr_mutex);
+		if (rc)
+			return rc;
+	}
+
+	if (filp->f_mode & FMODE_WRITE) {
+		rc = mutex_lock_interruptible(&channel->rd_mutex);
+		if (rc)
+			goto unlock_wr;
+	}
+
+	if ((filp->f_mode & FMODE_READ) &&
+	    (channel->wr_ref_count != 0) &&
+	    (channel->wr_exclusive_open)) {
+		rc = -EBUSY;
+		goto unlock;
+	}
+
+	if ((filp->f_mode & FMODE_WRITE) &&
+	    (channel->rd_ref_count != 0) &&
+	    (channel->rd_exclusive_open)) {
+		rc = -EBUSY;
+		goto unlock;
+	}
+
+	if (filp->f_mode & FMODE_READ) {
+		if (channel->wr_ref_count == 0) { /* First open of file */
+			/* Move the host to first buffer */
+			spin_lock_irqsave(&channel->wr_spinlock, flags);
+			channel->wr_host_buf_idx = 0;
+			channel->wr_host_buf_pos = 0;
+			channel->wr_fpga_buf_idx = -1;
+			channel->wr_empty = 1;
+			channel->wr_ready = 0;
+			channel->wr_sleepy = 1;
+			channel->wr_eof = -1;
+			channel->wr_hangup = 0;
+
+			spin_unlock_irqrestore(&channel->wr_spinlock, flags);
+
+			iowrite32(1 | (channel->chan_num << 1) |
+				  (4 << 24) |  /* Opcode 4, open channel */
+				  ((channel->wr_synchronous & 1) << 23),
+				  channel->endpoint->registers +
+				  fpga_buf_ctrl_reg);
+		}
+
+		channel->wr_ref_count++;
+	}
+
+	if (filp->f_mode & FMODE_WRITE) {
+		if (channel->rd_ref_count == 0) { /* First open of file */
+			/* Move the host to first buffer */
+			spin_lock_irqsave(&channel->rd_spinlock, flags);
+			channel->rd_host_buf_idx = 0;
+			channel->rd_host_buf_pos = 0;
+			channel->rd_leftovers[3] = 0; /* No leftovers. */
+			channel->rd_fpga_buf_idx = channel->num_rd_buffers - 1;
+			channel->rd_full = 0;
+
+			spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+			iowrite32((channel->chan_num << 1) |
+				  (4 << 24),   /* Opcode 4, open channel */
+				  channel->endpoint->registers +
+				  fpga_buf_ctrl_reg);
+		}
+
+		channel->rd_ref_count++;
+	}
+
+unlock:
+	if (filp->f_mode & FMODE_WRITE)
+		mutex_unlock(&channel->rd_mutex);
+unlock_wr:
+	if (filp->f_mode & FMODE_READ)
+		mutex_unlock(&channel->wr_mutex);
+
+	if (!rc && (!channel->seekable))
+		return nonseekable_open(inode, filp);
+
+	return rc;
+}
+
+static int xillybus_release(struct inode *inode, struct file *filp)
+{
+	unsigned long flags;
+	struct xilly_channel *channel = filp->private_data;
+
+	int buf_idx;
+	int eof;
+
+	if (channel->endpoint->fatal_error)
+		return -EIO;
+
+	if (filp->f_mode & FMODE_WRITE) {
+		mutex_lock(&channel->rd_mutex);
+
+		channel->rd_ref_count--;
+
+		if (channel->rd_ref_count == 0) {
+			/*
+			 * We rely on the kernel calling flush()
+			 * before we get here.
+			 */
+
+			iowrite32((channel->chan_num << 1) | /* Channel ID */
+				  (5 << 24),  /* Opcode 5, close channel */
+				  channel->endpoint->registers +
+				  fpga_buf_ctrl_reg);
+		}
+		mutex_unlock(&channel->rd_mutex);
+	}
+
+	if (filp->f_mode & FMODE_READ) {
+		mutex_lock(&channel->wr_mutex);
+
+		channel->wr_ref_count--;
+
+		if (channel->wr_ref_count == 0) {
+			iowrite32(1 | (channel->chan_num << 1) |
+				  (5 << 24),  /* Opcode 5, close channel */
+				  channel->endpoint->registers +
+				  fpga_buf_ctrl_reg);
+
+			/*
+			 * This is crazily cautious: We make sure not only
+			 * that we got an EOF (be it because we closed the
+			 * channel or because of a user's EOF), but also that
+			 * it points one past the last buffer that arrived,
+			 * so no leftover buffers are pending before wrapping
+			 * up (which can only happen on asynchronous channels,
+			 * by the way).
+			 */
+
+			while (1) {
+				spin_lock_irqsave(&channel->wr_spinlock,
+						  flags);
+				buf_idx = channel->wr_fpga_buf_idx;
+				eof = channel->wr_eof;
+				channel->wr_sleepy = 1;
+				spin_unlock_irqrestore(&channel->wr_spinlock,
+						       flags);
+
+				/*
+				 * Check if eof points at the buffer after
+				 * the last one the FPGA submitted. Note that
+				 * no EOF is marked by negative eof.
+				 */
+
+				buf_idx++;
+				if (buf_idx == channel->num_wr_buffers)
+					buf_idx = 0;
+
+				if (buf_idx == eof)
+					break;
+
+				/*
+				 * Steal an extra 100 ms if awakened by an
+				 * interrupt: this is a simple workaround for
+				 * an interrupt pending when entering, which
+				 * would otherwise result in declaring the
+				 * hardware non-responsive.
+				 */
+
+				if (wait_event_interruptible(
+					    channel->wr_wait,
+					    (!channel->wr_sleepy)))
+					msleep(100);
+
+				if (channel->wr_sleepy) {
+					mutex_unlock(&channel->wr_mutex);
+					dev_warn(channel->endpoint->dev,
+						 "Hardware failed to respond to close command, leaving the channel in a messy state.\n");
+					return -EINTR;
+				}
+			}
+		}
+
+		mutex_unlock(&channel->wr_mutex);
+	}
+
+	return 0;
+}
+
+static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence)
+{
+	struct xilly_channel *channel = filp->private_data;
+	loff_t pos = filp->f_pos;
+	int rc = 0;
+
+	/*
+	 * Take both mutexes uninterruptibly, since common applications
+	 * don't seem to expect an -EINTR here. Besides, concurrent access
+	 * to a single file descriptor on seekable devices is a mess anyhow.
+	 */
+
+	if (channel->endpoint->fatal_error)
+		return -EIO;
+
+	mutex_lock(&channel->wr_mutex);
+	mutex_lock(&channel->rd_mutex);
+
+	switch (whence) {
+	case SEEK_SET:
+		pos = offset;
+		break;
+	case SEEK_CUR:
+		pos += offset;
+		break;
+	case SEEK_END:
+		pos = offset; /* Going to the end => to the beginning */
+		break;
+	default:
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* In any case, we must finish on an element boundary */
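+	/* E.g. log2_element_size == 2 (32-bit elements) makes the mask 0x3 */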
+	if (pos & ((1 << channel->log2_element_size) - 1)) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&channel->endpoint->register_mutex);
+
+	iowrite32(pos >> channel->log2_element_size,
+		  channel->endpoint->registers + fpga_buf_offset_reg);
+
+	iowrite32((channel->chan_num << 1) |
+		  (6 << 24),  /* Opcode 6, set address */
+		  channel->endpoint->registers + fpga_buf_ctrl_reg);
+
+	mutex_unlock(&channel->endpoint->register_mutex);
+
+end:
+	mutex_unlock(&channel->rd_mutex);
+	mutex_unlock(&channel->wr_mutex);
+
+	if (rc) /* Return error after releasing mutexes */
+		return rc;
+
+	filp->f_pos = pos;
+
+	/*
+	 * Since seekable devices are allowed only when the channel is
+	 * synchronous, we assume that there is no data pending in either
+	 * direction (which holds true as long as no concurrent access on the
+	 * file descriptor takes place).
+	 * The only thing we may need to throw away is leftovers from partial
+	 * write() flush.
+	 */
+
+	channel->rd_leftovers[3] = 0;
+
+	return pos;
+}
+
+static __poll_t xillybus_poll(struct file *filp, poll_table *wait)
+{
+	struct xilly_channel *channel = filp->private_data;
+	__poll_t mask = 0;
+	unsigned long flags;
+
+	poll_wait(filp, &channel->endpoint->ep_wait, wait);
+
+	/*
+	 * poll() doesn't play ball with read() channels unless they are
+	 * asynchronous and support the nonempty message: otherwise there
+	 * would be situations where data has arrived at the FPGA while
+	 * users expect select() to wake up, which it might not.
+	 */
+
+	if (!channel->wr_synchronous && channel->wr_supports_nonempty) {
+		poll_wait(filp, &channel->wr_wait, wait);
+		poll_wait(filp, &channel->wr_ready_wait, wait);
+
+		spin_lock_irqsave(&channel->wr_spinlock, flags);
+		if (!channel->wr_empty || channel->wr_ready)
+			mask |= EPOLLIN | EPOLLRDNORM;
+
+		if (channel->wr_hangup)
+			/*
+			 * Not EPOLLHUP, because its behavior is murky,
+			 * and EPOLLIN does what we want: wake up
+			 * the read file descriptor so it sees EOF.
+			 */
+			mask |= EPOLLIN | EPOLLRDNORM;
+		spin_unlock_irqrestore(&channel->wr_spinlock, flags);
+	}
+
+	/*
+	 * If partial data write is disallowed on a write() channel,
+	 * it's pointless to ever signal OK to write, because it could
+	 * block despite some space being available.
+	 */
+
+	if (channel->rd_allow_partial) {
+		poll_wait(filp, &channel->rd_wait, wait);
+
+		spin_lock_irqsave(&channel->rd_spinlock, flags);
+		if (!channel->rd_full)
+			mask |= EPOLLOUT | EPOLLWRNORM;
+		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+	}
+
+	if (channel->endpoint->fatal_error)
+		mask |= EPOLLERR;
+
+	return mask;
+}
+
+static const struct file_operations xillybus_fops = {
+	.owner      = THIS_MODULE,
+	.read       = xillybus_read,
+	.write      = xillybus_write,
+	.open       = xillybus_open,
+	.flush      = xillybus_flush,
+	.release    = xillybus_release,
+	.llseek     = xillybus_llseek,
+	.poll       = xillybus_poll,
+};
+
+static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
+				const unsigned char *idt)
+{
+	int rc;
+	dev_t dev;
+	int devnum, i, minor, major;
+	char devname[48];
+	struct device *device;
+
+	rc = alloc_chrdev_region(&dev, 0, /* minor start */
+				 endpoint->num_channels,
+				 xillyname);
+	if (rc) {
+		dev_warn(endpoint->dev, "Failed to obtain major/minors\n");
+		return rc;
+	}
+
+	endpoint->major = major = MAJOR(dev);
+	endpoint->lowest_minor = minor = MINOR(dev);
+
+	cdev_init(&endpoint->cdev, &xillybus_fops);
+	endpoint->cdev.owner = endpoint->ephw->owner;
+	rc = cdev_add(&endpoint->cdev, MKDEV(major, minor),
+		      endpoint->num_channels);
+	if (rc) {
+		dev_warn(endpoint->dev, "Failed to add cdev. Aborting.\n");
+		goto unregister_chrdev;
+	}
+
+	idt++;
+
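+	/*
+	 * The IDT is walked as consecutive NUL-terminated name strings,
+	 * one per channel (its leading byte was skipped just above).
+	 */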
+	for (i = minor, devnum = 0;
+	     devnum < endpoint->num_channels;
+	     devnum++, i++) {
+		snprintf(devname, sizeof(devname)-1, "xillybus_%s", idt);
+
+		devname[sizeof(devname)-1] = 0; /* Should never matter */
+
+		while (*idt++)
+			/* Skip to next */;
+
+		device = device_create(xillybus_class,
+				       NULL,
+				       MKDEV(major, i),
+				       NULL,
+				       "%s", devname);
+
+		if (IS_ERR(device)) {
+			dev_warn(endpoint->dev,
+				 "Failed to create %s device. Aborting.\n",
+				 devname);
+			rc = -ENODEV;
+			goto unroll_device_create;
+		}
+	}
+
+	dev_info(endpoint->dev, "Created %d device files.\n",
+		 endpoint->num_channels);
+	return 0; /* succeed */
+
+unroll_device_create:
+	devnum--; i--;
+	for (; devnum >= 0; devnum--, i--)
+		device_destroy(xillybus_class, MKDEV(major, i));
+
+	cdev_del(&endpoint->cdev);
+unregister_chrdev:
+	unregister_chrdev_region(MKDEV(major, minor), endpoint->num_channels);
+
+	return rc;
+}
+
+static void xillybus_cleanup_chrdev(struct xilly_endpoint *endpoint)
+{
+	int minor;
+
+	for (minor = endpoint->lowest_minor;
+	     minor < (endpoint->lowest_minor + endpoint->num_channels);
+	     minor++)
+		device_destroy(xillybus_class, MKDEV(endpoint->major, minor));
+	cdev_del(&endpoint->cdev);
+	unregister_chrdev_region(MKDEV(endpoint->major,
+				       endpoint->lowest_minor),
+				 endpoint->num_channels);
+
+	dev_info(endpoint->dev, "Removed %d device files.\n",
+		 endpoint->num_channels);
+}
+
+struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
+					      struct device *dev,
+					      struct xilly_endpoint_hardware
+					      *ephw)
+{
+	struct xilly_endpoint *endpoint;
+
+	endpoint = devm_kzalloc(dev, sizeof(*endpoint), GFP_KERNEL);
+	if (!endpoint)
+		return NULL;
+
+	endpoint->pdev = pdev;
+	endpoint->dev = dev;
+	endpoint->ephw = ephw;
+	endpoint->msg_counter = 0x0b;
+	endpoint->failed_messages = 0;
+	endpoint->fatal_error = 0;
+
+	init_waitqueue_head(&endpoint->ep_wait);
+	mutex_init(&endpoint->register_mutex);
+
+	return endpoint;
+}
+EXPORT_SYMBOL(xillybus_init_endpoint);
+
+static int xilly_quiesce(struct xilly_endpoint *endpoint)
+{
+	long t;
+
+	endpoint->idtlen = -1;
+
+	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
+		  endpoint->registers + fpga_dma_control_reg);
+
+	t = wait_event_interruptible_timeout(endpoint->ep_wait,
+					     (endpoint->idtlen >= 0),
+					     XILLY_TIMEOUT);
+	if (t <= 0) {
+		dev_err(endpoint->dev,
+			"Failed to quiesce the device on exit.\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
+int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
+{
+	int rc;
+	long t;
+
+	void *bootstrap_resources;
+	int idtbuffersize = (1 << PAGE_SHIFT);
+	struct device *dev = endpoint->dev;
+
+	/*
+	 * The bogus IDT is used during bootstrap to allocate the initial
+	 * message buffer, and later the message buffer plus space for the
+	 * IDT itself. The initial message buffer is a single page in size,
+	 * but it's soon replaced with a more modest one (and memory is freed).
+	 */
+
+	unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0,
+				       3, 192, PAGE_SHIFT, 0 };
+	struct xilly_idt_handle idt_handle;
+
+	/*
+	 * Writing the value 0x00000001 to the endianness register signals
+	 * which endianness this processor is using, so the FPGA can swap
+	 * words as necessary.
+	 */
+
+	iowrite32(1, endpoint->registers + fpga_endian_reg);
+
+	/* Bootstrap phase I: Allocate temporary message buffer */
+
+	bootstrap_resources = devres_open_group(dev, NULL, GFP_KERNEL);
+	if (!bootstrap_resources)
+		return -ENOMEM;
+
+	endpoint->num_channels = 0;
+
+	rc = xilly_setupchannels(endpoint, bogus_idt, 1);
+	if (rc)
+		return rc;
+
+	/* Clear the message subsystem (and counter in particular) */
+	iowrite32(0x04, endpoint->registers + fpga_msg_ctrl_reg);
+
+	endpoint->idtlen = -1;
+
+	/*
+	 * Set DMA 32/64 bit mode, quiesce the device (?!) and get IDT
+	 * buffer size.
+	 */
+	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
+		  endpoint->registers + fpga_dma_control_reg);
+
+	t = wait_event_interruptible_timeout(endpoint->ep_wait,
+					     (endpoint->idtlen >= 0),
+					     XILLY_TIMEOUT);
+	if (t <= 0) {
+		dev_err(endpoint->dev, "No response from FPGA. Aborting.\n");
+		return -ENODEV;
+	}
+
+	/* Enable DMA */
+	iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)),
+		  endpoint->registers + fpga_dma_control_reg);
+
+	/* Bootstrap phase II: Allocate buffer for IDT and obtain it */
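+	/*
+	 * bogus_idt[6] evidently holds the log2 of the IDT channel's buffer
+	 * size (initially PAGE_SHIFT), so it's bumped with each doubling.
+	 */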
+	while (endpoint->idtlen >= idtbuffersize) {
+		idtbuffersize *= 2;
+		bogus_idt[6]++;
+	}
+
+	endpoint->num_channels = 1;
+
+	rc = xilly_setupchannels(endpoint, bogus_idt, 2);
+	if (rc)
+		goto failed_idt;
+
+	rc = xilly_obtain_idt(endpoint);
+	if (rc)
+		goto failed_idt;
+
+	rc = xilly_scan_idt(endpoint, &idt_handle);
+	if (rc)
+		goto failed_idt;
+
+	devres_close_group(dev, bootstrap_resources);
+
+	/* Bootstrap phase III: Allocate buffers according to IDT */
+
+	rc = xilly_setupchannels(endpoint,
+				 idt_handle.chandesc,
+				 idt_handle.entries);
+	if (rc)
+		goto failed_idt;
+
+	/*
+	 * The endpoint is now completely configured, so we put it on the
+	 * list available to open() before registering the char device(s).
+	 */
+
+	mutex_lock(&ep_list_lock);
+	list_add_tail(&endpoint->ep_list, &list_of_endpoints);
+	mutex_unlock(&ep_list_lock);
+
+	rc = xillybus_init_chrdev(endpoint, idt_handle.idt);
+	if (rc)
+		goto failed_chrdevs;
+
+	devres_release_group(dev, bootstrap_resources);
+
+	return 0;
+
+failed_chrdevs:
+	mutex_lock(&ep_list_lock);
+	list_del(&endpoint->ep_list);
+	mutex_unlock(&ep_list_lock);
+
+failed_idt:
+	xilly_quiesce(endpoint);
+	flush_workqueue(xillybus_wq);
+
+	return rc;
+}
+EXPORT_SYMBOL(xillybus_endpoint_discovery);
+
+void xillybus_endpoint_remove(struct xilly_endpoint *endpoint)
+{
+	xillybus_cleanup_chrdev(endpoint);
+
+	mutex_lock(&ep_list_lock);
+	list_del(&endpoint->ep_list);
+	mutex_unlock(&ep_list_lock);
+
+	xilly_quiesce(endpoint);
+
+	/*
+	 * Flushing is done upon endpoint release to prevent access to memory
+	 * just about to be released. This makes the quiesce complete.
+	 */
+	flush_workqueue(xillybus_wq);
+}
+EXPORT_SYMBOL(xillybus_endpoint_remove);
+
+static int __init xillybus_init(void)
+{
+	mutex_init(&ep_list_lock);
+
+	xillybus_class = class_create(THIS_MODULE, xillyname);
+	if (IS_ERR(xillybus_class))
+		return PTR_ERR(xillybus_class);
+
+	xillybus_wq = alloc_workqueue(xillyname, 0, 0);
+	if (!xillybus_wq) {
+		class_destroy(xillybus_class);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void __exit xillybus_exit(void)
+{
+	/* flush_workqueue() was called for each endpoint released */
+	destroy_workqueue(xillybus_wq);
+
+	class_destroy(xillybus_class);
+}
+
+module_init(xillybus_init);
+module_exit(xillybus_exit);
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
new file mode 100644
index 0000000..4d6625c
--- /dev/null
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -0,0 +1,173 @@
+/*
+ * linux/drivers/misc/xillybus_of.c
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus FPGA/host framework using Open Firmware.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include "xillybus.h"
+
+MODULE_DESCRIPTION("Xillybus driver for Open Firmware");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_VERSION("1.06");
+MODULE_ALIAS("xillybus_of");
+MODULE_LICENSE("GPL v2");
+
+static const char xillyname[] = "xillybus_of";
+
+/* Match table for of_platform binding */
+static const struct of_device_id xillybus_of_match[] = {
+	{ .compatible = "xillybus,xillybus-1.00.a", },
+	{ .compatible = "xlnx,xillybus-1.00.a", }, /* Deprecated */
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, xillybus_of_match);
+
+static void xilly_dma_sync_single_for_cpu_of(struct xilly_endpoint *ep,
+					     dma_addr_t dma_handle,
+					     size_t size,
+					     int direction)
+{
+	dma_sync_single_for_cpu(ep->dev, dma_handle, size, direction);
+}
+
+static void xilly_dma_sync_single_for_device_of(struct xilly_endpoint *ep,
+						dma_addr_t dma_handle,
+						size_t size,
+						int direction)
+{
+	dma_sync_single_for_device(ep->dev, dma_handle, size, direction);
+}
+
+static void xilly_dma_sync_single_nop(struct xilly_endpoint *ep,
+				      dma_addr_t dma_handle,
+				      size_t size,
+				      int direction)
+{
+}
+
+static void xilly_of_unmap(void *ptr)
+{
+	struct xilly_mapping *data = ptr;
+
+	dma_unmap_single(data->device, data->dma_addr,
+			 data->size, data->direction);
+
+	kfree(ptr);
+}
+
+static int xilly_map_single_of(struct xilly_endpoint *ep,
+			       void *ptr,
+			       size_t size,
+			       int direction,
+			       dma_addr_t *ret_dma_handle
+	)
+{
+	dma_addr_t addr;
+	struct xilly_mapping *this;
+
+	this = kzalloc(sizeof(*this), GFP_KERNEL);
+	if (!this)
+		return -ENOMEM;
+
+	addr = dma_map_single(ep->dev, ptr, size, direction);
+
+	if (dma_mapping_error(ep->dev, addr)) {
+		kfree(this);
+		return -ENODEV;
+	}
+
+	this->device = ep->dev;
+	this->dma_addr = addr;
+	this->size = size;
+	this->direction = direction;
+
+	*ret_dma_handle = addr;
+
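+	/*
+	 * devm_add_action_or_reset() arranges for xilly_of_unmap() to run
+	 * when the device is released; if the registration itself fails,
+	 * it invokes the action immediately, so the mapping is undone
+	 * either way.
+	 */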
+	return devm_add_action_or_reset(ep->dev, xilly_of_unmap, this);
+}
+
+static struct xilly_endpoint_hardware of_hw = {
+	.owner = THIS_MODULE,
+	.hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_of,
+	.hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_of,
+	.map_single = xilly_map_single_of,
+};
+
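+/*
+ * On nodes marked dma-coherent, the hardware keeps CPU caches and DMA
+ * consistent by itself, so the sync callbacks can be no-ops.
+ */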
+static struct xilly_endpoint_hardware of_hw_coherent = {
+	.owner = THIS_MODULE,
+	.hw_sync_sgl_for_cpu = xilly_dma_sync_single_nop,
+	.hw_sync_sgl_for_device = xilly_dma_sync_single_nop,
+	.map_single = xilly_map_single_of,
+};
+
+static int xilly_drv_probe(struct platform_device *op)
+{
+	struct device *dev = &op->dev;
+	struct xilly_endpoint *endpoint;
+	int rc;
+	int irq;
+	struct resource *res;
+	struct xilly_endpoint_hardware *ephw = &of_hw;
+
+	if (of_property_read_bool(dev->of_node, "dma-coherent"))
+		ephw = &of_hw_coherent;
+
+	endpoint = xillybus_init_endpoint(NULL, dev, ephw);
+
+	if (!endpoint)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, endpoint);
+
+	res = platform_get_resource(op, IORESOURCE_MEM, 0);
+	endpoint->registers = devm_ioremap_resource(dev, res);
+
+	if (IS_ERR(endpoint->registers))
+		return PTR_ERR(endpoint->registers);
+
+	irq = platform_get_irq(op, 0);
+
+	rc = devm_request_irq(dev, irq, xillybus_isr, 0, xillyname, endpoint);
+
+	if (rc) {
+		dev_err(endpoint->dev,
+			"Failed to register IRQ handler. Aborting.\n");
+		return -ENODEV;
+	}
+
+	return xillybus_endpoint_discovery(endpoint);
+}
+
+static int xilly_drv_remove(struct platform_device *op)
+{
+	struct device *dev = &op->dev;
+	struct xilly_endpoint *endpoint = dev_get_drvdata(dev);
+
+	xillybus_endpoint_remove(endpoint);
+
+	return 0;
+}
+
+static struct platform_driver xillybus_platform_driver = {
+	.probe = xilly_drv_probe,
+	.remove = xilly_drv_remove,
+	.driver = {
+		.name = xillyname,
+		.of_match_table = xillybus_of_match,
+	},
+};
+
+module_platform_driver(xillybus_platform_driver);
diff --git a/drivers/char/xillybus/xillybus_pcie.c b/drivers/char/xillybus/xillybus_pcie.c
new file mode 100644
index 0000000..05e5324
--- /dev/null
+++ b/drivers/char/xillybus/xillybus_pcie.c
@@ -0,0 +1,221 @@
+/*
+ * linux/drivers/misc/xillybus_pcie.c
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus FPGA/host framework using PCI Express.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include "xillybus.h"
+
+MODULE_DESCRIPTION("Xillybus driver for PCIe");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_VERSION("1.06");
+MODULE_ALIAS("xillybus_pcie");
+MODULE_LICENSE("GPL v2");
+
+#define PCI_DEVICE_ID_XILLYBUS		0xebeb
+
+#define PCI_VENDOR_ID_ACTEL		0x11aa
+#define PCI_VENDOR_ID_LATTICE		0x1204
+
+static const char xillyname[] = "xillybus_pcie";
+
+static const struct pci_device_id xillyids[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_XILLYBUS)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ALTERA, PCI_DEVICE_ID_XILLYBUS)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ACTEL, PCI_DEVICE_ID_XILLYBUS)},
+	{PCI_DEVICE(PCI_VENDOR_ID_LATTICE, PCI_DEVICE_ID_XILLYBUS)},
+	{ /* End: all zeroes */ }
+};
+
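+/*
+ * Translate enum dma_data_direction values into the legacy PCI_DMA_*
+ * constants that the pci_* DMA wrappers below expect.
+ */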
+static int xilly_pci_direction(int direction)
+{
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		return PCI_DMA_TODEVICE;
+	case DMA_FROM_DEVICE:
+		return PCI_DMA_FROMDEVICE;
+	default:
+		return PCI_DMA_BIDIRECTIONAL;
+	}
+}
+
+static void xilly_dma_sync_single_for_cpu_pci(struct xilly_endpoint *ep,
+					      dma_addr_t dma_handle,
+					      size_t size,
+					      int direction)
+{
+	pci_dma_sync_single_for_cpu(ep->pdev,
+				    dma_handle,
+				    size,
+				    xilly_pci_direction(direction));
+}
+
+static void xilly_dma_sync_single_for_device_pci(struct xilly_endpoint *ep,
+						 dma_addr_t dma_handle,
+						 size_t size,
+						 int direction)
+{
+	pci_dma_sync_single_for_device(ep->pdev,
+				       dma_handle,
+				       size,
+				       xilly_pci_direction(direction));
+}
+
+static void xilly_pci_unmap(void *ptr)
+{
+	struct xilly_mapping *data = ptr;
+
+	pci_unmap_single(data->device, data->dma_addr,
+			 data->size, data->direction);
+
+	kfree(ptr);
+}
+
+/*
+ * Map either through the PCI DMA mapper or the non-PCI one. Behind the
+ * scenes exactly the same functions are called with the same parameters,
+ * but that may change.
+ */
+
+static int xilly_map_single_pci(struct xilly_endpoint *ep,
+				void *ptr,
+				size_t size,
+				int direction,
+				dma_addr_t *ret_dma_handle
+	)
+{
+	int pci_direction;
+	dma_addr_t addr;
+	struct xilly_mapping *this;
+
+	this = kzalloc(sizeof(*this), GFP_KERNEL);
+	if (!this)
+		return -ENOMEM;
+
+	pci_direction = xilly_pci_direction(direction);
+
+	addr = pci_map_single(ep->pdev, ptr, size, pci_direction);
+
+	if (pci_dma_mapping_error(ep->pdev, addr)) {
+		kfree(this);
+		return -ENODEV;
+	}
+
+	this->device = ep->pdev;
+	this->dma_addr = addr;
+	this->size = size;
+	this->direction = pci_direction;
+
+	*ret_dma_handle = addr;
+
+	return devm_add_action_or_reset(ep->dev, xilly_pci_unmap, this);
+}
+
+static struct xilly_endpoint_hardware pci_hw = {
+	.owner = THIS_MODULE,
+	.hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_pci,
+	.hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_pci,
+	.map_single = xilly_map_single_pci,
+};
+
+static int xilly_probe(struct pci_dev *pdev,
+		       const struct pci_device_id *ent)
+{
+	struct xilly_endpoint *endpoint;
+	int rc;
+
+	endpoint = xillybus_init_endpoint(pdev, &pdev->dev, &pci_hw);
+
+	if (!endpoint)
+		return -ENOMEM;
+
+	pci_set_drvdata(pdev, endpoint);
+
+	rc = pcim_enable_device(pdev);
+	if (rc) {
+		dev_err(endpoint->dev,
+			"pcim_enable_device() failed. Aborting.\n");
+		return rc;
+	}
+
+	/* L0s has caused packet drops. No power saving, thank you. */
+
+	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		dev_err(endpoint->dev,
+			"Incorrect BAR configuration. Aborting.\n");
+		return -ENODEV;
+	}
+
+	rc = pcim_iomap_regions(pdev, 0x01, xillyname);
+	if (rc) {
+		dev_err(endpoint->dev,
+			"pcim_iomap_regions() failed. Aborting.\n");
+		return rc;
+	}
+
+	endpoint->registers = pcim_iomap_table(pdev)[0];
+
+	pci_set_master(pdev);
+
+	/* Set up a single MSI interrupt */
+	if (pci_enable_msi(pdev)) {
+		dev_err(endpoint->dev,
+			"Failed to enable MSI interrupts. Aborting.\n");
+		return -ENODEV;
+	}
+	rc = devm_request_irq(&pdev->dev, pdev->irq, xillybus_isr, 0,
+			      xillyname, endpoint);
+	if (rc) {
+		dev_err(endpoint->dev,
+			"Failed to register MSI handler. Aborting.\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Some (old and buggy?) hardware drops 64-bit addressed PCIe packets,
+	 * even when the PCIe driver claims that a 64-bit mask is OK. On the
+	 * other hand, on some architectures, 64-bit addressing is mandatory.
+	 * So go for the 64-bit mask only when failing is the other option.
+	 */
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+		endpoint->dma_using_dac = 0;
+	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		endpoint->dma_using_dac = 1;
+	} else {
+		dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
+		return -ENODEV;
+	}
+
+	return xillybus_endpoint_discovery(endpoint);
+}
+
+static void xilly_remove(struct pci_dev *pdev)
+{
+	struct xilly_endpoint *endpoint = pci_get_drvdata(pdev);
+
+	xillybus_endpoint_remove(endpoint);
+}
+
+MODULE_DEVICE_TABLE(pci, xillyids);
+
+static struct pci_driver xillybus_driver = {
+	.name = xillyname,
+	.id_table = xillyids,
+	.probe = xilly_probe,
+	.remove = xilly_remove,
+};
+
+module_pci_driver(xillybus_driver);