Linux v4.19.13 snapshot of drivers/irqchip/.
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
new file mode 100644
index 0000000..383e7b7
--- /dev/null
+++ b/drivers/irqchip/Kconfig
@@ -0,0 +1,386 @@
+menu "IRQ chip support"
+
+config IRQCHIP
+	def_bool y
+	depends on OF_IRQ
+
+config ARM_GIC
+	bool
+	select IRQ_DOMAIN
+	select IRQ_DOMAIN_HIERARCHY
+	select GENERIC_IRQ_MULTI_HANDLER
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+
+config ARM_GIC_PM
+	bool
+	depends on PM
+	select ARM_GIC
+	select PM_CLK
+
+config ARM_GIC_MAX_NR
+	int
+	default 2 if ARCH_REALVIEW
+	default 1
+
+config ARM_GIC_V2M
+	bool
+	depends on PCI
+	select ARM_GIC
+	select PCI_MSI
+
+config GIC_NON_BANKED
+	bool
+
+config ARM_GIC_V3
+	bool
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_MULTI_HANDLER
+	select IRQ_DOMAIN_HIERARCHY
+	select PARTITION_PERCPU
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+
+config ARM_GIC_V3_ITS
+	bool
+	select GENERIC_MSI_IRQ_DOMAIN
+	default ARM_GIC_V3
+
+config ARM_GIC_V3_ITS_PCI
+	bool
+	depends on ARM_GIC_V3_ITS
+	depends on PCI
+	depends on PCI_MSI
+	default ARM_GIC_V3_ITS
+
+config ARM_GIC_V3_ITS_FSL_MC
+	bool
+	depends on ARM_GIC_V3_ITS
+	depends on FSL_MC_BUS
+	default ARM_GIC_V3_ITS
+
+config ARM_NVIC
+	bool
+	select IRQ_DOMAIN
+	select IRQ_DOMAIN_HIERARCHY
+	select GENERIC_IRQ_CHIP
+
+config ARM_VIC
+	bool
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_MULTI_HANDLER
+
+config ARM_VIC_NR
+	int
+	default 4 if ARCH_S5PV210
+	default 2
+	depends on ARM_VIC
+	help
+	  The maximum number of VICs available in the system, for
+	  power management.
+
+config ARMADA_370_XP_IRQ
+	bool
+	select GENERIC_IRQ_CHIP
+	select PCI_MSI if PCI
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+
+config ALPINE_MSI
+	bool
+	depends on PCI
+	select PCI_MSI
+	select GENERIC_IRQ_CHIP
+
+config ATMEL_AIC_IRQ
+	bool
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_MULTI_HANDLER
+	select SPARSE_IRQ
+
+config ATMEL_AIC5_IRQ
+	bool
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_MULTI_HANDLER
+	select SPARSE_IRQ
+
+config I8259
+	bool
+	select IRQ_DOMAIN
+
+config BCM6345_L1_IRQ
+	bool
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+
+config BCM7038_L1_IRQ
+	bool
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+
+config BCM7120_L2_IRQ
+	bool
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN
+
+config BRCMSTB_L2_IRQ
+	bool
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN
+
+config DW_APB_ICTL
+	bool
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN
+
+config FARADAY_FTINTC010
+	bool
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_MULTI_HANDLER
+	select SPARSE_IRQ
+
+config HISILICON_IRQ_MBIGEN
+	bool
+	select ARM_GIC_V3
+	select ARM_GIC_V3_ITS
+
+config IMGPDC_IRQ
+	bool
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN
+
+config IRQ_MIPS_CPU
+	bool
+	select GENERIC_IRQ_CHIP
+	select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING
+	select IRQ_DOMAIN
+	select IRQ_DOMAIN_HIERARCHY if GENERIC_IRQ_IPI
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+
+config CLPS711X_IRQCHIP
+	bool
+	depends on ARCH_CLPS711X
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_MULTI_HANDLER
+	select SPARSE_IRQ
+	default y
+
+config OMPIC
+	bool
+
+config OR1K_PIC
+	bool
+	select IRQ_DOMAIN
+
+config OMAP_IRQCHIP
+	bool
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN
+
+config ORION_IRQCHIP
+	bool
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_MULTI_HANDLER
+
+config PIC32_EVIC
+	bool
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN
+
+config JCORE_AIC
+	bool "J-Core integrated AIC" if COMPILE_TEST
+	depends on OF
+	select IRQ_DOMAIN
+	help
+	  Support for the J-Core integrated AIC.
+
+config RENESAS_INTC_IRQPIN
+	bool
+	select IRQ_DOMAIN
+
+config RENESAS_IRQC
+	bool
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN
+
+config ST_IRQCHIP
+	bool
+	select REGMAP
+	select MFD_SYSCON
+	help
+	  Enables SysCfg-controlled IRQs on STi-based platforms.
+
+config TANGO_IRQ
+	bool
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_CHIP
+
+config TB10X_IRQC
+	bool
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_CHIP
+
+config TS4800_IRQ
+	tristate "TS-4800 IRQ controller"
+	select IRQ_DOMAIN
+	depends on HAS_IOMEM
+	depends on SOC_IMX51 || COMPILE_TEST
+	help
+	  Support for the TS-4800 FPGA IRQ controller.
+
+config VERSATILE_FPGA_IRQ
+	bool
+	select IRQ_DOMAIN
+
+config VERSATILE_FPGA_IRQ_NR
+	int
+	default 4
+	depends on VERSATILE_FPGA_IRQ
+
+config XTENSA_MX
+	bool
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+
+config XILINX_INTC
+	bool
+	select IRQ_DOMAIN
+
+config IRQ_CROSSBAR
+	bool
+	help
+	  Support for a CROSSBAR IP that precedes the main interrupt controller.
+	  The primary irqchip invokes the crossbar's callback, which in turn
+	  allocates a free irq and configures the IP. Thus the peripheral
+	  interrupts are routed to one of the free irqchip interrupt lines.
+
+config KEYSTONE_IRQ
+	tristate "Keystone 2 IRQ controller IP"
+	depends on ARCH_KEYSTONE
+	help
+	  Support for the Texas Instruments Keystone 2 IRQ controller IP which
+	  is part of the Keystone 2 IPC mechanism.
+
+config MIPS_GIC
+	bool
+	select GENERIC_IRQ_IPI
+	select IRQ_DOMAIN_HIERARCHY
+	select MIPS_CM
+
+config INGENIC_IRQ
+	bool
+	depends on MACH_INGENIC
+	default y
+
+config RENESAS_H8300H_INTC
+	bool
+	select IRQ_DOMAIN
+
+config RENESAS_H8S_INTC
+	bool
+	select IRQ_DOMAIN
+
+config IMX_GPCV2
+	bool
+	select IRQ_DOMAIN
+	help
+	  Enables the wakeup IRQs for IMX platforms with the GPCv2 block.
+
+config IRQ_MXS
+	def_bool y if MACH_ASM9260 || ARCH_MXS
+	select IRQ_DOMAIN
+	select STMP_DEVICE
+
+config MSCC_OCELOT_IRQ
+	bool
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_CHIP
+
+config MVEBU_GICP
+	bool
+
+config MVEBU_ICU
+	bool
+
+config MVEBU_ODMI
+	bool
+	select GENERIC_MSI_IRQ_DOMAIN
+
+config MVEBU_PIC
+	bool
+
+config LS_SCFG_MSI
+	def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
+	depends on PCI && PCI_MSI
+
+config PARTITION_PERCPU
+	bool
+
+config EZNPS_GIC
+	bool "NPS400 Global Interrupt Manager (GIM)"
+	depends on ARC || (COMPILE_TEST && !64BIT)
+	select IRQ_DOMAIN
+	help
+	  Support for the EZchip NPS400 global interrupt manager (GIM).
+
+config STM32_EXTI
+	bool
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_CHIP
+
+config QCOM_IRQ_COMBINER
+	bool "QCOM IRQ combiner support"
+	depends on ARCH_QCOM && ACPI
+	select IRQ_DOMAIN
+	select IRQ_DOMAIN_HIERARCHY
+	help
+	  Say yes here to add support for the IRQ combiner devices embedded
+	  in Qualcomm Technologies chips.
+
+config IRQ_UNIPHIER_AIDET
+	bool "UniPhier AIDET support" if COMPILE_TEST
+	depends on ARCH_UNIPHIER || COMPILE_TEST
+	default ARCH_UNIPHIER
+	select IRQ_DOMAIN_HIERARCHY
+	help
+	  Support for the UniPhier AIDET (ARM Interrupt Detector).
+
+config MESON_IRQ_GPIO
+	bool "Meson GPIO Interrupt Multiplexer"
+	depends on ARCH_MESON
+	select IRQ_DOMAIN
+	select IRQ_DOMAIN_HIERARCHY
+	help
+	  Support for the Meson SoC family GPIO interrupt multiplexer.
+
+config GOLDFISH_PIC
+	bool "Goldfish programmable interrupt controller"
+	depends on MIPS && (GOLDFISH || COMPILE_TEST)
+	select IRQ_DOMAIN
+	help
+	  Say yes here to enable the Goldfish interrupt controller driver used
+	  for Goldfish-based virtual platforms.
+
+config QCOM_PDC
+	bool "QCOM PDC"
+	depends on ARCH_QCOM
+	select IRQ_DOMAIN
+	select IRQ_DOMAIN_HIERARCHY
+	help
+	  Power Domain Controller driver to manage and configure wakeup
+	  IRQs for Qualcomm Technologies Inc (QTI) mobile chips.
+
+config SIFIVE_PLIC
+	bool "SiFive Platform-Level Interrupt Controller"
+	depends on RISCV
+	help
+	  This enables support for the PLIC chip found in SiFive (and
+	  potentially other) RISC-V systems. The PLIC controls device
+	  interrupts and connects them to each core's local interrupt
+	  controller. Aside from timer and software interrupts, all other
+	  interrupt sources are subordinate to the PLIC.
+
+	  If you don't know what to do here, say Y.
+
+endmenu
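
Most of the symbols above are hidden bools: platform Kconfig files turn them on with select rather than prompting the user, and depends on only gates visibility and selectability. In C code each enabled symbol surfaces as a CONFIG_* macro. A minimal sketch, not part of the tree, of testing one of them at compile time:

/*
 * Illustrative only: IS_ENABLED() evaluates to 1 when the symbol is
 * built in (=y), so the compiler can discard the dead branch.
 */
#include <linux/kconfig.h>
#include <linux/types.h>

static inline bool have_gic_v3_its(void)
{
	/* CONFIG_ARM_GIC_V3_ITS defaults to the value of ARM_GIC_V3 */
	return IS_ENABLED(CONFIG_ARM_GIC_V3_ITS);
}
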
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
new file mode 100644
index 0000000..fbd1ec8
--- /dev/null
+++ b/drivers/irqchip/Makefile
@@ -0,0 +1,90 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_IRQCHIP)			+= irqchip.o
+
+obj-$(CONFIG_ALPINE_MSI)		+= irq-alpine-msi.o
+obj-$(CONFIG_ATH79)			+= irq-ath79-cpu.o
+obj-$(CONFIG_ATH79)			+= irq-ath79-misc.o
+obj-$(CONFIG_ARCH_BCM2835)		+= irq-bcm2835.o
+obj-$(CONFIG_ARCH_BCM2835)		+= irq-bcm2836.o
+obj-$(CONFIG_ARCH_EXYNOS)		+= exynos-combiner.o
+obj-$(CONFIG_FARADAY_FTINTC010)		+= irq-ftintc010.o
+obj-$(CONFIG_ARCH_HIP04)		+= irq-hip04.o
+obj-$(CONFIG_ARCH_LPC32XX)		+= irq-lpc32xx.o
+obj-$(CONFIG_ARCH_MMP)			+= irq-mmp.o
+obj-$(CONFIG_IRQ_MXS)			+= irq-mxs.o
+obj-$(CONFIG_ARCH_TEGRA)		+= irq-tegra.o
+obj-$(CONFIG_ARCH_S3C24XX)		+= irq-s3c24xx.o
+obj-$(CONFIG_DW_APB_ICTL)		+= irq-dw-apb-ictl.o
+obj-$(CONFIG_CLPS711X_IRQCHIP)		+= irq-clps711x.o
+obj-$(CONFIG_OMPIC)			+= irq-ompic.o
+obj-$(CONFIG_OR1K_PIC)			+= irq-or1k-pic.o
+obj-$(CONFIG_ORION_IRQCHIP)		+= irq-orion.o
+obj-$(CONFIG_OMAP_IRQCHIP)		+= irq-omap-intc.o
+obj-$(CONFIG_ARCH_SUNXI)		+= irq-sun4i.o
+obj-$(CONFIG_ARCH_SUNXI)		+= irq-sunxi-nmi.o
+obj-$(CONFIG_ARCH_SPEAR3XX)		+= spear-shirq.o
+obj-$(CONFIG_ARM_GIC)			+= irq-gic.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_PM)		+= irq-gic-pm.o
+obj-$(CONFIG_ARCH_REALVIEW)		+= irq-gic-realview.o
+obj-$(CONFIG_ARM_GIC_V2M)		+= irq-gic-v2m.o
+obj-$(CONFIG_ARM_GIC_V3)		+= irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_V3_ITS)		+= irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
+obj-$(CONFIG_ARM_GIC_V3_ITS_PCI)	+= irq-gic-v3-its-pci-msi.o
+obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC)	+= irq-gic-v3-its-fsl-mc-msi.o
+obj-$(CONFIG_PARTITION_PERCPU)		+= irq-partition-percpu.o
+obj-$(CONFIG_HISILICON_IRQ_MBIGEN)	+= irq-mbigen.o
+obj-$(CONFIG_ARM_NVIC)			+= irq-nvic.o
+obj-$(CONFIG_ARM_VIC)			+= irq-vic.o
+obj-$(CONFIG_ARMADA_370_XP_IRQ)		+= irq-armada-370-xp.o
+obj-$(CONFIG_ATMEL_AIC_IRQ)		+= irq-atmel-aic-common.o irq-atmel-aic.o
+obj-$(CONFIG_ATMEL_AIC5_IRQ)		+= irq-atmel-aic-common.o irq-atmel-aic5.o
+obj-$(CONFIG_I8259)			+= irq-i8259.o
+obj-$(CONFIG_IMGPDC_IRQ)		+= irq-imgpdc.o
+obj-$(CONFIG_IRQ_MIPS_CPU)		+= irq-mips-cpu.o
+obj-$(CONFIG_SIRF_IRQ)			+= irq-sirfsoc.o
+obj-$(CONFIG_JCORE_AIC)			+= irq-jcore-aic.o
+obj-$(CONFIG_RENESAS_INTC_IRQPIN)	+= irq-renesas-intc-irqpin.o
+obj-$(CONFIG_RENESAS_IRQC)		+= irq-renesas-irqc.o
+obj-$(CONFIG_VERSATILE_FPGA_IRQ)	+= irq-versatile-fpga.o
+obj-$(CONFIG_ARCH_NSPIRE)		+= irq-zevio.o
+obj-$(CONFIG_ARCH_VT8500)		+= irq-vt8500.o
+obj-$(CONFIG_ST_IRQCHIP)		+= irq-st.o
+obj-$(CONFIG_TANGO_IRQ)			+= irq-tango.o
+obj-$(CONFIG_TB10X_IRQC)		+= irq-tb10x.o
+obj-$(CONFIG_TS4800_IRQ)		+= irq-ts4800.o
+obj-$(CONFIG_XTENSA)			+= irq-xtensa-pic.o
+obj-$(CONFIG_XTENSA_MX)			+= irq-xtensa-mx.o
+obj-$(CONFIG_XILINX_INTC)		+= irq-xilinx-intc.o
+obj-$(CONFIG_IRQ_CROSSBAR)		+= irq-crossbar.o
+obj-$(CONFIG_SOC_VF610)			+= irq-vf610-mscm-ir.o
+obj-$(CONFIG_BCM6345_L1_IRQ)		+= irq-bcm6345-l1.o
+obj-$(CONFIG_BCM7038_L1_IRQ)		+= irq-bcm7038-l1.o
+obj-$(CONFIG_BCM7120_L2_IRQ)		+= irq-bcm7120-l2.o
+obj-$(CONFIG_BRCMSTB_L2_IRQ)		+= irq-brcmstb-l2.o
+obj-$(CONFIG_KEYSTONE_IRQ)		+= irq-keystone.o
+obj-$(CONFIG_MIPS_GIC)			+= irq-mips-gic.o
+obj-$(CONFIG_ARCH_MEDIATEK)		+= irq-mtk-sysirq.o irq-mtk-cirq.o
+obj-$(CONFIG_ARCH_DIGICOLOR)		+= irq-digicolor.o
+obj-$(CONFIG_RENESAS_H8300H_INTC)	+= irq-renesas-h8300h.o
+obj-$(CONFIG_RENESAS_H8S_INTC)		+= irq-renesas-h8s.o
+obj-$(CONFIG_ARCH_SA1100)		+= irq-sa11x0.o
+obj-$(CONFIG_INGENIC_IRQ)		+= irq-ingenic.o
+obj-$(CONFIG_IMX_GPCV2)			+= irq-imx-gpcv2.o
+obj-$(CONFIG_PIC32_EVIC)		+= irq-pic32-evic.o
+obj-$(CONFIG_MSCC_OCELOT_IRQ)		+= irq-mscc-ocelot.o
+obj-$(CONFIG_MVEBU_GICP)		+= irq-mvebu-gicp.o
+obj-$(CONFIG_MVEBU_ICU)			+= irq-mvebu-icu.o
+obj-$(CONFIG_MVEBU_ODMI)		+= irq-mvebu-odmi.o
+obj-$(CONFIG_MVEBU_PIC)			+= irq-mvebu-pic.o
+obj-$(CONFIG_LS_SCFG_MSI)		+= irq-ls-scfg-msi.o
+obj-$(CONFIG_EZNPS_GIC)			+= irq-eznps.o
+obj-$(CONFIG_ARCH_ASPEED)		+= irq-aspeed-vic.o irq-aspeed-i2c-ic.o
+obj-$(CONFIG_STM32_EXTI)		+= irq-stm32-exti.o
+obj-$(CONFIG_QCOM_IRQ_COMBINER)		+= qcom-irq-combiner.o
+obj-$(CONFIG_IRQ_UNIPHIER_AIDET)	+= irq-uniphier-aidet.o
+obj-$(CONFIG_ARCH_SYNQUACER)		+= irq-sni-exiu.o
+obj-$(CONFIG_MESON_IRQ_GPIO)		+= irq-meson-gpio.o
+obj-$(CONFIG_GOLDFISH_PIC)		+= irq-goldfish-pic.o
+obj-$(CONFIG_NDS32)			+= irq-ativic32.o
+obj-$(CONFIG_QCOM_PDC)			+= qcom-pdc.o
+obj-$(CONFIG_SIFIVE_PLIC)		+= irq-sifive-plic.o
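
Each obj-$(CONFIG_FOO) line pulls the corresponding driver into the build only when the symbol is y; the compiled object then binds itself to a matching device-tree node at boot through IRQCHIP_DECLARE(), the pattern every driver below follows. A minimal skeleton, where the compatible string and function names are hypothetical:

#include <linux/irqchip.h>
#include <linux/of.h>

/*
 * Hypothetical driver: the Makefile above would carry
 * obj-$(CONFIG_DEMO_INTC) += irq-demo-intc.o for it.
 */
static int __init demo_intc_of_init(struct device_node *node,
				    struct device_node *parent)
{
	/* map registers, add an irq_domain, set handlers here */
	return 0;
}
IRQCHIP_DECLARE(demo_intc, "acme,demo-intc", demo_intc_of_init);
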
diff --git a/drivers/irqchip/alphascale_asm9260-icoll.h b/drivers/irqchip/alphascale_asm9260-icoll.h
new file mode 100644
index 0000000..5cec108
--- /dev/null
+++ b/drivers/irqchip/alphascale_asm9260-icoll.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2014 Oleksij Rempel <linux@rempel-privat.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ALPHASCALE_ASM9260_ICOLL_H
+#define _ALPHASCALE_ASM9260_ICOLL_H
+
+#define ASM9260_NUM_IRQS		64
+/*
+ * This device provides 4 offsets for each register:
+ * 0x0 - plain read/write mode
+ * 0x4 - set mode, OR logic.
+ * 0x8 - clear mode, AND-NOT logic.
+ * 0xc - toggle mode, XOR logic.
+ */
+
+#define ASM9260_HW_ICOLL_VECTOR				0x0000
+/*
+ * bits 31:2
+ * This register presents the vector address for the interrupt currently
+ * active on the CPU IRQ input. Writing to this register notifies the
+ * interrupt collector that the interrupt service routine for the current
+ * interrupt has been entered.
+ * The exception trap should have a LDPC instruction from this address:
+ * LDPC ASM9260_HW_ICOLL_VECTOR_ADDR; IRQ exception at 0xffff0018
+ */
+
+/*
+ * The Interrupt Collector Level Acknowledge Register is used by software to
+ * indicate the completion of an interrupt on a specific level.
+ * This register is written at the very end of an interrupt service routine. If
+ * nesting is used then the CPU irq must be turned on before writing to this
+ * register to avoid a race condition in the CPU interrupt hardware.
+ */
+#define ASM9260_HW_ICOLL_LEVELACK			0x0010
+#define ASM9260_BM_LEVELn(nr)				BIT(nr)
+
+#define ASM9260_HW_ICOLL_CTRL				0x0020
+/*
+ * ASM9260_BM_CTRL_SFTRST and ASM9260_BM_CTRL_CLKGATE are not available on
+ * asm9260.
+ */
+#define ASM9260_BM_CTRL_SFTRST				BIT(31)
+#define ASM9260_BM_CTRL_CLKGATE				BIT(30)
+/* disable interrupt level nesting */
+#define ASM9260_BM_CTRL_NO_NESTING			BIT(19)
+/*
+ * Set this bit to one to enable the RISC32-style read side effect associated with
+ * the vector address register. In this mode, interrupt in-service is signaled
+ * by the read of the ASM9260_HW_ICOLL_VECTOR register to acquire the interrupt
+ * vector address. Set this bit to zero for normal operation, in which the ISR
+ * signals in-service explicitly by means of a write to the
+ * ASM9260_HW_ICOLL_VECTOR register.
+ * 0 - Must Write to Vector register to go in-service.
+ * 1 - Go in-service as a read side effect
+ */
+#define ASM9260_BM_CTRL_ARM_RSE_MODE			BIT(18)
+#define ASM9260_BM_CTRL_IRQ_ENABLE			BIT(16)
+
+#define ASM9260_HW_ICOLL_STAT_OFFSET			0x0030
+/*
+ * bits 5:0
+ * Vector number of current interrupt. Multiply by 4 and add to vector base
+ * address to obtain the value in ASM9260_HW_ICOLL_VECTOR.
+ */
+
+/*
+ * RAW0 and RAW1 provide a read-only view of the raw interrupt request lines
+ * coming from various parts of the chip. Their purpose is to improve
+ * diagnostic observability.
+ */
+#define ASM9260_HW_ICOLL_RAW0				0x0040
+#define ASM9260_HW_ICOLL_RAW1				0x0050
+
+#define ASM9260_HW_ICOLL_INTERRUPT0			0x0060
+#define ASM9260_HW_ICOLL_INTERRUPTn(n)		(0x0060 + ((n) >> 2) * 0x10)
+/*
+ * WARNING: Modifying the priority of an enabled interrupt may result in
+ * undefined behavior.
+ */
+#define ASM9260_BM_INT_PRIORITY_MASK			0x3
+#define ASM9260_BM_INT_ENABLE				BIT(2)
+#define ASM9260_BM_INT_SOFTIRQ				BIT(3)
+
+#define ASM9260_BM_ICOLL_INTERRUPTn_SHIFT(n)		(((n) & 0x3) << 3)
+#define ASM9260_BM_ICOLL_INTERRUPTn_ENABLE(n)		(1 << (2 + \
+			ASM9260_BM_ICOLL_INTERRUPTn_SHIFT(n)))
+
+#define ASM9260_HW_ICOLL_VBASE				0x0160
+/*
+ * bits 31:2
+ * This bitfield holds the upper 30 bits of the base address of the vector
+ * table.
+ */
+
+#define ASM9260_HW_ICOLL_CLEAR0				0x01d0
+#define ASM9260_HW_ICOLL_CLEAR1				0x01e0
+#define ASM9260_HW_ICOLL_CLEARn(n)			((((n) >> 5) * 0x10) \
+							+ SET_REG)
+#define ASM9260_BM_CLEAR_BIT(n)				BIT((n) & 0x1f)
+
+/* Scratchpad */
+#define ASM9260_HW_ICOLL_UNDEF_VECTOR			0x01f0
+#endif
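
The set/clr offsets described at the top of this header let a driver change one bit with a single posted write instead of a read-modify-write sequence. A minimal sketch of mask/unmask helpers built on the macros above; SET_REG, CLR_REG and icoll_base are assumptions for illustration, taken from the 0x4/0x8 offsets in that comment:

#include <linux/bitops.h>
#include <linux/io.h>
#include "alphascale_asm9260-icoll.h"

#define SET_REG 0x4			/* assumed: OR-style write offset */
#define CLR_REG 0x8			/* assumed: AND-NOT-style write offset */

static void __iomem *icoll_base;	/* assumed ioremap()ed elsewhere */

static void icoll_unmask(unsigned int hwirq)
{
	/* one write sets only this IRQ's ENABLE bit in its byte lane */
	writel(ASM9260_BM_ICOLL_INTERRUPTn_ENABLE(hwirq), icoll_base +
	       ASM9260_HW_ICOLL_INTERRUPTn(hwirq) + SET_REG);
}

static void icoll_mask(unsigned int hwirq)
{
	writel(ASM9260_BM_ICOLL_INTERRUPTn_ENABLE(hwirq), icoll_base +
	       ASM9260_HW_ICOLL_INTERRUPTn(hwirq) + CLR_REG);
}
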
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
new file mode 100644
index 0000000..b78a169
--- /dev/null
+++ b/drivers/irqchip/exynos-combiner.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Combiner irqchip for EXYNOS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define COMBINER_ENABLE_SET	0x0
+#define COMBINER_ENABLE_CLEAR	0x4
+#define COMBINER_INT_STATUS	0xC
+
+#define IRQ_IN_COMBINER		8
+
+static DEFINE_SPINLOCK(irq_controller_lock);
+
+struct combiner_chip_data {
+	unsigned int hwirq_offset;
+	unsigned int irq_mask;
+	void __iomem *base;
+	unsigned int parent_irq;
+#ifdef CONFIG_PM
+	u32 pm_save;
+#endif
+};
+
+static struct combiner_chip_data *combiner_data;
+static struct irq_domain *combiner_irq_domain;
+static unsigned int max_nr = 20;
+
+static inline void __iomem *combiner_base(struct irq_data *data)
+{
+	struct combiner_chip_data *combiner_data =
+		irq_data_get_irq_chip_data(data);
+
+	return combiner_data->base;
+}
+
+static void combiner_mask_irq(struct irq_data *data)
+{
+	u32 mask = 1 << (data->hwirq % 32);
+
+	writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
+}
+
+static void combiner_unmask_irq(struct irq_data *data)
+{
+	u32 mask = 1 << (data->hwirq % 32);
+
+	writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET);
+}
+
+static void combiner_handle_cascade_irq(struct irq_desc *desc)
+{
+	struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int cascade_irq, combiner_irq;
+	unsigned long status;
+
+	chained_irq_enter(chip, desc);
+
+	spin_lock(&irq_controller_lock);
+	status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
+	spin_unlock(&irq_controller_lock);
+	status &= chip_data->irq_mask;
+
+	if (status == 0)
+		goto out;
+
+	combiner_irq = chip_data->hwirq_offset + __ffs(status);
+	cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);
+
+	if (unlikely(!cascade_irq))
+		handle_bad_irq(desc);
+	else
+		generic_handle_irq(cascade_irq);
+
+ out:
+	chained_irq_exit(chip, desc);
+}
+
+#ifdef CONFIG_SMP
+static int combiner_set_affinity(struct irq_data *d,
+				 const struct cpumask *mask_val, bool force)
+{
+	struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+	struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
+	struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);
+
+	if (chip && chip->irq_set_affinity)
+		return chip->irq_set_affinity(data, mask_val, force);
+	else
+		return -EINVAL;
+}
+#endif
+
+static struct irq_chip combiner_chip = {
+	.name			= "COMBINER",
+	.irq_mask		= combiner_mask_irq,
+	.irq_unmask		= combiner_unmask_irq,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	= combiner_set_affinity,
+#endif
+};
+
+static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
+					unsigned int irq)
+{
+	irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
+					 combiner_data);
+}
+
+static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
+				     unsigned int combiner_nr,
+				     void __iomem *base, unsigned int irq)
+{
+	combiner_data->base = base;
+	combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
+	combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
+	combiner_data->parent_irq = irq;
+
+	/* Disable all interrupts */
+	writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
+}
+
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+				     struct device_node *controller,
+				     const u32 *intspec, unsigned int intsize,
+				     unsigned long *out_hwirq,
+				     unsigned int *out_type)
+{
+	if (irq_domain_get_of_node(d) != controller)
+		return -EINVAL;
+
+	if (intsize < 2)
+		return -EINVAL;
+
+	*out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
+	*out_type = 0;
+
+	return 0;
+}
+
+static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
+				   irq_hw_number_t hw)
+{
+	struct combiner_chip_data *combiner_data = d->host_data;
+
+	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
+	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
+	irq_set_probe(irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops combiner_irq_domain_ops = {
+	.xlate	= combiner_irq_domain_xlate,
+	.map	= combiner_irq_domain_map,
+};
+
+static void __init combiner_init(void __iomem *combiner_base,
+				 struct device_node *np)
+{
+	int i, irq;
+	unsigned int nr_irq;
+
+	nr_irq = max_nr * IRQ_IN_COMBINER;
+
+	combiner_data = kcalloc(max_nr, sizeof(*combiner_data), GFP_KERNEL);
+	if (!combiner_data) {
+		pr_warn("%s: could not allocate combiner data\n", __func__);
+		return;
+	}
+
+	combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
+				&combiner_irq_domain_ops, combiner_data);
+	if (WARN_ON(!combiner_irq_domain)) {
+		pr_warn("%s: irq domain init failed\n", __func__);
+		return;
+	}
+
+	for (i = 0; i < max_nr; i++) {
+		irq = irq_of_parse_and_map(np, i);
+
+		combiner_init_one(&combiner_data[i], i,
+				  combiner_base + (i >> 2) * 0x10, irq);
+		combiner_cascade_irq(&combiner_data[i], irq);
+	}
+}
+
+#ifdef CONFIG_PM
+
+/**
+ * combiner_suspend - save interrupt combiner state before suspend
+ *
+ * Save the interrupt enable set register for all combiner groups since
+ * the state is lost when the system enters into a sleep state.
+ *
+ */
+static int combiner_suspend(void)
+{
+	int i;
+
+	for (i = 0; i < max_nr; i++)
+		combiner_data[i].pm_save =
+			readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET);
+
+	return 0;
+}
+
+/**
+ * combiner_resume - restore interrupt combiner state after resume
+ *
+ * Restore the interrupt enable set register for all combiner groups since
+ * the state is lost when the system enters into a sleep state on suspend.
+ *
+ */
+static void combiner_resume(void)
+{
+	int i;
+
+	for (i = 0; i < max_nr; i++) {
+		writel_relaxed(combiner_data[i].irq_mask,
+			     combiner_data[i].base + COMBINER_ENABLE_CLEAR);
+		writel_relaxed(combiner_data[i].pm_save,
+			     combiner_data[i].base + COMBINER_ENABLE_SET);
+	}
+}
+
+#else
+#define combiner_suspend	NULL
+#define combiner_resume		NULL
+#endif
+
+static struct syscore_ops combiner_syscore_ops = {
+	.suspend	= combiner_suspend,
+	.resume		= combiner_resume,
+};
+
+static int __init combiner_of_init(struct device_node *np,
+				   struct device_node *parent)
+{
+	void __iomem *combiner_base;
+
+	combiner_base = of_iomap(np, 0);
+	if (!combiner_base) {
+		pr_err("%s: failed to map combiner registers\n", __func__);
+		return -ENXIO;
+	}
+
+	if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
+		pr_info("%s: number of combiners not specified, "
+			"setting default as %d.\n",
+			__func__, max_nr);
+	}
+
+	combiner_init(combiner_base, np);
+
+	register_syscore_ops(&combiner_syscore_ops);
+
+	return 0;
+}
+IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
+		combiner_of_init);
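
The arithmetic tying a device-tree specifier <group pin> to a linear hwirq and to a per-group register block is split between combiner_irq_domain_xlate() and combiner_init_one(). A standalone sketch (plain C, runnable in userspace) working the numbers for group 5, pin 3:

#include <stdio.h>

#define IRQ_IN_COMBINER 8

int main(void)
{
	unsigned int group = 5, pin = 3;

	/* combiner_irq_domain_xlate(): <5 3> -> hwirq 43 */
	unsigned int hwirq = group * IRQ_IN_COMBINER + pin;

	/* combiner_init_one(): groups 4-7 share one 32-bit status word */
	unsigned int offset = (group & ~3) * IRQ_IN_COMBINER;	/* 32 */
	unsigned int mask = 0xffu << ((group % 4) << 3);	/* 0xff00 */

	/* cascade: __ffs(status & mask) = 11, and 32 + 11 = hwirq 43 */
	printf("hwirq=%u offset=%u mask=0x%x\n", hwirq, offset, mask);
	return 0;
}
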
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
new file mode 100644
index 0000000..23a3b87
--- /dev/null
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -0,0 +1,294 @@
+/*
+ * Annapurna Labs MSIX support services
+ *
+ * Copyright (C) 2016, Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <asm/irq.h>
+#include <asm/msi.h>
+
+/* MSIX message address format: local GIC target */
+#define ALPINE_MSIX_SPI_TARGET_CLUSTER0		BIT(16)
+
+struct alpine_msix_data {
+	spinlock_t msi_map_lock;
+	phys_addr_t addr;
+	u32 spi_first;		/* The SPI number that MSIs start at */
+	u32 num_spis;		/* The number of SPIs for MSIs */
+	unsigned long *msi_map;
+};
+
+static void alpine_msix_mask_msi_irq(struct irq_data *d)
+{
+	pci_msi_mask_irq(d);
+	irq_chip_mask_parent(d);
+}
+
+static void alpine_msix_unmask_msi_irq(struct irq_data *d)
+{
+	pci_msi_unmask_irq(d);
+	irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip alpine_msix_irq_chip = {
+	.name			= "MSIx",
+	.irq_mask		= alpine_msix_mask_msi_irq,
+	.irq_unmask		= alpine_msix_unmask_msi_irq,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+};
+
+static int alpine_msix_allocate_sgi(struct alpine_msix_data *priv, int num_req)
+{
+	int first;
+
+	spin_lock(&priv->msi_map_lock);
+
+	first = bitmap_find_next_zero_area(priv->msi_map, priv->num_spis, 0,
+					   num_req, 0);
+	if (first >= priv->num_spis) {
+		spin_unlock(&priv->msi_map_lock);
+		return -ENOSPC;
+	}
+
+	bitmap_set(priv->msi_map, first, num_req);
+
+	spin_unlock(&priv->msi_map_lock);
+
+	return priv->spi_first + first;
+}
+
+static void alpine_msix_free_sgi(struct alpine_msix_data *priv, unsigned sgi,
+				 int num_req)
+{
+	int first = sgi - priv->spi_first;
+
+	spin_lock(&priv->msi_map_lock);
+
+	bitmap_clear(priv->msi_map, first, num_req);
+
+	spin_unlock(&priv->msi_map_lock);
+}
+
+static void alpine_msix_compose_msi_msg(struct irq_data *data,
+					struct msi_msg *msg)
+{
+	struct alpine_msix_data *priv = irq_data_get_irq_chip_data(data);
+	phys_addr_t msg_addr = priv->addr;
+
+	msg_addr |= (data->hwirq << 3);
+
+	msg->address_hi = upper_32_bits(msg_addr);
+	msg->address_lo = lower_32_bits(msg_addr);
+	msg->data = 0;
+}
+
+static struct msi_domain_info alpine_msix_domain_info = {
+	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		  MSI_FLAG_PCI_MSIX,
+	.chip	= &alpine_msix_irq_chip,
+};
+
+static struct irq_chip middle_irq_chip = {
+	.name			= "alpine_msix_middle",
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+	.irq_compose_msi_msg	= alpine_msix_compose_msi_msg,
+};
+
+static int alpine_msix_gic_domain_alloc(struct irq_domain *domain,
+					unsigned int virq, int sgi)
+{
+	struct irq_fwspec fwspec;
+	struct irq_data *d;
+	int ret;
+
+	if (!is_of_node(domain->parent->fwnode))
+		return -EINVAL;
+
+	fwspec.fwnode = domain->parent->fwnode;
+	fwspec.param_count = 3;
+	fwspec.param[0] = 0;
+	fwspec.param[1] = sgi;
+	fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+	if (ret)
+		return ret;
+
+	d = irq_domain_get_irq_data(domain->parent, virq);
+	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+
+	return 0;
+}
+
+static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
+					   unsigned int virq,
+					   unsigned int nr_irqs, void *args)
+{
+	struct alpine_msix_data *priv = domain->host_data;
+	int sgi, err, i;
+
+	sgi = alpine_msix_allocate_sgi(priv, nr_irqs);
+	if (sgi < 0)
+		return sgi;
+
+	for (i = 0; i < nr_irqs; i++) {
+		err = alpine_msix_gic_domain_alloc(domain, virq + i, sgi + i);
+		if (err)
+			goto err_sgi;
+
+		irq_domain_set_hwirq_and_chip(domain, virq + i, sgi + i,
+					      &middle_irq_chip, priv);
+	}
+
+	return 0;
+
+err_sgi:
+	while (--i >= 0)
+		irq_domain_free_irqs_parent(domain, virq, i);
+	alpine_msix_free_sgi(priv, sgi, nr_irqs);
+	return err;
+}
+
+static void alpine_msix_middle_domain_free(struct irq_domain *domain,
+					   unsigned int virq,
+					   unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct alpine_msix_data *priv = irq_data_get_irq_chip_data(d);
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+	alpine_msix_free_sgi(priv, d->hwirq, nr_irqs);
+}
+
+static const struct irq_domain_ops alpine_msix_middle_domain_ops = {
+	.alloc	= alpine_msix_middle_domain_alloc,
+	.free	= alpine_msix_middle_domain_free,
+};
+
+static int alpine_msix_init_domains(struct alpine_msix_data *priv,
+				    struct device_node *node)
+{
+	struct irq_domain *middle_domain, *msi_domain, *gic_domain;
+	struct device_node *gic_node;
+
+	gic_node = of_irq_find_parent(node);
+	if (!gic_node) {
+		pr_err("Failed to find the GIC node\n");
+		return -ENODEV;
+	}
+
+	gic_domain = irq_find_host(gic_node);
+	if (!gic_domain) {
+		pr_err("Failed to find the GIC domain\n");
+		return -ENXIO;
+	}
+
+	middle_domain = irq_domain_add_tree(NULL,
+					    &alpine_msix_middle_domain_ops,
+					    priv);
+	if (!middle_domain) {
+		pr_err("Failed to create the MSIX middle domain\n");
+		return -ENOMEM;
+	}
+
+	middle_domain->parent = gic_domain;
+
+	msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+					       &alpine_msix_domain_info,
+					       middle_domain);
+	if (!msi_domain) {
+		pr_err("Failed to create MSI domain\n");
+		irq_domain_remove(middle_domain);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int alpine_msix_init(struct device_node *node,
+			    struct device_node *parent)
+{
+	struct alpine_msix_data *priv;
+	struct resource res;
+	int ret;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	spin_lock_init(&priv->msi_map_lock);
+
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret) {
+		pr_err("Failed to allocate resource\n");
+		goto err_priv;
+	}
+
+	/*
+	 * The 20 least significant bits of addr provide direct information
+	 * regarding the interrupt destination.
+	 *
+	 * To select the primary GIC as the target GIC, bits [18:17] must be set
+	 * to 0x0. In this case, bit 16 (SPI_TARGET_CLUSTER0) must be set.
+	 */
+	priv->addr = res.start & GENMASK_ULL(63, 20);
+	priv->addr |= ALPINE_MSIX_SPI_TARGET_CLUSTER0;
+
+	if (of_property_read_u32(node, "al,msi-base-spi", &priv->spi_first)) {
+		pr_err("Unable to parse MSI base\n");
+		ret = -EINVAL;
+		goto err_priv;
+	}
+
+	if (of_property_read_u32(node, "al,msi-num-spis", &priv->num_spis)) {
+		pr_err("Unable to parse MSI numbers\n");
+		ret = -EINVAL;
+		goto err_priv;
+	}
+
+	priv->msi_map = kcalloc(BITS_TO_LONGS(priv->num_spis),
+				sizeof(*priv->msi_map),
+				GFP_KERNEL);
+	if (!priv->msi_map) {
+		ret = -ENOMEM;
+		goto err_priv;
+	}
+
+	pr_debug("Registering %d msixs, starting at %d\n",
+		 priv->num_spis, priv->spi_first);
+
+	ret = alpine_msix_init_domains(priv, node);
+	if (ret)
+		goto err_map;
+
+	return 0;
+
+err_map:
+	kfree(priv->msi_map);
+err_priv:
+	kfree(priv);
+	return ret;
+}
+IRQCHIP_DECLARE(alpine_msix, "al,alpine-msix", alpine_msix_init);
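
Putting alpine_msix_init() and alpine_msix_compose_msi_msg() together: bits 63:20 of the doorbell address come from the DT resource, bits 18:17 are left zero so the primary GIC is targeted, bit 16 selects cluster 0, and the allocated SPI lands at bits 10:3. A standalone sketch with invented values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t res_start = 0xfbe00000;	/* hypothetical MMIO base */
	uint32_t hwirq = 96;			/* hypothetical SPI number */

	uint64_t addr = res_start & ~0xfffffULL; /* GENMASK_ULL(63, 20) */
	addr |= 1u << 16;			/* SPI_TARGET_CLUSTER0 */
	addr |= (uint64_t)hwirq << 3;		/* per-interrupt offset */

	/* prints 0xfbe10300 for these inputs */
	printf("MSI message address = 0x%llx\n", (unsigned long long)addr);
	return 0;
}
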
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
new file mode 100644
index 0000000..c9bdc52
--- /dev/null
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -0,0 +1,714 @@
+/*
+ * Marvell Armada 370 and Armada XP SoC IRQ handling
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ * Ben Dooks <ben.dooks@codethink.co.uk>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/cpu.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <linux/msi.h>
+#include <asm/mach/arch.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+#include <asm/mach/irq.h>
+
+/*
+ * Overall diagram of the Armada XP interrupt controller:
+ *
+ *    To CPU 0                 To CPU 1
+ *
+ *       /\                       /\
+ *       ||                       ||
+ * +---------------+     +---------------+
+ * |               |	 |               |
+ * |    per-CPU    |	 |    per-CPU    |
+ * |  mask/unmask  |	 |  mask/unmask  |
+ * |     CPU0      |	 |     CPU1      |
+ * |               |	 |               |
+ * +---------------+	 +---------------+
+ *        /\                       /\
+ *        ||                       ||
+ *        \\_______________________//
+ *                     ||
+ *            +-------------------+
+ *            |                   |
+ *            | Global interrupt  |
+ *            |    mask/unmask    |
+ *            |                   |
+ *            +-------------------+
+ *                     /\
+ *                     ||
+ *               interrupt from
+ *                   device
+ *
+ * The "global interrupt mask/unmask" is modified using the
+ * ARMADA_370_XP_INT_SET_ENABLE_OFFS and
+ * ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS registers, which are relative
+ * to "main_int_base".
+ *
+ * The "per-CPU mask/unmask" is modified using the
+ * ARMADA_370_XP_INT_SET_MASK_OFFS and
+ * ARMADA_370_XP_INT_CLEAR_MASK_OFFS registers, which are relative to
+ * "per_cpu_int_base". This base address points to a special address,
+ * which automatically accesses the registers of the current CPU.
+ *
+ * The per-CPU mask/unmask can also be adjusted using the global
+ * per-interrupt ARMADA_370_XP_INT_SOURCE_CTL register, which we use
+ * to configure interrupt affinity.
+ *
+ * Due to this model, all interrupts need to be mask/unmasked at two
+ * different levels: at the global level and at the per-CPU level.
+ *
+ * This driver takes the following approach to deal with this:
+ *
+ *  - For global interrupts:
+ *
+ *    At ->map() time, a global interrupt is unmasked at the per-CPU
+ *    mask/unmask level. It is therefore unmasked at this level for
+ *    the current CPU, running the ->map() code. This leaves the
+ *    interrupt unmasked at this level in non-SMP
+ *    configurations. In SMP configurations, the ->set_affinity()
+ *    callback is called, which, using the
+ *    ARMADA_370_XP_INT_SOURCE_CTL() register, readjusts the
+ *    per-CPU mask/unmask for the interrupt.
+ *
+ *    The ->mask() and ->unmask() operations only mask/unmask the
+ *    interrupt at the "global" level.
+ *
+ *    So, a global interrupt is enabled at the per-CPU level as soon
+ *    as it is mapped. At run time, the masking/unmasking takes place
+ *    at the global level.
+ *
+ *  - For per-CPU interrupts
+ *
+ *    At ->map() time, a per-CPU interrupt is unmasked at the global
+ *    mask/unmask level.
+ *
+ *    The ->mask() and ->unmask() operations mask/unmask the interrupt
+ *    at the per-CPU level.
+ *
+ *    So, a per-CPU interrupt is enabled at the global level as soon
+ *    as it is mapped. At run time, the masking/unmasking takes place
+ *    at the per-CPU level.
+ */
+
+/* Registers relative to main_int_base */
+#define ARMADA_370_XP_INT_CONTROL		(0x00)
+#define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x04)
+#define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
+#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
+#define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + (irq) * 4)
+#define ARMADA_370_XP_INT_SOURCE_CPU_MASK	0xF
+#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)	((BIT(0) | BIT(8)) << (cpuid))
+
+/* Registers relative to per_cpu_int_base */
+#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS	(0x08)
+#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS		(0x0c)
+#define ARMADA_375_PPI_CAUSE			(0x10)
+#define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)
+#define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
+#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)
+#define ARMADA_370_XP_INT_FABRIC_MASK_OFFS	(0x54)
+#define ARMADA_370_XP_INT_CAUSE_PERF(cpu)	(1 << (cpu))
+
+#define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)
+
+#define IPI_DOORBELL_START                      (0)
+#define IPI_DOORBELL_END                        (8)
+#define IPI_DOORBELL_MASK                       0xFF
+#define PCI_MSI_DOORBELL_START                  (16)
+#define PCI_MSI_DOORBELL_NR                     (16)
+#define PCI_MSI_DOORBELL_END                    (32)
+#define PCI_MSI_DOORBELL_MASK                   0xFFFF0000
+
+static void __iomem *per_cpu_int_base;
+static void __iomem *main_int_base;
+static struct irq_domain *armada_370_xp_mpic_domain;
+static u32 doorbell_mask_reg;
+static int parent_irq;
+#ifdef CONFIG_PCI_MSI
+static struct irq_domain *armada_370_xp_msi_domain;
+static struct irq_domain *armada_370_xp_msi_inner_domain;
+static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
+static DEFINE_MUTEX(msi_used_lock);
+static phys_addr_t msi_doorbell_addr;
+#endif
+
+static inline bool is_percpu_irq(irq_hw_number_t irq)
+{
+	/*
+	 * hwirqs 0..ARMADA_370_XP_MAX_PER_CPU_IRQS are per-CPU.
+	 */
+	return irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS;
+}
+
+/*
+ * In SMP mode:
+ * For shared global interrupts, mask/unmask global enable bit
+ * For CPU interrupts, mask/unmask the calling CPU's bit
+ */
+static void armada_370_xp_irq_mask(struct irq_data *d)
+{
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+	if (!is_percpu_irq(hwirq))
+		writel(hwirq, main_int_base +
+				ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
+	else
+		writel(hwirq, per_cpu_int_base +
+				ARMADA_370_XP_INT_SET_MASK_OFFS);
+}
+
+static void armada_370_xp_irq_unmask(struct irq_data *d)
+{
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+	if (!is_percpu_irq(hwirq))
+		writel(hwirq, main_int_base +
+				ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+	else
+		writel(hwirq, per_cpu_int_base +
+				ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+}
+
+#ifdef CONFIG_PCI_MSI
+
+static struct irq_chip armada_370_xp_msi_irq_chip = {
+	.name = "MPIC MSI",
+	.irq_mask = pci_msi_mask_irq,
+	.irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info armada_370_xp_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+	.chip	= &armada_370_xp_msi_irq_chip,
+};
+
+static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	msg->address_lo = lower_32_bits(msi_doorbell_addr);
+	msg->address_hi = upper_32_bits(msi_doorbell_addr);
+	msg->data = 0xf00 | (data->hwirq + PCI_MSI_DOORBELL_START);
+}
+
+static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
+					  const struct cpumask *mask, bool force)
+{
+	return -EINVAL;
+}
+
+static struct irq_chip armada_370_xp_msi_bottom_irq_chip = {
+	.name			= "MPIC MSI",
+	.irq_compose_msi_msg	= armada_370_xp_compose_msi_msg,
+	.irq_set_affinity	= armada_370_xp_msi_set_affinity,
+};
+
+static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
+				   unsigned int nr_irqs, void *args)
+{
+	int hwirq, i;
+
+	mutex_lock(&msi_used_lock);
+
+	hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
+					   0, nr_irqs, 0);
+	if (hwirq >= PCI_MSI_DOORBELL_NR) {
+		mutex_unlock(&msi_used_lock);
+		return -ENOSPC;
+	}
+
+	bitmap_set(msi_used, hwirq, nr_irqs);
+	mutex_unlock(&msi_used_lock);
+
+	for (i = 0; i < nr_irqs; i++) {
+		irq_domain_set_info(domain, virq + i, hwirq + i,
+				    &armada_370_xp_msi_bottom_irq_chip,
+				    domain->host_data, handle_simple_irq,
+				    NULL, NULL);
+	}
+
+	return hwirq;
+}
+
+static void armada_370_xp_msi_free(struct irq_domain *domain,
+				   unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+	mutex_lock(&msi_used_lock);
+	bitmap_clear(msi_used, d->hwirq, nr_irqs);
+	mutex_unlock(&msi_used_lock);
+}
+
+static const struct irq_domain_ops armada_370_xp_msi_domain_ops = {
+	.alloc	= armada_370_xp_msi_alloc,
+	.free	= armada_370_xp_msi_free,
+};
+
+static int armada_370_xp_msi_init(struct device_node *node,
+				  phys_addr_t main_int_phys_base)
+{
+	u32 reg;
+
+	msi_doorbell_addr = main_int_phys_base +
+		ARMADA_370_XP_SW_TRIG_INT_OFFS;
+
+	armada_370_xp_msi_inner_domain =
+		irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
+				      &armada_370_xp_msi_domain_ops, NULL);
+	if (!armada_370_xp_msi_inner_domain)
+		return -ENOMEM;
+
+	armada_370_xp_msi_domain =
+		pci_msi_create_irq_domain(of_node_to_fwnode(node),
+					  &armada_370_xp_msi_domain_info,
+					  armada_370_xp_msi_inner_domain);
+	if (!armada_370_xp_msi_domain) {
+		irq_domain_remove(armada_370_xp_msi_inner_domain);
+		return -ENOMEM;
+	}
+
+	reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
+		| PCI_MSI_DOORBELL_MASK;
+
+	writel(reg, per_cpu_int_base +
+	       ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
+	/* Unmask IPI interrupt */
+	writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
+	return 0;
+}
+#else
+static inline int armada_370_xp_msi_init(struct device_node *node,
+					 phys_addr_t main_int_phys_base)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_SMP
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
+static int armada_xp_set_affinity(struct irq_data *d,
+				  const struct cpumask *mask_val, bool force)
+{
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+	unsigned long reg, mask;
+	int cpu;
+
+	/* Select a single core from the affinity mask which is online */
+	cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	mask = 1UL << cpu_logical_map(cpu);
+
+	raw_spin_lock(&irq_controller_lock);
+	reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
+	reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
+	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
+	raw_spin_unlock(&irq_controller_lock);
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK;
+}
+#endif
+
+static struct irq_chip armada_370_xp_irq_chip = {
+	.name		= "MPIC",
+	.irq_mask       = armada_370_xp_irq_mask,
+	.irq_mask_ack   = armada_370_xp_irq_mask,
+	.irq_unmask     = armada_370_xp_irq_unmask,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = armada_xp_set_affinity,
+#endif
+	.flags		= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
+				      unsigned int virq, irq_hw_number_t hw)
+{
+	armada_370_xp_irq_mask(irq_get_irq_data(virq));
+	if (!is_percpu_irq(hw))
+		writel(hw, per_cpu_int_base +
+			ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+	else
+		writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+	irq_set_status_flags(virq, IRQ_LEVEL);
+
+	if (is_percpu_irq(hw)) {
+		irq_set_percpu_devid(virq);
+		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
+					handle_percpu_devid_irq);
+	} else {
+		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
+					handle_level_irq);
+		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
+	}
+	irq_set_probe(virq);
+
+	return 0;
+}
+
+static void armada_xp_mpic_smp_cpu_init(void)
+{
+	u32 control;
+	int nr_irqs, i;
+
+	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
+	nr_irqs = (control >> 2) & 0x3ff;
+
+	for (i = 0; i < nr_irqs; i++)
+		writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
+
+	/* Clear pending IPIs */
+	writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+	/* Enable first 8 IPIs */
+	writel(IPI_DOORBELL_MASK, per_cpu_int_base +
+		ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
+	/* Unmask IPI interrupt */
+	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+}
+
+static void armada_xp_mpic_perf_init(void)
+{
+	unsigned long cpuid = cpu_logical_map(smp_processor_id());
+
+	/* Enable Performance Counter Overflow interrupts */
+	writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
+	       per_cpu_int_base + ARMADA_370_XP_INT_FABRIC_MASK_OFFS);
+}
+
+#ifdef CONFIG_SMP
+static void armada_mpic_send_doorbell(const struct cpumask *mask,
+				      unsigned int irq)
+{
+	int cpu;
+	unsigned long map = 0;
+
+	/* Convert our logical CPU mask into a physical one. */
+	for_each_cpu(cpu, mask)
+		map |= 1 << cpu_logical_map(cpu);
+
+	/*
+	 * Ensure that stores to Normal memory are visible to the
+	 * other CPUs before issuing the IPI.
+	 */
+	dsb();
+
+	/* submit softirq */
+	writel((map << 8) | irq, main_int_base +
+		ARMADA_370_XP_SW_TRIG_INT_OFFS);
+}
+
+static void armada_xp_mpic_reenable_percpu(void)
+{
+	unsigned int irq;
+
+	/* Re-enable per-CPU interrupts that were enabled before suspend */
+	for (irq = 0; irq < ARMADA_370_XP_MAX_PER_CPU_IRQS; irq++) {
+		struct irq_data *data;
+		int virq;
+
+		virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
+		if (virq == 0)
+			continue;
+
+		data = irq_get_irq_data(virq);
+
+		if (!irq_percpu_is_enabled(virq))
+			continue;
+
+		armada_370_xp_irq_unmask(data);
+	}
+}
+
+static int armada_xp_mpic_starting_cpu(unsigned int cpu)
+{
+	armada_xp_mpic_perf_init();
+	armada_xp_mpic_smp_cpu_init();
+	armada_xp_mpic_reenable_percpu();
+	return 0;
+}
+
+static int mpic_cascaded_starting_cpu(unsigned int cpu)
+{
+	armada_xp_mpic_perf_init();
+	armada_xp_mpic_reenable_percpu();
+	enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+	return 0;
+}
+#endif
+
+static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
+	.map = armada_370_xp_mpic_irq_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+#ifdef CONFIG_PCI_MSI
+static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
+{
+	u32 msimask, msinr;
+
+	msimask = readl_relaxed(per_cpu_int_base +
+				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+		& PCI_MSI_DOORBELL_MASK;
+
+	writel(~msimask, per_cpu_int_base +
+	       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+	for (msinr = PCI_MSI_DOORBELL_START;
+	     msinr < PCI_MSI_DOORBELL_END; msinr++) {
+		int irq;
+
+		if (!(msimask & BIT(msinr)))
+			continue;
+
+		if (is_chained) {
+			irq = irq_find_mapping(armada_370_xp_msi_inner_domain,
+					       msinr - PCI_MSI_DOORBELL_START);
+			generic_handle_irq(irq);
+		} else {
+			irq = msinr - PCI_MSI_DOORBELL_START;
+			handle_domain_irq(armada_370_xp_msi_inner_domain,
+					  irq, regs);
+		}
+	}
+}
+#else
+static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
+#endif
+
+static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned long irqmap, irqn, irqsrc, cpuid;
+	unsigned int cascade_irq;
+
+	chained_irq_enter(chip, desc);
+
+	irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
+	cpuid = cpu_logical_map(smp_processor_id());
+
+	for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
+		irqsrc = readl_relaxed(main_int_base +
+				       ARMADA_370_XP_INT_SOURCE_CTL(irqn));
+
+		/* Check that the interrupt is not masked on the current
+		 * CPU. Test the IRQ (0-1) and FIQ (8-9) mask bits.
+		 */
+		if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
+			continue;
+
+		if (irqn == 1) {
+			armada_370_xp_handle_msi_irq(NULL, true);
+			continue;
+		}
+
+		cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
+		generic_handle_irq(cascade_irq);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static void __exception_irq_entry
+armada_370_xp_handle_irq(struct pt_regs *regs)
+{
+	u32 irqstat, irqnr;
+
+	do {
+		irqstat = readl_relaxed(per_cpu_int_base +
+					ARMADA_370_XP_CPU_INTACK_OFFS);
+		irqnr = irqstat & 0x3FF;
+
+		if (irqnr > 1022)
+			break;
+
+		if (irqnr > 1) {
+			handle_domain_irq(armada_370_xp_mpic_domain,
+					  irqnr, regs);
+			continue;
+		}
+
+		/* MSI handling */
+		if (irqnr == 1)
+			armada_370_xp_handle_msi_irq(regs, false);
+
+#ifdef CONFIG_SMP
+		/* IPI Handling */
+		if (irqnr == 0) {
+			u32 ipimask, ipinr;
+
+			ipimask = readl_relaxed(per_cpu_int_base +
+						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+				& IPI_DOORBELL_MASK;
+
+			writel(~ipimask, per_cpu_int_base +
+				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+			/* Handle all pending doorbells */
+			for (ipinr = IPI_DOORBELL_START;
+			     ipinr < IPI_DOORBELL_END; ipinr++) {
+				if (ipimask & (0x1 << ipinr))
+					handle_IPI(ipinr, regs);
+			}
+			continue;
+		}
+#endif
+
+	} while (1);
+}
+
+static int armada_370_xp_mpic_suspend(void)
+{
+	doorbell_mask_reg = readl(per_cpu_int_base +
+				  ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+	return 0;
+}
+
+static void armada_370_xp_mpic_resume(void)
+{
+	int nirqs;
+	irq_hw_number_t irq;
+
+	/* Re-enable interrupts */
+	nirqs = (readl(main_int_base + ARMADA_370_XP_INT_CONTROL) >> 2) & 0x3ff;
+	for (irq = 0; irq < nirqs; irq++) {
+		struct irq_data *data;
+		int virq;
+
+		virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
+		if (virq == 0)
+			continue;
+
+		data = irq_get_irq_data(virq);
+
+		if (!is_percpu_irq(irq)) {
+			/* Non per-CPU interrupts */
+			writel(irq, per_cpu_int_base +
+			       ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+			if (!irqd_irq_disabled(data))
+				armada_370_xp_irq_unmask(data);
+		} else {
+			/* Per-CPU interrupts */
+			writel(irq, main_int_base +
+			       ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+
+			/*
+			 * Re-enable on the current CPU,
+			 * armada_xp_mpic_reenable_percpu() will take
+			 * care of secondary CPUs when they come up.
+			 */
+			if (irq_percpu_is_enabled(virq))
+				armada_370_xp_irq_unmask(data);
+		}
+	}
+
+	/* Reconfigure doorbells for IPIs and MSIs */
+	writel(doorbell_mask_reg,
+	       per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+	if (doorbell_mask_reg & IPI_DOORBELL_MASK)
+		writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+	if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
+		writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+}
+
+static struct syscore_ops armada_370_xp_mpic_syscore_ops = {
+	.suspend	= armada_370_xp_mpic_suspend,
+	.resume		= armada_370_xp_mpic_resume,
+};
+
+static int __init armada_370_xp_mpic_of_init(struct device_node *node,
+					     struct device_node *parent)
+{
+	struct resource main_int_res, per_cpu_int_res;
+	int nr_irqs, i;
+	u32 control;
+
+	BUG_ON(of_address_to_resource(node, 0, &main_int_res));
+	BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));
+
+	BUG_ON(!request_mem_region(main_int_res.start,
+				   resource_size(&main_int_res),
+				   node->full_name));
+	BUG_ON(!request_mem_region(per_cpu_int_res.start,
+				   resource_size(&per_cpu_int_res),
+				   node->full_name));
+
+	main_int_base = ioremap(main_int_res.start,
+				resource_size(&main_int_res));
+	BUG_ON(!main_int_base);
+
+	per_cpu_int_base = ioremap(per_cpu_int_res.start,
+				   resource_size(&per_cpu_int_res));
+	BUG_ON(!per_cpu_int_base);
+
+	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
+	nr_irqs = (control >> 2) & 0x3ff;
+
+	for (i = 0; i < nr_irqs; i++)
+		writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
+
+	armada_370_xp_mpic_domain =
+		irq_domain_add_linear(node, nr_irqs,
+				&armada_370_xp_mpic_irq_ops, NULL);
+	BUG_ON(!armada_370_xp_mpic_domain);
+	irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED);
+
+	/* Setup for the boot CPU */
+	armada_xp_mpic_perf_init();
+	armada_xp_mpic_smp_cpu_init();
+
+	armada_370_xp_msi_init(node, main_int_res.start);
+
+	parent_irq = irq_of_parse_and_map(node, 0);
+	if (parent_irq <= 0) {
+		irq_set_default_host(armada_370_xp_mpic_domain);
+		set_handle_irq(armada_370_xp_handle_irq);
+#ifdef CONFIG_SMP
+		set_smp_cross_call(armada_mpic_send_doorbell);
+		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
+					  "irqchip/armada/ipi:starting",
+					  armada_xp_mpic_starting_cpu, NULL);
+#endif
+	} else {
+#ifdef CONFIG_SMP
+		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
+					  "irqchip/armada/cascade:starting",
+					  mpic_cascaded_starting_cpu, NULL);
+#endif
+		irq_set_chained_handler(parent_irq,
+					armada_370_xp_mpic_handle_cascade_irq);
+	}
+
+	register_syscore_ops(&armada_370_xp_mpic_syscore_ops);
+
+	return 0;
+}
+
+IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);
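
The word armada_mpic_send_doorbell() writes to ARMADA_370_XP_SW_TRIG_INT_OFFS packs a physical-CPU bitmap into bits 15:8 and the doorbell (IPI) number into the low byte. A standalone sketch; cpu_logical_map() is stubbed as an identity map and the IPI number is arbitrary:

#include <stdio.h>

static unsigned int cpu_logical_map(int cpu)
{
	return cpu;	/* assumption: logical CPU == physical CPU */
}

int main(void)
{
	unsigned long map = 0;
	unsigned int ipi = 2;	/* hypothetical doorbell number */
	int cpu;

	for (cpu = 1; cpu <= 3; cpu++)	/* IPI targets: CPUs 1-3 */
		map |= 1ul << cpu_logical_map(cpu);

	/* prints 0x0e02: doorbell 2 raised on physical CPUs 1, 2 and 3 */
	printf("SW_TRIG_INT = 0x%04lx\n", (map << 8) | ipi);
	return 0;
}
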
diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c
new file mode 100644
index 0000000..f20200a
--- /dev/null
+++ b/drivers/irqchip/irq-aspeed-i2c-ic.c
@@ -0,0 +1,115 @@
+/*
+ *  Aspeed 24XX/25XX I2C Interrupt Controller.
+ *
+ *  Copyright (C) 2012-2017 ASPEED Technology Inc.
+ *  Copyright 2017 IBM Corporation
+ *  Copyright 2017 Google, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+
+
+#define ASPEED_I2C_IC_NUM_BUS 14
+
+struct aspeed_i2c_ic {
+	void __iomem		*base;
+	int			parent_irq;
+	struct irq_domain	*irq_domain;
+};
+
+/*
+ * The aspeed chip provides a single hardware interrupt for all of the I2C
+ * busses, so we use a dummy interrupt chip to translate this single interrupt
+ * into multiple interrupts, each associated with a single I2C bus.
+ */
+static void aspeed_i2c_ic_irq_handler(struct irq_desc *desc)
+{
+	struct aspeed_i2c_ic *i2c_ic = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned long bit, status;
+	unsigned int bus_irq;
+
+	chained_irq_enter(chip, desc);
+	status = readl(i2c_ic->base);
+	for_each_set_bit(bit, &status, ASPEED_I2C_IC_NUM_BUS) {
+		bus_irq = irq_find_mapping(i2c_ic->irq_domain, bit);
+		generic_handle_irq(bus_irq);
+	}
+	chained_irq_exit(chip, desc);
+}
+
+/*
+ * Set simple handler and mark IRQ as valid. Nothing interesting to do here
+ * since we are using a dummy interrupt chip.
+ */
+static int aspeed_i2c_ic_map_irq_domain(struct irq_domain *domain,
+					unsigned int irq, irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+static const struct irq_domain_ops aspeed_i2c_ic_irq_domain_ops = {
+	.map = aspeed_i2c_ic_map_irq_domain,
+};
+
+static int __init aspeed_i2c_ic_of_init(struct device_node *node,
+					struct device_node *parent)
+{
+	struct aspeed_i2c_ic *i2c_ic;
+	int ret = 0;
+
+	i2c_ic = kzalloc(sizeof(*i2c_ic), GFP_KERNEL);
+	if (!i2c_ic)
+		return -ENOMEM;
+
+	i2c_ic->base = of_iomap(node, 0);
+	if (!i2c_ic->base) {
+		ret = -ENOMEM;
+		goto err_free_ic;
+	}
+
+	i2c_ic->parent_irq = irq_of_parse_and_map(node, 0);
+	if (!i2c_ic->parent_irq) {
+		ret = -EINVAL;
+		goto err_iounmap;
+	}
+
+	i2c_ic->irq_domain = irq_domain_add_linear(node, ASPEED_I2C_IC_NUM_BUS,
+						   &aspeed_i2c_ic_irq_domain_ops,
+						   NULL);
+	if (!i2c_ic->irq_domain) {
+		ret = -ENOMEM;
+		goto err_iounmap;
+	}
+
+	i2c_ic->irq_domain->name = "aspeed-i2c-domain";
+
+	irq_set_chained_handler_and_data(i2c_ic->parent_irq,
+					 aspeed_i2c_ic_irq_handler, i2c_ic);
+
+	pr_info("i2c controller registered, irq %d\n", i2c_ic->parent_irq);
+
+	return 0;
+
+err_iounmap:
+	iounmap(i2c_ic->base);
+err_free_ic:
+	kfree(i2c_ic);
+	return ret;
+}
+
+IRQCHIP_DECLARE(ast2400_i2c_ic, "aspeed,ast2400-i2c-ic", aspeed_i2c_ic_of_init);
+IRQCHIP_DECLARE(ast2500_i2c_ic, "aspeed,ast2500-i2c-ic", aspeed_i2c_ic_of_init);
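
aspeed_i2c_ic_irq_handler() fans a single status word out to as many as 14 per-bus interrupts. A standalone sketch of the same demultiplex loop over an invented status value:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int status = 0x2042;	/* invented: buses 1, 6 and 13 pending */
	int bit;

	while (status) {
		bit = ffs(status) - 1;	/* ffs() is 1-based */
		status &= ~(1u << bit);
		/* the driver maps bit -> virq and calls generic_handle_irq() */
		printf("service I2C bus %d\n", bit);
	}
	return 0;
}
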
diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c
new file mode 100644
index 0000000..03ba477
--- /dev/null
+++ b/drivers/irqchip/irq-aspeed-vic.c
@@ -0,0 +1,231 @@
+/*
+ *  Copyright (C) 2015 - Ben Herrenschmidt, IBM Corp.
+ *
+ *  Driver for Aspeed "new" VIC as found in SoC generation 3 and later
+ *
+ *  Based on irq-vic.c:
+ *
+ *  Copyright (C) 1999 - 2003 ARM Limited
+ *  Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <asm/exception.h>
+#include <asm/irq.h>
+
+/* These definitions correspond to the "new mapping" of the
+ * register set that interleaves "high" and "low". The offsets
+ * below are for the "low" register, add 4 to get to the high one
+ */
+#define AVIC_IRQ_STATUS		0x00
+#define AVIC_FIQ_STATUS		0x08
+#define AVIC_RAW_STATUS		0x10
+#define AVIC_INT_SELECT		0x18
+#define AVIC_INT_ENABLE		0x20
+#define AVIC_INT_ENABLE_CLR	0x28
+#define AVIC_INT_TRIGGER	0x30
+#define AVIC_INT_TRIGGER_CLR	0x38
+#define AVIC_INT_SENSE		0x40
+#define AVIC_INT_DUAL_EDGE	0x48
+#define AVIC_INT_EVENT		0x50
+#define AVIC_EDGE_CLR		0x58
+#define AVIC_EDGE_STATUS	0x60
+
+#define NUM_IRQS		64
+
+struct aspeed_vic {
+	void __iomem		*base;
+	u32			edge_sources[2];
+	struct irq_domain	*dom;
+};
+static struct aspeed_vic *system_avic;
+
+static void vic_init_hw(struct aspeed_vic *vic)
+{
+	u32 sense;
+
+	/* Disable all interrupts */
+	writel(0xffffffff, vic->base + AVIC_INT_ENABLE_CLR);
+	writel(0xffffffff, vic->base + AVIC_INT_ENABLE_CLR + 4);
+
+	/* Make sure no soft trigger is on */
+	writel(0xffffffff, vic->base + AVIC_INT_TRIGGER_CLR);
+	writel(0xffffffff, vic->base + AVIC_INT_TRIGGER_CLR + 4);
+
+	/* Set everything to be IRQ */
+	writel(0, vic->base + AVIC_INT_SELECT);
+	writel(0, vic->base + AVIC_INT_SELECT + 4);
+
+	/* Some interrupts have a programmable high/low level trigger
+	 * (4 GPIO direct inputs); for now we assume this was configured
+	 * by firmware. We read back which ones are edge-triggered.
+	 */
+	sense = readl(vic->base + AVIC_INT_SENSE);
+	vic->edge_sources[0] = ~sense;
+	sense = readl(vic->base + AVIC_INT_SENSE + 4);
+	vic->edge_sources[1] = ~sense;
+
+	/* Clear edge detection latches */
+	writel(0xffffffff, vic->base + AVIC_EDGE_CLR);
+	writel(0xffffffff, vic->base + AVIC_EDGE_CLR + 4);
+}
+
+static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
+{
+	struct aspeed_vic *vic = system_avic;
+	u32 stat, irq;
+
+	for (;;) {
+		irq = 0;
+		stat = readl_relaxed(vic->base + AVIC_IRQ_STATUS);
+		if (!stat) {
+			stat = readl_relaxed(vic->base + AVIC_IRQ_STATUS + 4);
+			irq = 32;
+		}
+		if (stat == 0)
+			break;
+		irq += ffs(stat) - 1;
+		handle_domain_irq(vic->dom, irq, regs);
+	}
+}
+
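+/*
+ * The callbacks below locate a source in the two-word register pairs:
+ * word index = hwirq / 32, bit = hwirq % 32.  E.g. hwirq 40 (illustrative)
+ * lives at bit 8 of the "high" word, i.e. at register offset + 4.
+ */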
+static void avic_ack_irq(struct irq_data *d)
+{
+	struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+	unsigned int sidx = d->hwirq >> 5;
+	unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+	/* Clear edge latch for edge interrupts, nop for level */
+	if (vic->edge_sources[sidx] & sbit)
+		writel(sbit, vic->base + AVIC_EDGE_CLR + sidx * 4);
+}
+
+static void avic_mask_irq(struct irq_data *d)
+{
+	struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+	unsigned int sidx = d->hwirq >> 5;
+	unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+	writel(sbit, vic->base + AVIC_INT_ENABLE_CLR + sidx * 4);
+}
+
+static void avic_unmask_irq(struct irq_data *d)
+{
+	struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+	unsigned int sidx = d->hwirq >> 5;
+	unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+	writel(sbit, vic->base + AVIC_INT_ENABLE + sidx * 4);
+}
+
+/* For level irq, faster than going through a nop "ack" and mask */
+static void avic_mask_ack_irq(struct irq_data *d)
+{
+	struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+	unsigned int sidx = d->hwirq >> 5;
+	unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+	/* First mask */
+	writel(sbit, vic->base + AVIC_INT_ENABLE_CLR + sidx * 4);
+
+	/* Then clear edge latch for edge interrupts */
+	if (vic->edge_sources[sidx] & sbit)
+		writel(sbit, vic->base + AVIC_EDGE_CLR + sidx * 4);
+}
+
+static struct irq_chip avic_chip = {
+	.name		= "AVIC",
+	.irq_ack	= avic_ack_irq,
+	.irq_mask	= avic_mask_irq,
+	.irq_unmask	= avic_unmask_irq,
+	.irq_mask_ack	= avic_mask_ack_irq,
+};
+
+static int avic_map(struct irq_domain *d, unsigned int irq,
+		    irq_hw_number_t hwirq)
+{
+	struct aspeed_vic *vic = d->host_data;
+	unsigned int sidx = hwirq >> 5;
+	unsigned int sbit = 1u << (hwirq & 0x1f);
+
+	/* Check if interrupt exists */
+	if (sidx > 1)
+		return -EPERM;
+
+	if (vic->edge_sources[sidx] & sbit)
+		irq_set_chip_and_handler(irq, &avic_chip, handle_edge_irq);
+	else
+		irq_set_chip_and_handler(irq, &avic_chip, handle_level_irq);
+	irq_set_chip_data(irq, vic);
+	irq_set_probe(irq);
+	return 0;
+}
+
+static const struct irq_domain_ops avic_dom_ops = {
+	.map = avic_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
+static int __init avic_of_init(struct device_node *node,
+			       struct device_node *parent)
+{
+	void __iomem *regs;
+	struct aspeed_vic *vic;
+
+	if (WARN(parent, "non-root Aspeed VIC not supported"))
+		return -EINVAL;
+	if (WARN(system_avic, "duplicate Aspeed VIC not supported"))
+		return -EINVAL;
+
+	regs = of_iomap(node, 0);
+	if (WARN_ON(!regs))
+		return -EIO;
+
+	vic = kzalloc(sizeof(struct aspeed_vic), GFP_KERNEL);
+	if (WARN_ON(!vic)) {
+		iounmap(regs);
+		return -ENOMEM;
+	}
+	vic->base = regs;
+
+	/* Initialize sources, all masked */
+	vic_init_hw(vic);
+
+	/* Ready to receive interrupts */
+	system_avic = vic;
+	set_handle_irq(avic_handle_irq);
+
+	/* Register our domain */
+	vic->dom = irq_domain_add_simple(node, NUM_IRQS, 0,
+					 &avic_dom_ops, vic);
+
+	return 0;
+}
+
+IRQCHIP_DECLARE(ast2400_vic, "aspeed,ast2400-vic", avic_of_init);
+IRQCHIP_DECLARE(ast2500_vic, "aspeed,ast2500-vic", avic_of_init);
diff --git a/drivers/irqchip/irq-ath79-cpu.c b/drivers/irqchip/irq-ath79-cpu.c
new file mode 100644
index 0000000..befe93c
--- /dev/null
+++ b/drivers/irqchip/irq-ath79-cpu.c
@@ -0,0 +1,97 @@
+/*
+ *  Atheros AR71xx/AR724x/AR913x specific interrupt handling
+ *
+ *  Copyright (C) 2015 Alban Bedel <albeu@free.fr>
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mach-ath79/ath79.h>
+
+/*
+ * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
+ * these devices typically allocate coherent DMA memory, however the
+ * DMA controller may still have some unsynchronized data in the FIFO.
+ * Issue a flush in the handlers to ensure that the driver sees
+ * the update.
+ *
+ * This array maps the interrupt lines to the DDR write buffer channels.
+ */
+
+static unsigned irq_wb_chan[8] = {
+	-1, -1, -1, -1, -1, -1, -1, -1,
+};
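+
+/*
+ * Illustrative example: if the DT routes IP2 to DDR write buffer channel 1,
+ * irq_wb_chan[2] == 1 and plat_irq_dispatch() below flushes channel 1
+ * before dispatching the interrupt, so the driver sees coherent data.
+ */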
+
+asmlinkage void plat_irq_dispatch(void)
+{
+	unsigned long pending;
+	int irq;
+
+	pending = read_c0_status() & read_c0_cause() & ST0_IM;
+
+	if (!pending) {
+		spurious_interrupt();
+		return;
+	}
+
+	pending >>= CAUSEB_IP;
+	while (pending) {
+		irq = fls(pending) - 1;
+		if (irq < ARRAY_SIZE(irq_wb_chan) && irq_wb_chan[irq] != -1)
+			ath79_ddr_wb_flush(irq_wb_chan[irq]);
+		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
+		pending &= ~BIT(irq);
+	}
+}
+
+static int __init ar79_cpu_intc_of_init(
+	struct device_node *node, struct device_node *parent)
+{
+	int err, i, count;
+
+	/* Fill the irq_wb_chan table */
+	count = of_count_phandle_with_args(
+		node, "qca,ddr-wb-channels", "#qca,ddr-wb-channel-cells");
+
+	for (i = 0; i < count; i++) {
+		struct of_phandle_args args;
+		u32 irq = i;
+
+		of_property_read_u32_index(
+			node, "qca,ddr-wb-channel-interrupts", i, &irq);
+		if (irq >= ARRAY_SIZE(irq_wb_chan))
+			continue;
+
+		err = of_parse_phandle_with_args(
+			node, "qca,ddr-wb-channels",
+			"#qca,ddr-wb-channel-cells",
+			i, &args);
+		if (err)
+			return err;
+
+		irq_wb_chan[irq] = args.args[0];
+	}
+
+	return mips_cpu_irq_of_init(node, parent);
+}
+IRQCHIP_DECLARE(ar79_cpu_intc, "qca,ar7100-cpu-intc",
+		ar79_cpu_intc_of_init);
+
+void __init ath79_cpu_irq_init(unsigned irq_wb_chan2, unsigned irq_wb_chan3)
+{
+	irq_wb_chan[2] = irq_wb_chan2;
+	irq_wb_chan[3] = irq_wb_chan3;
+	mips_cpu_irq_init();
+}
diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c
new file mode 100644
index 0000000..aa72907
--- /dev/null
+++ b/drivers/irqchip/irq-ath79-misc.c
@@ -0,0 +1,189 @@
+/*
+ *  Atheros AR71xx/AR724x/AR913x MISC interrupt controller
+ *
+ *  Copyright (C) 2015 Alban Bedel <albeu@free.fr>
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define AR71XX_RESET_REG_MISC_INT_STATUS	0
+#define AR71XX_RESET_REG_MISC_INT_ENABLE	4
+
+#define ATH79_MISC_IRQ_COUNT			32
+
+static void ath79_misc_irq_handler(struct irq_desc *desc)
+{
+	struct irq_domain *domain = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	void __iomem *base = domain->host_data;
+	u32 pending;
+
+	chained_irq_enter(chip, desc);
+
+	pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
+		  __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+	if (!pending) {
+		spurious_interrupt();
+		chained_irq_exit(chip, desc);
+		return;
+	}
+
+	while (pending) {
+		int bit = __ffs(pending);
+
+		generic_handle_irq(irq_linear_revmap(domain, bit));
+		pending &= ~BIT(bit);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static void ar71xx_misc_irq_unmask(struct irq_data *d)
+{
+	void __iomem *base = irq_data_get_irq_chip_data(d);
+	unsigned int irq = d->hwirq;
+	u32 t;
+
+	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+	__raw_writel(t | BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+	/* flush write */
+	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+}
+
+static void ar71xx_misc_irq_mask(struct irq_data *d)
+{
+	void __iomem *base = irq_data_get_irq_chip_data(d);
+	unsigned int irq = d->hwirq;
+	u32 t;
+
+	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+	__raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+	/* flush write */
+	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+}
+
+static void ar724x_misc_irq_ack(struct irq_data *d)
+{
+	void __iomem *base = irq_data_get_irq_chip_data(d);
+	unsigned int irq = d->hwirq;
+	u32 t;
+
+	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
+	__raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_STATUS);
+
+	/* flush write */
+	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
+}
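+
+/*
+ * The two SoC families differ in how pending MISC interrupts are retired:
+ * the ar7100 variants simply mask (irq_mask_ack is wired to the mask
+ * routine in ar7100_misc_intc_of_init()), while ar7240 and later clear
+ * the latched bit in the status register via ar724x_misc_irq_ack().
+ */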
+
+static struct irq_chip ath79_misc_irq_chip = {
+	.name		= "MISC",
+	.irq_unmask	= ar71xx_misc_irq_unmask,
+	.irq_mask	= ar71xx_misc_irq_mask,
+};
+
+static int misc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+	irq_set_chip_and_handler(irq, &ath79_misc_irq_chip, handle_level_irq);
+	irq_set_chip_data(irq, d->host_data);
+	return 0;
+}
+
+static const struct irq_domain_ops misc_irq_domain_ops = {
+	.xlate = irq_domain_xlate_onecell,
+	.map = misc_map,
+};
+
+static void __init ath79_misc_intc_domain_init(
+	struct irq_domain *domain, int irq)
+{
+	void __iomem *base = domain->host_data;
+
+	/* Disable and clear all interrupts */
+	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
+
+	irq_set_chained_handler_and_data(irq, ath79_misc_irq_handler, domain);
+}
+
+static int __init ath79_misc_intc_of_init(
+	struct device_node *node, struct device_node *parent)
+{
+	struct irq_domain *domain;
+	void __iomem *base;
+	int irq;
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		pr_err("Failed to get MISC IRQ\n");
+		return -EINVAL;
+	}
+
+	base = of_iomap(node, 0);
+	if (!base) {
+		pr_err("Failed to get MISC IRQ registers\n");
+		return -ENOMEM;
+	}
+
+	domain = irq_domain_add_linear(node, ATH79_MISC_IRQ_COUNT,
+				&misc_irq_domain_ops, base);
+	if (!domain) {
+		pr_err("Failed to add MISC irqdomain\n");
+		return -EINVAL;
+	}
+
+	ath79_misc_intc_domain_init(domain, irq);
+	return 0;
+}
+
+static int __init ar7100_misc_intc_of_init(
+	struct device_node *node, struct device_node *parent)
+{
+	ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
+	return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc",
+		ar7100_misc_intc_of_init);
+
+static int __init ar7240_misc_intc_of_init(
+	struct device_node *node, struct device_node *parent)
+{
+	ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
+	return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
+		ar7240_misc_intc_of_init);
+
+void __init ath79_misc_irq_init(void __iomem *regs, int irq,
+				int irq_base, bool is_ar71xx)
+{
+	struct irq_domain *domain;
+
+	if (is_ar71xx)
+		ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
+	else
+		ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
+
+	domain = irq_domain_add_legacy(NULL, ATH79_MISC_IRQ_COUNT,
+			irq_base, 0, &misc_irq_domain_ops, regs);
+	if (!domain)
+		panic("Failed to create MISC irqdomain");
+
+	ath79_misc_intc_domain_init(domain, irq);
+}
diff --git a/drivers/irqchip/irq-ativic32.c b/drivers/irqchip/irq-ativic32.c
new file mode 100644
index 0000000..f69a858
--- /dev/null
+++ b/drivers/irqchip/irq-ativic32.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <nds32_intrinsic.h>
+
+static void ativic32_ack_irq(struct irq_data *data)
+{
+	__nds32__mtsr_dsb(BIT(data->hwirq), NDS32_SR_INT_PEND2);
+}
+
+static void ativic32_mask_irq(struct irq_data *data)
+{
+	unsigned long int_mask2 = __nds32__mfsr(NDS32_SR_INT_MASK2);
+	__nds32__mtsr_dsb(int_mask2 & (~(BIT(data->hwirq))), NDS32_SR_INT_MASK2);
+}
+
+static void ativic32_unmask_irq(struct irq_data *data)
+{
+	unsigned long int_mask2 = __nds32__mfsr(NDS32_SR_INT_MASK2);
+	__nds32__mtsr_dsb(int_mask2 | (BIT(data->hwirq)), NDS32_SR_INT_MASK2);
+}
+
+static struct irq_chip ativic32_chip = {
+	.name = "ativic32",
+	.irq_ack = ativic32_ack_irq,
+	.irq_mask = ativic32_mask_irq,
+	.irq_unmask = ativic32_unmask_irq,
+};
+
+static unsigned int __initdata nivic_map[6] = { 6, 2, 10, 16, 24, 32 };
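+
+/*
+ * The NIVIC field of the IVB system register indexes this table to give
+ * the number of input lines; e.g. (illustrative) NIVIC == 2 means the
+ * controller has 10 inputs, so a 10-entry linear domain is created below.
+ */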
+
+static struct irq_domain *root_domain;
+static int ativic32_irq_domain_map(struct irq_domain *id, unsigned int virq,
+				  irq_hw_number_t hw)
+{
+	unsigned long int_trigger_type;
+	struct irq_data *irq_data;
+	u32 type;
+
+	int_trigger_type = __nds32__mfsr(NDS32_SR_INT_TRIGGER);
+	irq_data = irq_get_irq_data(virq);
+	if (!irq_data)
+		return -EINVAL;
+
+	if (int_trigger_type & (BIT(hw))) {
+		irq_set_chip_and_handler(virq, &ativic32_chip, handle_edge_irq);
+		type = IRQ_TYPE_EDGE_RISING;
+	} else {
+		irq_set_chip_and_handler(virq, &ativic32_chip, handle_level_irq);
+		type = IRQ_TYPE_LEVEL_HIGH;
+	}
+
+	irqd_set_trigger_type(irq_data, type);
+	return 0;
+}
+
+static struct irq_domain_ops ativic32_ops = {
+	.map = ativic32_irq_domain_map,
+	.xlate = irq_domain_xlate_onecell
+};
+
+static irq_hw_number_t get_intr_src(void)
+{
+	return ((__nds32__mfsr(NDS32_SR_ITYPE) & ITYPE_mskVECTOR) >> ITYPE_offVECTOR)
+		- NDS32_VECTOR_offINTERRUPT;
+}
+
+asmlinkage void asm_do_IRQ(struct pt_regs *regs)
+{
+	irq_hw_number_t hwirq = get_intr_src();
+	handle_domain_irq(root_domain, hwirq, regs);
+}
+
+int __init ativic32_init_irq(struct device_node *node, struct device_node *parent)
+{
+	unsigned long int_vec_base, nivic, nr_ints;
+
+	if (WARN(parent, "non-root ativic32 is not supported"))
+		return -EINVAL;
+
+	int_vec_base = __nds32__mfsr(NDS32_SR_IVB);
+
+	if (((int_vec_base & IVB_mskIVIC_VER) >> IVB_offIVIC_VER) == 0)
+		panic("Unable to use atcivic32 for this cpu.\n");
+
+	nivic = (int_vec_base & IVB_mskNIVIC) >> IVB_offNIVIC;
+	if (nivic >= ARRAY_SIZE(nivic_map))
+		panic("The number of input for ativic32 is not supported.\n");
+
+	nr_ints = nivic_map[nivic];
+
+	root_domain = irq_domain_add_linear(node, nr_ints,
+			&ativic32_ops, NULL);
+
+	if (!root_domain)
+		panic("%s: unable to create IRQ domain\n", node->full_name);
+
+	return 0;
+}
+IRQCHIP_DECLARE(ativic32, "andestech,ativic32", ativic32_init_irq);
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
new file mode 100644
index 0000000..072bd22
--- /dev/null
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -0,0 +1,275 @@
+/*
+ * Atmel AT91 common AIC (Advanced Interrupt Controller) code shared by
+ * irq-atmel-aic and irq-atmel-aic5 drivers
+ *
+ *  Copyright (C) 2004 SAN People
+ *  Copyright (C) 2004 ATMEL
+ *  Copyright (C) Rick Bronson
+ *  Copyright (C) 2014 Free Electrons
+ *
+ *  Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "irq-atmel-aic-common.h"
+
+#define AT91_AIC_PRIOR			GENMASK(2, 0)
+#define AT91_AIC_IRQ_MIN_PRIORITY	0
+#define AT91_AIC_IRQ_MAX_PRIORITY	7
+
+#define AT91_AIC_SRCTYPE		GENMASK(6, 5)
+#define AT91_AIC_SRCTYPE_LOW		(0 << 5)
+#define AT91_AIC_SRCTYPE_FALLING	(1 << 5)
+#define AT91_AIC_SRCTYPE_HIGH		(2 << 5)
+#define AT91_AIC_SRCTYPE_RISING		(3 << 5)
+
+struct aic_chip_data {
+	u32 ext_irqs;
+};
+
+static void aic_common_shutdown(struct irq_data *d)
+{
+	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
+	ct->chip.irq_mask(d);
+}
+
+int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct aic_chip_data *aic = gc->private;
+	unsigned aic_type;
+
+	switch (type) {
+	case IRQ_TYPE_LEVEL_HIGH:
+		aic_type = AT91_AIC_SRCTYPE_HIGH;
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		aic_type = AT91_AIC_SRCTYPE_RISING;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		if (!(d->mask & aic->ext_irqs))
+			return -EINVAL;
+
+		aic_type = AT91_AIC_SRCTYPE_LOW;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		if (!(d->mask & aic->ext_irqs))
+			return -EINVAL;
+
+		aic_type = AT91_AIC_SRCTYPE_FALLING;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*val &= ~AT91_AIC_SRCTYPE;
+	*val |= aic_type;
+
+	return 0;
+}
+
+void aic_common_set_priority(int priority, unsigned *val)
+{
+	*val &= ~AT91_AIC_PRIOR;
+	*val |= priority;
+}
+
+int aic_common_irq_domain_xlate(struct irq_domain *d,
+				struct device_node *ctrlr,
+				const u32 *intspec,
+				unsigned int intsize,
+				irq_hw_number_t *out_hwirq,
+				unsigned int *out_type)
+{
+	if (WARN_ON(intsize < 3))
+		return -EINVAL;
+
+	if (WARN_ON((intspec[2] < AT91_AIC_IRQ_MIN_PRIORITY) ||
+		    (intspec[2] > AT91_AIC_IRQ_MAX_PRIORITY)))
+		return -EINVAL;
+
+	*out_hwirq = intspec[0];
+	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+
+	return 0;
+}
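+
+/*
+ * The AIC bindings therefore use three interrupt cells: <hwirq type
+ * priority>, with the priority cell bounded by AT91_AIC_IRQ_MIN_PRIORITY
+ * and AT91_AIC_IRQ_MAX_PRIORITY.  The priority itself is applied by the
+ * driver-specific xlate callbacks in irq-atmel-aic.c and irq-atmel-aic5.c.
+ */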
+
+static void __init aic_common_ext_irq_of_init(struct irq_domain *domain)
+{
+	struct device_node *node = irq_domain_get_of_node(domain);
+	struct irq_chip_generic *gc;
+	struct aic_chip_data *aic;
+	struct property *prop;
+	const __be32 *p;
+	u32 hwirq;
+
+	gc = irq_get_domain_generic_chip(domain, 0);
+
+	aic = gc->private;
+	aic->ext_irqs |= 1;
+
+	of_property_for_each_u32(node, "atmel,external-irqs", prop, p, hwirq) {
+		gc = irq_get_domain_generic_chip(domain, hwirq);
+		if (!gc) {
+			pr_warn("AIC: external irq %d >= %d skip it\n",
+				hwirq, domain->revmap_size);
+			continue;
+		}
+
+		aic = gc->private;
+		aic->ext_irqs |= (1 << (hwirq % 32));
+	}
+}
+
+#define AT91_RTC_IDR           0x24
+#define AT91_RTC_IMR           0x28
+#define AT91_RTC_IRQ_MASK      0x1f
+
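+/*
+ * The RTC/RTT fixups below mask interrupts that a bootloader may have left
+ * enabled, so that a stale alarm or increment interrupt cannot fire before
+ * the corresponding driver has claimed the line.
+ */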
+void __init aic_common_rtc_irq_fixup(void)
+{
+	struct device_node *np;
+	void __iomem *regs;
+
+	np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
+	if (!np)
+		np = of_find_compatible_node(NULL, NULL,
+					     "atmel,at91sam9x5-rtc");
+
+	if (!np)
+		return;
+
+	regs = of_iomap(np, 0);
+	of_node_put(np);
+
+	if (!regs)
+		return;
+
+	writel(AT91_RTC_IRQ_MASK, regs + AT91_RTC_IDR);
+
+	iounmap(regs);
+}
+
+#define AT91_RTT_MR		0x00			/* Real-time Mode Register */
+#define AT91_RTT_ALMIEN		(1 << 16)		/* Alarm Interrupt Enable */
+#define AT91_RTT_RTTINCIEN	(1 << 17)		/* Real Time Timer Increment Interrupt Enable */
+
+void __init aic_common_rtt_irq_fixup(void)
+{
+	struct device_node *np;
+	void __iomem *regs;
+
+	/*
+	 * The at91sam9263 SoC has 2 instances of the RTT block, hence we
+	 * iterate over the DT to find each occurrence.
+	 */
+	for_each_compatible_node(np, NULL, "atmel,at91sam9260-rtt") {
+		regs = of_iomap(np, 0);
+		if (!regs)
+			continue;
+
+		writel(readl(regs + AT91_RTT_MR) &
+		       ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN),
+		       regs + AT91_RTT_MR);
+
+		iounmap(regs);
+	}
+}
+
+static void __init aic_common_irq_fixup(const struct of_device_id *matches)
+{
+	struct device_node *root = of_find_node_by_path("/");
+	const struct of_device_id *match;
+
+	if (!root)
+		return;
+
+	match = of_match_node(matches, root);
+
+	if (match) {
+		void (*fixup)(void) = match->data;
+		fixup();
+	}
+
+	of_node_put(root);
+}
+
+struct irq_domain *__init aic_common_of_init(struct device_node *node,
+					     const struct irq_domain_ops *ops,
+					     const char *name, int nirqs,
+					     const struct of_device_id *matches)
+{
+	struct irq_chip_generic *gc;
+	struct irq_domain *domain;
+	struct aic_chip_data *aic;
+	void __iomem *reg_base;
+	int nchips;
+	int ret;
+	int i;
+
+	nchips = DIV_ROUND_UP(nirqs, 32);
+
+	reg_base = of_iomap(node, 0);
+	if (!reg_base)
+		return ERR_PTR(-ENOMEM);
+
+	aic = kcalloc(nchips, sizeof(*aic), GFP_KERNEL);
+	if (!aic) {
+		ret = -ENOMEM;
+		goto err_iounmap;
+	}
+
+	domain = irq_domain_add_linear(node, nchips * 32, ops, aic);
+	if (!domain) {
+		ret = -ENOMEM;
+		goto err_free_aic;
+	}
+
+	ret = irq_alloc_domain_generic_chips(domain, 32, 1, name,
+					     handle_fasteoi_irq,
+					     IRQ_NOREQUEST | IRQ_NOPROBE |
+					     IRQ_NOAUTOEN, 0, 0);
+	if (ret)
+		goto err_domain_remove;
+
+	for (i = 0; i < nchips; i++) {
+		gc = irq_get_domain_generic_chip(domain, i * 32);
+
+		gc->reg_base = reg_base;
+
+		gc->unused = 0;
+		gc->wake_enabled = ~0;
+		gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK;
+		gc->chip_types[0].chip.irq_eoi = irq_gc_eoi;
+		gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
+		gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown;
+		gc->private = &aic[i];
+	}
+
+	aic_common_ext_irq_of_init(domain);
+	aic_common_irq_fixup(matches);
+
+	return domain;
+
+err_domain_remove:
+	irq_domain_remove(domain);
+
+err_free_aic:
+	kfree(aic);
+
+err_iounmap:
+	iounmap(reg_base);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/irqchip/irq-atmel-aic-common.h b/drivers/irqchip/irq-atmel-aic-common.h
new file mode 100644
index 0000000..242e62c
--- /dev/null
+++ b/drivers/irqchip/irq-atmel-aic-common.h
@@ -0,0 +1,40 @@
+/*
+ * Atmel AT91 common AIC (Advanced Interrupt Controller) header file
+ *
+ *  Copyright (C) 2004 SAN People
+ *  Copyright (C) 2004 ATMEL
+ *  Copyright (C) Rick Bronson
+ *  Copyright (C) 2014 Free Electrons
+ *
+ *  Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __IRQ_ATMEL_AIC_COMMON_H
+#define __IRQ_ATMEL_AIC_COMMON_H
+
+int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val);
+
+void aic_common_set_priority(int priority, unsigned *val);
+
+int aic_common_irq_domain_xlate(struct irq_domain *d,
+				struct device_node *ctrlr,
+				const u32 *intspec,
+				unsigned int intsize,
+				irq_hw_number_t *out_hwirq,
+				unsigned int *out_type);
+
+struct irq_domain *__init aic_common_of_init(struct device_node *node,
+					     const struct irq_domain_ops *ops,
+					     const char *name, int nirqs,
+					     const struct of_device_id *matches);
+
+void __init aic_common_rtc_irq_fixup(void);
+
+void __init aic_common_rtt_irq_fixup(void);
+
+#endif /* __IRQ_ATMEL_AIC_COMMON_H */
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
new file mode 100644
index 0000000..bb1ad45
--- /dev/null
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -0,0 +1,274 @@
+/*
+ * Atmel AT91 AIC (Advanced Interrupt Controller) driver
+ *
+ *  Copyright (C) 2004 SAN People
+ *  Copyright (C) 2004 ATMEL
+ *  Copyright (C) Rick Bronson
+ *  Copyright (C) 2014 Free Electrons
+ *
+ *  Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/bitmap.h>
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include "irq-atmel-aic-common.h"
+
+/* Number of irq lines managed by AIC */
+#define NR_AIC_IRQS	32
+
+#define AT91_AIC_SMR(n)			((n) * 4)
+
+#define AT91_AIC_SVR(n)			(0x80 + ((n) * 4))
+#define AT91_AIC_IVR			0x100
+#define AT91_AIC_FVR			0x104
+#define AT91_AIC_ISR			0x108
+
+#define AT91_AIC_IPR			0x10c
+#define AT91_AIC_IMR			0x110
+#define AT91_AIC_CISR			0x114
+
+#define AT91_AIC_IECR			0x120
+#define AT91_AIC_IDCR			0x124
+#define AT91_AIC_ICCR			0x128
+#define AT91_AIC_ISCR			0x12c
+#define AT91_AIC_EOICR			0x130
+#define AT91_AIC_SPU			0x134
+#define AT91_AIC_DCR			0x138
+
+static struct irq_domain *aic_domain;
+
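+/*
+ * Reading IVR tells the AIC an interrupt is being taken; if ISR then reads
+ * back zero the interrupt was spurious and must still be completed by
+ * writing EOICR, otherwise the vector is dispatched into the irq domain.
+ */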
+static asmlinkage void __exception_irq_entry
+aic_handle(struct pt_regs *regs)
+{
+	struct irq_domain_chip_generic *dgc = aic_domain->gc;
+	struct irq_chip_generic *gc = dgc->gc[0];
+	u32 irqnr;
+	u32 irqstat;
+
+	irqnr = irq_reg_readl(gc, AT91_AIC_IVR);
+	irqstat = irq_reg_readl(gc, AT91_AIC_ISR);
+
+	if (!irqstat)
+		irq_reg_writel(gc, 0, AT91_AIC_EOICR);
+	else
+		handle_domain_irq(aic_domain, irqnr, regs);
+}
+
+static int aic_retrigger(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+
+	/* Software-retrigger the interrupt on the AIC */
+	irq_gc_lock(gc);
+	irq_reg_writel(gc, d->mask, AT91_AIC_ISCR);
+	irq_gc_unlock(gc);
+
+	return 0;
+}
+
+static int aic_set_type(struct irq_data *d, unsigned type)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	unsigned int smr;
+	int ret;
+
+	smr = irq_reg_readl(gc, AT91_AIC_SMR(d->hwirq));
+	ret = aic_common_set_type(d, type, &smr);
+	if (ret)
+		return ret;
+
+	irq_reg_writel(gc, smr, AT91_AIC_SMR(d->hwirq));
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static void aic_suspend(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+
+	irq_gc_lock(gc);
+	irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IDCR);
+	irq_reg_writel(gc, gc->wake_active, AT91_AIC_IECR);
+	irq_gc_unlock(gc);
+}
+
+static void aic_resume(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+
+	irq_gc_lock(gc);
+	irq_reg_writel(gc, gc->wake_active, AT91_AIC_IDCR);
+	irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IECR);
+	irq_gc_unlock(gc);
+}
+
+static void aic_pm_shutdown(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+
+	irq_gc_lock(gc);
+	irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR);
+	irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR);
+	irq_gc_unlock(gc);
+}
+#else
+#define aic_suspend		NULL
+#define aic_resume		NULL
+#define aic_pm_shutdown		NULL
+#endif /* CONFIG_PM */
+
+static void __init aic_hw_init(struct irq_domain *domain)
+{
+	struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
+	int i;
+
+	/*
+	 * Perform 8 End Of Interrupt commands to make sure the AIC
+	 * will not lock out nIRQ
+	 */
+	for (i = 0; i < 8; i++)
+		irq_reg_writel(gc, 0, AT91_AIC_EOICR);
+
+	/*
+	 * Spurious Interrupt ID in Spurious Vector Register.
+	 * When there is no current interrupt, the IRQ Vector Register
+	 * reads the value stored in AIC_SPU
+	 */
+	irq_reg_writel(gc, 0xffffffff, AT91_AIC_SPU);
+
+	/* No debugging in AIC: Debug (Protect) Control Register */
+	irq_reg_writel(gc, 0, AT91_AIC_DCR);
+
+	/* Disable and clear all interrupts initially */
+	irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR);
+	irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR);
+
+	for (i = 0; i < 32; i++)
+		irq_reg_writel(gc, i, AT91_AIC_SVR(i));
+}
+
+static int aic_irq_domain_xlate(struct irq_domain *d,
+				struct device_node *ctrlr,
+				const u32 *intspec, unsigned int intsize,
+				irq_hw_number_t *out_hwirq,
+				unsigned int *out_type)
+{
+	struct irq_domain_chip_generic *dgc = d->gc;
+	struct irq_chip_generic *gc;
+	unsigned long flags;
+	unsigned smr;
+	int idx;
+	int ret;
+
+	if (!dgc)
+		return -EINVAL;
+
+	ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
+					  out_hwirq, out_type);
+	if (ret)
+		return ret;
+
+	idx = intspec[0] / dgc->irqs_per_chip;
+	if (idx >= dgc->num_chips)
+		return -EINVAL;
+
+	gc = dgc->gc[idx];
+
+	irq_gc_lock_irqsave(gc, flags);
+	smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
+	aic_common_set_priority(intspec[2], &smr);
+	irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
+	irq_gc_unlock_irqrestore(gc, flags);
+
+	return ret;
+}
+
+static const struct irq_domain_ops aic_irq_ops = {
+	.map	= irq_map_generic_chip,
+	.xlate	= aic_irq_domain_xlate,
+};
+
+static void __init at91rm9200_aic_irq_fixup(void)
+{
+	aic_common_rtc_irq_fixup();
+}
+
+static void __init at91sam9260_aic_irq_fixup(void)
+{
+	aic_common_rtt_irq_fixup();
+}
+
+static void __init at91sam9g45_aic_irq_fixup(void)
+{
+	aic_common_rtc_irq_fixup();
+	aic_common_rtt_irq_fixup();
+}
+
+static const struct of_device_id aic_irq_fixups[] __initconst = {
+	{ .compatible = "atmel,at91rm9200", .data = at91rm9200_aic_irq_fixup },
+	{ .compatible = "atmel,at91sam9g45", .data = at91sam9g45_aic_irq_fixup },
+	{ .compatible = "atmel,at91sam9n12", .data = at91rm9200_aic_irq_fixup },
+	{ .compatible = "atmel,at91sam9rl", .data = at91sam9g45_aic_irq_fixup },
+	{ .compatible = "atmel,at91sam9x5", .data = at91rm9200_aic_irq_fixup },
+	{ .compatible = "atmel,at91sam9260", .data = at91sam9260_aic_irq_fixup },
+	{ .compatible = "atmel,at91sam9261", .data = at91sam9260_aic_irq_fixup },
+	{ .compatible = "atmel,at91sam9263", .data = at91sam9260_aic_irq_fixup },
+	{ .compatible = "atmel,at91sam9g20", .data = at91sam9260_aic_irq_fixup },
+	{ /* sentinel */ },
+};
+
+static int __init aic_of_init(struct device_node *node,
+			      struct device_node *parent)
+{
+	struct irq_chip_generic *gc;
+	struct irq_domain *domain;
+
+	if (aic_domain)
+		return -EEXIST;
+
+	domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic",
+				    NR_AIC_IRQS, aic_irq_fixups);
+	if (IS_ERR(domain))
+		return PTR_ERR(domain);
+
+	aic_domain = domain;
+	gc = irq_get_domain_generic_chip(domain, 0);
+
+	gc->chip_types[0].regs.eoi = AT91_AIC_EOICR;
+	gc->chip_types[0].regs.enable = AT91_AIC_IECR;
+	gc->chip_types[0].regs.disable = AT91_AIC_IDCR;
+	gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+	gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+	gc->chip_types[0].chip.irq_retrigger = aic_retrigger;
+	gc->chip_types[0].chip.irq_set_type = aic_set_type;
+	gc->chip_types[0].chip.irq_suspend = aic_suspend;
+	gc->chip_types[0].chip.irq_resume = aic_resume;
+	gc->chip_types[0].chip.irq_pm_shutdown = aic_pm_shutdown;
+
+	aic_hw_init(domain);
+	set_handle_irq(aic_handle);
+
+	return 0;
+}
+IRQCHIP_DECLARE(at91rm9200_aic, "atmel,at91rm9200-aic", aic_of_init);
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
new file mode 100644
index 0000000..6acad2e
--- /dev/null
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -0,0 +1,392 @@
+/*
+ * Atmel AT91 AIC5 (Advanced Interrupt Controller) driver
+ *
+ *  Copyright (C) 2004 SAN People
+ *  Copyright (C) 2004 ATMEL
+ *  Copyright (C) Rick Bronson
+ *  Copyright (C) 2014 Free Electrons
+ *
+ *  Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/bitmap.h>
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include "irq-atmel-aic-common.h"
+
+/* Number of irq lines managed by AIC */
+#define NR_AIC5_IRQS	128
+
+#define AT91_AIC5_SSR		0x0
+#define AT91_AIC5_INTSEL_MSK	(0x7f << 0)
+
+#define AT91_AIC5_SMR			0x4
+
+#define AT91_AIC5_SVR			0x8
+#define AT91_AIC5_IVR			0x10
+#define AT91_AIC5_FVR			0x14
+#define AT91_AIC5_ISR			0x18
+
+#define AT91_AIC5_IPR0			0x20
+#define AT91_AIC5_IPR1			0x24
+#define AT91_AIC5_IPR2			0x28
+#define AT91_AIC5_IPR3			0x2c
+#define AT91_AIC5_IMR			0x30
+#define AT91_AIC5_CISR			0x34
+
+#define AT91_AIC5_IECR			0x40
+#define AT91_AIC5_IDCR			0x44
+#define AT91_AIC5_ICCR			0x48
+#define AT91_AIC5_ISCR			0x4c
+#define AT91_AIC5_EOICR			0x38
+#define AT91_AIC5_SPU			0x3c
+#define AT91_AIC5_DCR			0x6c
+
+#define AT91_AIC5_FFER			0x50
+#define AT91_AIC5_FFDR			0x54
+#define AT91_AIC5_FFSR			0x58
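+
+/*
+ * Unlike the original AIC, the AIC5 banks its per-source registers behind
+ * a Source Select Register: writing a source number to AT91_AIC5_SSR
+ * selects which source the subsequent SMR/SVR/IECR/IDCR/ICCR/ISCR
+ * accesses operate on, which is why those registers all share one lock.
+ */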
+
+static struct irq_domain *aic5_domain;
+
+static asmlinkage void __exception_irq_entry
+aic5_handle(struct pt_regs *regs)
+{
+	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(aic5_domain, 0);
+	u32 irqnr;
+	u32 irqstat;
+
+	irqnr = irq_reg_readl(bgc, AT91_AIC5_IVR);
+	irqstat = irq_reg_readl(bgc, AT91_AIC5_ISR);
+
+	if (!irqstat)
+		irq_reg_writel(bgc, 0, AT91_AIC5_EOICR);
+	else
+		handle_domain_irq(aic5_domain, irqnr, regs);
+}
+
+static void aic5_mask(struct irq_data *d)
+{
+	struct irq_domain *domain = d->domain;
+	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+
+	/*
+	 * Disable interrupt on AIC5. We always take the lock of the
+	 * first irq chip as all chips share the same registers.
+	 */
+	irq_gc_lock(bgc);
+	irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
+	irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
+	gc->mask_cache &= ~d->mask;
+	irq_gc_unlock(bgc);
+}
+
+static void aic5_unmask(struct irq_data *d)
+{
+	struct irq_domain *domain = d->domain;
+	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+
+	/*
+	 * Enable interrupt on AIC5. We always take the lock of the
+	 * first irq chip as all chips share the same registers.
+	 */
+	irq_gc_lock(bgc);
+	irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
+	irq_reg_writel(gc, 1, AT91_AIC5_IECR);
+	gc->mask_cache |= d->mask;
+	irq_gc_unlock(bgc);
+}
+
+static int aic5_retrigger(struct irq_data *d)
+{
+	struct irq_domain *domain = d->domain;
+	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
+
+	/* Software-retrigger the interrupt on the AIC5 */
+	irq_gc_lock(bgc);
+	irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
+	irq_reg_writel(bgc, 1, AT91_AIC5_ISCR);
+	irq_gc_unlock(bgc);
+
+	return 0;
+}
+
+static int aic5_set_type(struct irq_data *d, unsigned type)
+{
+	struct irq_domain *domain = d->domain;
+	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
+	unsigned int smr;
+	int ret;
+
+	irq_gc_lock(bgc);
+	irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
+	smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
+	ret = aic_common_set_type(d, type, &smr);
+	if (!ret)
+		irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
+	irq_gc_unlock(bgc);
+
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static u32 *smr_cache;
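+
+/*
+ * smr_cache is only allocated for the sama5d2 (see sama5d2_aic5_of_init()
+ * below).  When present, suspend saves every source's SMR and resume
+ * rewrites SVR/SMR in addition to the mask state, so the controller can
+ * be fully reprogrammed after a suspend state that loses its contents.
+ */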
+
+static void aic5_suspend(struct irq_data *d)
+{
+	struct irq_domain *domain = d->domain;
+	struct irq_domain_chip_generic *dgc = domain->gc;
+	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	int i;
+	u32 mask;
+
+	if (smr_cache)
+		for (i = 0; i < domain->revmap_size; i++) {
+			irq_reg_writel(bgc, i, AT91_AIC5_SSR);
+			smr_cache[i] = irq_reg_readl(bgc, AT91_AIC5_SMR);
+		}
+
+	irq_gc_lock(bgc);
+	for (i = 0; i < dgc->irqs_per_chip; i++) {
+		mask = 1 << i;
+		if ((mask & gc->mask_cache) == (mask & gc->wake_active))
+			continue;
+
+		irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
+		if (mask & gc->wake_active)
+			irq_reg_writel(bgc, 1, AT91_AIC5_IECR);
+		else
+			irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
+	}
+	irq_gc_unlock(bgc);
+}
+
+static void aic5_resume(struct irq_data *d)
+{
+	struct irq_domain *domain = d->domain;
+	struct irq_domain_chip_generic *dgc = domain->gc;
+	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	int i;
+	u32 mask;
+
+	irq_gc_lock(bgc);
+
+	if (smr_cache) {
+		irq_reg_writel(bgc, 0xffffffff, AT91_AIC5_SPU);
+		for (i = 0; i < domain->revmap_size; i++) {
+			irq_reg_writel(bgc, i, AT91_AIC5_SSR);
+			irq_reg_writel(bgc, i, AT91_AIC5_SVR);
+			irq_reg_writel(bgc, smr_cache[i], AT91_AIC5_SMR);
+		}
+	}
+
+	for (i = 0; i < dgc->irqs_per_chip; i++) {
+		mask = 1 << i;
+
+		if (!smr_cache &&
+		    ((mask & gc->mask_cache) == (mask & gc->wake_active)))
+			continue;
+
+		irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
+		if (mask & gc->mask_cache)
+			irq_reg_writel(bgc, 1, AT91_AIC5_IECR);
+		else
+			irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
+	}
+	irq_gc_unlock(bgc);
+}
+
+static void aic5_pm_shutdown(struct irq_data *d)
+{
+	struct irq_domain *domain = d->domain;
+	struct irq_domain_chip_generic *dgc = domain->gc;
+	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	int i;
+
+	irq_gc_lock(bgc);
+	for (i = 0; i < dgc->irqs_per_chip; i++) {
+		irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
+		irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
+		irq_reg_writel(bgc, 1, AT91_AIC5_ICCR);
+	}
+	irq_gc_unlock(bgc);
+}
+#else
+#define aic5_suspend		NULL
+#define aic5_resume		NULL
+#define aic5_pm_shutdown	NULL
+#endif /* CONFIG_PM */
+
+static void __init aic5_hw_init(struct irq_domain *domain)
+{
+	struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
+	int i;
+
+	/*
+	 * Perform 8 End Of Interrupt commands to make sure the AIC
+	 * will not lock out nIRQ
+	 */
+	for (i = 0; i < 8; i++)
+		irq_reg_writel(gc, 0, AT91_AIC5_EOICR);
+
+	/*
+	 * Spurious Interrupt ID in Spurious Vector Register.
+	 * When there is no current interrupt, the IRQ Vector Register
+	 * reads the value stored in AIC_SPU
+	 */
+	irq_reg_writel(gc, 0xffffffff, AT91_AIC5_SPU);
+
+	/* No debugging in AIC: Debug (Protect) Control Register */
+	irq_reg_writel(gc, 0, AT91_AIC5_DCR);
+
+	/* Disable and clear all interrupts initially */
+	for (i = 0; i < domain->revmap_size; i++) {
+		irq_reg_writel(gc, i, AT91_AIC5_SSR);
+		irq_reg_writel(gc, i, AT91_AIC5_SVR);
+		irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
+		irq_reg_writel(gc, 1, AT91_AIC5_ICCR);
+	}
+}
+
+static int aic5_irq_domain_xlate(struct irq_domain *d,
+				 struct device_node *ctrlr,
+				 const u32 *intspec, unsigned int intsize,
+				 irq_hw_number_t *out_hwirq,
+				 unsigned int *out_type)
+{
+	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
+	unsigned long flags;
+	unsigned smr;
+	int ret;
+
+	if (!bgc)
+		return -EINVAL;
+
+	ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
+					  out_hwirq, out_type);
+	if (ret)
+		return ret;
+
+	irq_gc_lock_irqsave(bgc, flags);
+	irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
+	smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
+	aic_common_set_priority(intspec[2], &smr);
+	irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
+	irq_gc_unlock_irqrestore(bgc, flags);
+
+	return ret;
+}
+
+static const struct irq_domain_ops aic5_irq_ops = {
+	.map	= irq_map_generic_chip,
+	.xlate	= aic5_irq_domain_xlate,
+};
+
+static void __init sama5d3_aic_irq_fixup(void)
+{
+	aic_common_rtc_irq_fixup();
+}
+
+static const struct of_device_id aic5_irq_fixups[] __initconst = {
+	{ .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
+	{ .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
+	{ /* sentinel */ },
+};
+
+static int __init aic5_of_init(struct device_node *node,
+			       struct device_node *parent,
+			       int nirqs)
+{
+	struct irq_chip_generic *gc;
+	struct irq_domain *domain;
+	int nchips;
+	int i;
+
+	if (nirqs > NR_AIC5_IRQS)
+		return -EINVAL;
+
+	if (aic5_domain)
+		return -EEXIST;
+
+	domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5",
+				    nirqs, aic5_irq_fixups);
+	if (IS_ERR(domain))
+		return PTR_ERR(domain);
+
+	aic5_domain = domain;
+	nchips = aic5_domain->revmap_size / 32;
+	for (i = 0; i < nchips; i++) {
+		gc = irq_get_domain_generic_chip(domain, i * 32);
+
+		gc->chip_types[0].regs.eoi = AT91_AIC5_EOICR;
+		gc->chip_types[0].chip.irq_mask = aic5_mask;
+		gc->chip_types[0].chip.irq_unmask = aic5_unmask;
+		gc->chip_types[0].chip.irq_retrigger = aic5_retrigger;
+		gc->chip_types[0].chip.irq_set_type = aic5_set_type;
+		gc->chip_types[0].chip.irq_suspend = aic5_suspend;
+		gc->chip_types[0].chip.irq_resume = aic5_resume;
+		gc->chip_types[0].chip.irq_pm_shutdown = aic5_pm_shutdown;
+	}
+
+	aic5_hw_init(domain);
+	set_handle_irq(aic5_handle);
+
+	return 0;
+}
+
+#define NR_SAMA5D2_IRQS		77
+
+static int __init sama5d2_aic5_of_init(struct device_node *node,
+				       struct device_node *parent)
+{
+#ifdef CONFIG_PM
+	smr_cache = kcalloc(DIV_ROUND_UP(NR_SAMA5D2_IRQS, 32) * 32,
+			    sizeof(*smr_cache), GFP_KERNEL);
+	if (!smr_cache)
+		return -ENOMEM;
+#endif
+
+	return aic5_of_init(node, parent, NR_SAMA5D2_IRQS);
+}
+IRQCHIP_DECLARE(sama5d2_aic5, "atmel,sama5d2-aic", sama5d2_aic5_of_init);
+
+#define NR_SAMA5D3_IRQS		48
+
+static int __init sama5d3_aic5_of_init(struct device_node *node,
+				       struct device_node *parent)
+{
+	return aic5_of_init(node, parent, NR_SAMA5D3_IRQS);
+}
+IRQCHIP_DECLARE(sama5d3_aic5, "atmel,sama5d3-aic", sama5d3_aic5_of_init);
+
+#define NR_SAMA5D4_IRQS		68
+
+static int __init sama5d4_aic5_of_init(struct device_node *node,
+				       struct device_node *parent)
+{
+	return aic5_of_init(node, parent, NR_SAMA5D4_IRQS);
+}
+IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init);
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
new file mode 100644
index 0000000..d2da8a1
--- /dev/null
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2010 Broadcom
+ * Copyright 2012 Simon Arlott, Chris Boot, Stephen Warren
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Quirk 1: Shortcut interrupts don't set the bank 1/2 register pending bits
+ *
+ * If an interrupt fires on bank 1 that isn't in the shortcuts list, bit 8
+ * on bank 0 is set to signify that an interrupt in bank 1 has fired, and
+ * to look in the bank 1 status register for more information.
+ *
+ * If an interrupt fires on bank 1 that _is_ in the shortcuts list, its
+ * shortcut bit in bank 0 is set as well as its interrupt bit in the bank 1
+ * status register, but bank 0 bit 8 is _not_ set.
+ *
+ * Quirk 2: You can't mask the register 1/2 pending interrupts
+ *
+ * In a proper cascaded interrupt controller, the interrupt lines with
+ * cascaded interrupt controllers on them are just normal interrupt lines.
+ * You can mask the interrupts and get on with things. With this controller
+ * you can't do that.
+ *
+ * Quirk 3: The shortcut interrupts can't be (un)masked in bank 0
+ *
+ * Those interrupts that have shortcuts can only be masked/unmasked in
+ * their respective banks' enable/disable registers. Doing so in the bank 0
+ * enable/disable registers has no effect.
+ *
+ * The FIQ control register:
+ *  Bits 0-6: IRQ (index in order of interrupts from banks 1, 2, then 0)
+ *  Bit    7: Enable FIQ generation
+ *  Bits  8+: Unused
+ *
+ * An interrupt must be disabled before configuring it for FIQ generation
+ * otherwise both handlers will fire at the same time!
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+
+#include <asm/exception.h>
+
+/* Put the bank and irq (32 bits) into the hwirq */
+#define MAKE_HWIRQ(b, n)	(((b) << 5) | (n))
+#define HWIRQ_BANK(i)		((i) >> 5)
+#define HWIRQ_BIT(i)		BIT((i) & 0x1f)
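+/* e.g. (illustrative) bank 1, bit 9 encodes as MAKE_HWIRQ(1, 9) == 41 */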
+
+#define NR_IRQS_BANK0		8
+#define BANK0_HWIRQ_MASK	0xff
+/* Shortcuts can't be disabled so any unknown new ones need to be masked */
+#define SHORTCUT1_MASK		0x00007c00
+#define SHORTCUT2_MASK		0x001f8000
+#define SHORTCUT_SHIFT		10
+#define BANK1_HWIRQ		BIT(8)
+#define BANK2_HWIRQ		BIT(9)
+#define BANK0_VALID_MASK	(BANK0_HWIRQ_MASK | BANK1_HWIRQ | BANK2_HWIRQ \
+					| SHORTCUT1_MASK | SHORTCUT2_MASK)
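+
+/*
+ * SHORTCUT1_MASK covers bank 0 bits 10-14, matching the five bank-1
+ * entries of shortcuts[]; SHORTCUT2_MASK covers bits 15-20, matching the
+ * six bank-2 entries.
+ */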
+
+#define REG_FIQ_CONTROL		0x0c
+
+#define NR_BANKS		3
+#define IRQS_PER_BANK		32
+
+static const int reg_pending[] __initconst = { 0x00, 0x04, 0x08 };
+static const int reg_enable[] __initconst = { 0x18, 0x10, 0x14 };
+static const int reg_disable[] __initconst = { 0x24, 0x1c, 0x20 };
+static const int bank_irqs[] __initconst = { 8, 32, 32 };
+
+static const int shortcuts[] = {
+	7, 9, 10, 18, 19,		/* Bank 1 */
+	21, 22, 23, 24, 25, 30		/* Bank 2 */
+};
+
+struct armctrl_ic {
+	void __iomem *base;
+	void __iomem *pending[NR_BANKS];
+	void __iomem *enable[NR_BANKS];
+	void __iomem *disable[NR_BANKS];
+	struct irq_domain *domain;
+};
+
+static struct armctrl_ic intc __read_mostly;
+static void __exception_irq_entry bcm2835_handle_irq(
+	struct pt_regs *regs);
+static void bcm2836_chained_handle_irq(struct irq_desc *desc);
+
+static void armctrl_mask_irq(struct irq_data *d)
+{
+	writel_relaxed(HWIRQ_BIT(d->hwirq), intc.disable[HWIRQ_BANK(d->hwirq)]);
+}
+
+static void armctrl_unmask_irq(struct irq_data *d)
+{
+	writel_relaxed(HWIRQ_BIT(d->hwirq), intc.enable[HWIRQ_BANK(d->hwirq)]);
+}
+
+static struct irq_chip armctrl_chip = {
+	.name = "ARMCTRL-level",
+	.irq_mask = armctrl_mask_irq,
+	.irq_unmask = armctrl_unmask_irq
+};
+
+static int armctrl_xlate(struct irq_domain *d, struct device_node *ctrlr,
+	const u32 *intspec, unsigned int intsize,
+	unsigned long *out_hwirq, unsigned int *out_type)
+{
+	if (WARN_ON(intsize != 2))
+		return -EINVAL;
+
+	if (WARN_ON(intspec[0] >= NR_BANKS))
+		return -EINVAL;
+
+	if (WARN_ON(intspec[1] >= IRQS_PER_BANK))
+		return -EINVAL;
+
+	if (WARN_ON(intspec[0] == 0 && intspec[1] >= NR_IRQS_BANK0))
+		return -EINVAL;
+
+	*out_hwirq = MAKE_HWIRQ(intspec[0], intspec[1]);
+	*out_type = IRQ_TYPE_NONE;
+	return 0;
+}
+
+static const struct irq_domain_ops armctrl_ops = {
+	.xlate = armctrl_xlate
+};
+
+static int __init armctrl_of_init(struct device_node *node,
+				  struct device_node *parent,
+				  bool is_2836)
+{
+	void __iomem *base;
+	int irq, b, i;
+
+	base = of_iomap(node, 0);
+	if (!base)
+		panic("%pOF: unable to map IC registers\n", node);
+
+	intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0),
+			&armctrl_ops, NULL);
+	if (!intc.domain)
+		panic("%pOF: unable to create IRQ domain\n", node);
+
+	for (b = 0; b < NR_BANKS; b++) {
+		intc.pending[b] = base + reg_pending[b];
+		intc.enable[b] = base + reg_enable[b];
+		intc.disable[b] = base + reg_disable[b];
+
+		for (i = 0; i < bank_irqs[b]; i++) {
+			irq = irq_create_mapping(intc.domain, MAKE_HWIRQ(b, i));
+			BUG_ON(irq <= 0);
+			irq_set_chip_and_handler(irq, &armctrl_chip,
+				handle_level_irq);
+			irq_set_probe(irq);
+		}
+	}
+
+	if (is_2836) {
+		int parent_irq = irq_of_parse_and_map(node, 0);
+
+		if (!parent_irq) {
+			panic("%pOF: unable to get parent interrupt.\n",
+			      node);
+		}
+		irq_set_chained_handler(parent_irq, bcm2836_chained_handle_irq);
+	} else {
+		set_handle_irq(bcm2835_handle_irq);
+	}
+
+	return 0;
+}
+
+static int __init bcm2835_armctrl_of_init(struct device_node *node,
+					  struct device_node *parent)
+{
+	return armctrl_of_init(node, parent, false);
+}
+
+static int __init bcm2836_armctrl_of_init(struct device_node *node,
+					  struct device_node *parent)
+{
+	return armctrl_of_init(node, parent, true);
+}
+
+/*
+ * Handle each interrupt across the entire interrupt controller.  This reads the
+ * status register before handling each interrupt, which is necessary given that
+ * handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
+ */
+
+static u32 armctrl_translate_bank(int bank)
+{
+	u32 stat = readl_relaxed(intc.pending[bank]);
+
+	return MAKE_HWIRQ(bank, ffs(stat) - 1);
+}
+
+static u32 armctrl_translate_shortcut(int bank, u32 stat)
+{
+	return MAKE_HWIRQ(bank, shortcuts[ffs(stat >> SHORTCUT_SHIFT) - 1]);
+}
+
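+/*
+ * Sources are checked in a fixed priority order: native bank-0 bits first,
+ * then the bank-1 and bank-2 shortcut bits, and only then the slower path
+ * through the bank 1/2 pending registers.
+ */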
+static u32 get_next_armctrl_hwirq(void)
+{
+	u32 stat = readl_relaxed(intc.pending[0]) & BANK0_VALID_MASK;
+
+	if (stat == 0)
+		return ~0;
+	else if (stat & BANK0_HWIRQ_MASK)
+		return MAKE_HWIRQ(0, ffs(stat & BANK0_HWIRQ_MASK) - 1);
+	else if (stat & SHORTCUT1_MASK)
+		return armctrl_translate_shortcut(1, stat & SHORTCUT1_MASK);
+	else if (stat & SHORTCUT2_MASK)
+		return armctrl_translate_shortcut(2, stat & SHORTCUT2_MASK);
+	else if (stat & BANK1_HWIRQ)
+		return armctrl_translate_bank(1);
+	else if (stat & BANK2_HWIRQ)
+		return armctrl_translate_bank(2);
+	else
+		BUG();
+}
+
+static void __exception_irq_entry bcm2835_handle_irq(
+	struct pt_regs *regs)
+{
+	u32 hwirq;
+
+	while ((hwirq = get_next_armctrl_hwirq()) != ~0)
+		handle_domain_irq(intc.domain, hwirq, regs);
+}
+
+static void bcm2836_chained_handle_irq(struct irq_desc *desc)
+{
+	u32 hwirq;
+
+	while ((hwirq = get_next_armctrl_hwirq()) != ~0)
+		generic_handle_irq(irq_linear_revmap(intc.domain, hwirq));
+}
+
+IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic",
+		bcm2835_armctrl_of_init);
+IRQCHIP_DECLARE(bcm2836_armctrl_ic, "brcm,bcm2836-armctrl-ic",
+		bcm2836_armctrl_of_init);
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
new file mode 100644
index 0000000..dfe4a46
--- /dev/null
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -0,0 +1,251 @@
+/*
+ * Root interrupt controller for the BCM2836 (Raspberry Pi 2).
+ *
+ * Copyright 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/irq-bcm2836.h>
+
+#include <asm/exception.h>
+
+struct bcm2836_arm_irqchip_intc {
+	struct irq_domain *domain;
+	void __iomem *base;
+};
+
+static struct bcm2836_arm_irqchip_intc intc __read_mostly;
+
+static void bcm2836_arm_irqchip_mask_per_cpu_irq(unsigned int reg_offset,
+						 unsigned int bit,
+						 int cpu)
+{
+	void __iomem *reg = intc.base + reg_offset + 4 * cpu;
+
+	writel(readl(reg) & ~BIT(bit), reg);
+}
+
+static void bcm2836_arm_irqchip_unmask_per_cpu_irq(unsigned int reg_offset,
+						   unsigned int bit,
+						 int cpu)
+{
+	void __iomem *reg = intc.base + reg_offset + 4 * cpu;
+
+	writel(readl(reg) | BIT(bit), reg);
+}
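+
+/*
+ * Each CPU has its own copy of these control words, hence the 4-byte
+ * stride above; the mailbox set/clear registers used further down have a
+ * 16-byte per-CPU stride (four 32-bit mailboxes per CPU).
+ */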
+
+static void bcm2836_arm_irqchip_mask_timer_irq(struct irq_data *d)
+{
+	bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_TIMER_INT_CONTROL0,
+					     d->hwirq - LOCAL_IRQ_CNTPSIRQ,
+					     smp_processor_id());
+}
+
+static void bcm2836_arm_irqchip_unmask_timer_irq(struct irq_data *d)
+{
+	bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_TIMER_INT_CONTROL0,
+					       d->hwirq - LOCAL_IRQ_CNTPSIRQ,
+					       smp_processor_id());
+}
+
+static struct irq_chip bcm2836_arm_irqchip_timer = {
+	.name		= "bcm2836-timer",
+	.irq_mask	= bcm2836_arm_irqchip_mask_timer_irq,
+	.irq_unmask	= bcm2836_arm_irqchip_unmask_timer_irq,
+};
+
+static void bcm2836_arm_irqchip_mask_pmu_irq(struct irq_data *d)
+{
+	writel(1 << smp_processor_id(), intc.base + LOCAL_PM_ROUTING_CLR);
+}
+
+static void bcm2836_arm_irqchip_unmask_pmu_irq(struct irq_data *d)
+{
+	writel(1 << smp_processor_id(), intc.base + LOCAL_PM_ROUTING_SET);
+}
+
+static struct irq_chip bcm2836_arm_irqchip_pmu = {
+	.name		= "bcm2836-pmu",
+	.irq_mask	= bcm2836_arm_irqchip_mask_pmu_irq,
+	.irq_unmask	= bcm2836_arm_irqchip_unmask_pmu_irq,
+};
+
+static void bcm2836_arm_irqchip_mask_gpu_irq(struct irq_data *d)
+{
+}
+
+static void bcm2836_arm_irqchip_unmask_gpu_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip bcm2836_arm_irqchip_gpu = {
+	.name		= "bcm2836-gpu",
+	.irq_mask	= bcm2836_arm_irqchip_mask_gpu_irq,
+	.irq_unmask	= bcm2836_arm_irqchip_unmask_gpu_irq,
+};
+
+static int bcm2836_map(struct irq_domain *d, unsigned int irq,
+		       irq_hw_number_t hw)
+{
+	struct irq_chip *chip;
+
+	switch (hw) {
+	case LOCAL_IRQ_CNTPSIRQ:
+	case LOCAL_IRQ_CNTPNSIRQ:
+	case LOCAL_IRQ_CNTHPIRQ:
+	case LOCAL_IRQ_CNTVIRQ:
+		chip = &bcm2836_arm_irqchip_timer;
+		break;
+	case LOCAL_IRQ_GPU_FAST:
+		chip = &bcm2836_arm_irqchip_gpu;
+		break;
+	case LOCAL_IRQ_PMU_FAST:
+		chip = &bcm2836_arm_irqchip_pmu;
+		break;
+	default:
+		pr_warn_once("Unexpected hw irq: %lu\n", hw);
+		return -EINVAL;
+	}
+
+	irq_set_percpu_devid(irq);
+	irq_domain_set_info(d, irq, hw, chip, d->host_data,
+			    handle_percpu_devid_irq, NULL, NULL);
+	irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+	return 0;
+}
+
+static void
+__exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+	u32 stat;
+
+	stat = readl_relaxed(intc.base + LOCAL_IRQ_PENDING0 + 4 * cpu);
+	if (stat & BIT(LOCAL_IRQ_MAILBOX0)) {
+#ifdef CONFIG_SMP
+		void __iomem *mailbox0 = (intc.base +
+					  LOCAL_MAILBOX0_CLR0 + 16 * cpu);
+		u32 mbox_val = readl(mailbox0);
+		u32 ipi = ffs(mbox_val) - 1;
+
+		writel(1 << ipi, mailbox0);
+		handle_IPI(ipi, regs);
+#endif
+	} else if (stat) {
+		u32 hwirq = ffs(stat) - 1;
+
+		handle_domain_irq(intc.domain, hwirq, regs);
+	}
+}
+
+#ifdef CONFIG_SMP
+static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
+					 unsigned int ipi)
+{
+	int cpu;
+	void __iomem *mailbox0_base = intc.base + LOCAL_MAILBOX0_SET0;
+
+	/*
+	 * Ensure that stores to normal memory are visible to the
+	 * other CPUs before issuing the IPI.
+	 */
+	smp_wmb();
+
+	for_each_cpu(cpu, mask)	{
+		writel(1 << ipi, mailbox0_base + 16 * cpu);
+	}
+}
+
+static int bcm2836_cpu_starting(unsigned int cpu)
+{
+	bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+					       cpu);
+	return 0;
+}
+
+static int bcm2836_cpu_dying(unsigned int cpu)
+{
+	bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+					     cpu);
+	return 0;
+}
+#endif
+
+static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
+	.xlate = irq_domain_xlate_onetwocell,
+	.map = bcm2836_map,
+};
+
+static void
+bcm2836_arm_irqchip_smp_init(void)
+{
+#ifdef CONFIG_SMP
+	/* Unmask IPIs to the boot CPU. */
+	cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING,
+			  "irqchip/bcm2836:starting", bcm2836_cpu_starting,
+			  bcm2836_cpu_dying);
+
+	set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
+#endif
+}
+
+/*
+ * The LOCAL_IRQ_CNT* timer firings are based on the external
+ * oscillator with some scaling.  The firmware sets up CNTFRQ to
+ * report 19.2 MHz, but doesn't set up the scaling registers.
+ */
+static void bcm2835_init_local_timer_frequency(void)
+{
+	/*
+	 * Set the timer to source from the 19.2 MHz crystal clock (bit
+	 * 8 unset), and only increment by 1 instead of 2 (bit 9
+	 * unset).
+	 */
+	writel(0, intc.base + LOCAL_CONTROL);
+
+	/*
+	 * Set the timer prescaler to 1:1 (timer freq = input freq *
+	 * 2**31 / prescaler); 0x80000000 is 2**31, so the timers run
+	 * at the full 19.2 MHz crystal rate.
+	 */
+	writel(0x80000000, intc.base + LOCAL_PRESCALER);
+}
+
+static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
+						      struct device_node *parent)
+{
+	intc.base = of_iomap(node, 0);
+	if (!intc.base) {
+		panic("%pOF: unable to map local interrupt registers\n", node);
+	}
+
+	bcm2835_init_local_timer_frequency();
+
+	intc.domain = irq_domain_add_linear(node, LAST_IRQ + 1,
+					    &bcm2836_arm_irqchip_intc_ops,
+					    NULL);
+	if (!intc.domain)
+		panic("%pOF: unable to create IRQ domain\n", node);
+
+	bcm2836_arm_irqchip_smp_init();
+
+	set_handle_irq(bcm2836_arm_irqchip_handle_irq);
+	return 0;
+}
+
+IRQCHIP_DECLARE(bcm2836_arm_irqchip_l1_intc, "brcm,bcm2836-l1-intc",
+		bcm2836_arm_irqchip_l1_intc_of_init);
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
new file mode 100644
index 0000000..43f8abe
--- /dev/null
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -0,0 +1,366 @@
+/*
+ * Broadcom BCM6345 style Level 1 interrupt controller driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ * Copyright 2015 Simon Arlott
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is based on the BCM7038 (which supports SMP) but with a single
+ * enable register instead of separate mask/set/clear registers.
+ *
+ * The BCM3380 has a similar mask/status register layout, but each pair
+ * of words is at separate locations (and SMP is not supported).
+ *
+ * ENABLE/STATUS words are packed next to each other for each CPU:
+ *
+ * BCM6368:
+ *   0x1000_0020: CPU0_W0_ENABLE
+ *   0x1000_0024: CPU0_W1_ENABLE
+ *   0x1000_0028: CPU0_W0_STATUS		IRQs 32-63
+ *   0x1000_002c: CPU0_W1_STATUS		IRQs 0-31
+ *   0x1000_0030: CPU1_W0_ENABLE
+ *   0x1000_0034: CPU1_W1_ENABLE
+ *   0x1000_0038: CPU1_W0_STATUS		IRQs 32-63
+ *   0x1000_003c: CPU1_W1_STATUS		IRQs 0-31
+ *
+ * BCM63168:
+ *   0x1000_0020: CPU0_W0_ENABLE
+ *   0x1000_0024: CPU0_W1_ENABLE
+ *   0x1000_0028: CPU0_W2_ENABLE
+ *   0x1000_002c: CPU0_W3_ENABLE
+ *   0x1000_0030: CPU0_W0_STATUS	IRQs 96-127
+ *   0x1000_0034: CPU0_W1_STATUS	IRQs 64-95
+ *   0x1000_0038: CPU0_W2_STATUS	IRQs 32-63
+ *   0x1000_003c: CPU0_W3_STATUS	IRQs 0-31
+ *   0x1000_0040: CPU1_W0_ENABLE
+ *   0x1000_0044: CPU1_W1_ENABLE
+ *   0x1000_0048: CPU1_W2_ENABLE
+ *   0x1000_004c: CPU1_W3_ENABLE
+ *   0x1000_0050: CPU1_W0_STATUS	IRQs 96-127
+ *   0x1000_0054: CPU1_W1_STATUS	IRQs 64-95
+ *   0x1000_0058: CPU1_W2_STATUS	IRQs 32-63
+ *   0x1000_005c: CPU1_W3_STATUS	IRQs 0-31
+ *
+ * IRQs are numbered in CPU native endian order
+ * (which is big-endian in these examples)
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME	": " fmt
+
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define IRQS_PER_WORD		32
+#define REG_BYTES_PER_IRQ_WORD	(sizeof(u32) * 2)
+
+struct bcm6345_l1_cpu;
+
+struct bcm6345_l1_chip {
+	raw_spinlock_t		lock;
+	unsigned int		n_words;
+	struct irq_domain	*domain;
+	struct cpumask		cpumask;
+	struct bcm6345_l1_cpu	*cpus[NR_CPUS];
+};
+
+struct bcm6345_l1_cpu {
+	void __iomem		*map_base;
+	unsigned int		parent_irq;
+	u32			enable_cache[];
+};
+
+static inline unsigned int reg_enable(struct bcm6345_l1_chip *intc,
+				      unsigned int word)
+{
+#ifdef __BIG_ENDIAN
+	return (1 * intc->n_words - word - 1) * sizeof(u32);
+#else
+	return (0 * intc->n_words + word) * sizeof(u32);
+#endif
+}
+
+static inline unsigned int reg_status(struct bcm6345_l1_chip *intc,
+				      unsigned int word)
+{
+#ifdef __BIG_ENDIAN
+	return (2 * intc->n_words - word - 1) * sizeof(u32);
+#else
+	return (1 * intc->n_words + word) * sizeof(u32);
+#endif
+}
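+
+/*
+ * Worked example: with n_words = 2 (BCM6368), logical word 0
+ * (hwirqs 0-31) resolves to reg_enable = 0x04 and reg_status = 0x0c
+ * on big-endian (CPU0_W1_ENABLE/CPU0_W1_STATUS in the layout above),
+ * or reg_enable = 0x00 and reg_status = 0x08 on little-endian,
+ * matching the native-endian IRQ numbering described in the header.
+ */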
+
+static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
+					struct irq_data *d)
+{
+	return cpumask_first_and(&intc->cpumask, irq_data_get_affinity_mask(d));
+}
+
+static void bcm6345_l1_irq_handle(struct irq_desc *desc)
+{
+	struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc);
+	struct bcm6345_l1_cpu *cpu;
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int idx;
+
+#ifdef CONFIG_SMP
+	cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
+#else
+	cpu = intc->cpus[0];
+#endif
+
+	chained_irq_enter(chip, desc);
+
+	for (idx = 0; idx < intc->n_words; idx++) {
+		int base = idx * IRQS_PER_WORD;
+		unsigned long pending;
+		irq_hw_number_t hwirq;
+		unsigned int irq;
+
+		pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
+		pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));
+
+		for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
+			irq = irq_linear_revmap(intc->domain, base + hwirq);
+			if (irq)
+				do_IRQ(irq);
+			else
+				spurious_interrupt();
+		}
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static inline void __bcm6345_l1_unmask(struct irq_data *d)
+{
+	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+	u32 word = d->hwirq / IRQS_PER_WORD;
+	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+	unsigned int cpu_idx = cpu_for_irq(intc, d);
+
+	intc->cpus[cpu_idx]->enable_cache[word] |= mask;
+	__raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
+		intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
+}
+
+static inline void __bcm6345_l1_mask(struct irq_data *d)
+{
+	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+	u32 word = d->hwirq / IRQS_PER_WORD;
+	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+	unsigned int cpu_idx = cpu_for_irq(intc, d);
+
+	intc->cpus[cpu_idx]->enable_cache[word] &= ~mask;
+	__raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
+		intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
+}
+
+static void bcm6345_l1_unmask(struct irq_data *d)
+{
+	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&intc->lock, flags);
+	__bcm6345_l1_unmask(d);
+	raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static void bcm6345_l1_mask(struct irq_data *d)
+{
+	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&intc->lock, flags);
+	__bcm6345_l1_mask(d);
+	raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static int bcm6345_l1_set_affinity(struct irq_data *d,
+				   const struct cpumask *dest,
+				   bool force)
+{
+	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+	u32 word = d->hwirq / IRQS_PER_WORD;
+	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+	unsigned int old_cpu = cpu_for_irq(intc, d);
+	unsigned int new_cpu;
+	struct cpumask valid;
+	unsigned long flags;
+	bool enabled;
+
+	if (!cpumask_and(&valid, &intc->cpumask, dest))
+		return -EINVAL;
+
+	new_cpu = cpumask_any_and(&valid, cpu_online_mask);
+	if (new_cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	dest = cpumask_of(new_cpu);
+
+	raw_spin_lock_irqsave(&intc->lock, flags);
+	if (old_cpu != new_cpu) {
+		enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
+		if (enabled)
+			__bcm6345_l1_mask(d);
+		cpumask_copy(irq_data_get_affinity_mask(d), dest);
+		if (enabled)
+			__bcm6345_l1_unmask(d);
+	} else {
+		cpumask_copy(irq_data_get_affinity_mask(d), dest);
+	}
+	raw_spin_unlock_irqrestore(&intc->lock, flags);
+
+	irq_data_update_effective_affinity(d, cpumask_of(new_cpu));
+
+	return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static int __init bcm6345_l1_init_one(struct device_node *dn,
+				      unsigned int idx,
+				      struct bcm6345_l1_chip *intc)
+{
+	struct resource res;
+	resource_size_t sz;
+	struct bcm6345_l1_cpu *cpu;
+	unsigned int i, n_words;
+
+	if (of_address_to_resource(dn, idx, &res))
+		return -EINVAL;
+	sz = resource_size(&res);
+	n_words = sz / REG_BYTES_PER_IRQ_WORD;
+
+	if (!intc->n_words)
+		intc->n_words = n_words;
+	else if (intc->n_words != n_words)
+		return -EINVAL;
+
+	cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+					GFP_KERNEL);
+	if (!cpu)
+		return -ENOMEM;
+
+	cpu->map_base = ioremap(res.start, sz);
+	if (!cpu->map_base)
+		return -ENOMEM;
+
+	for (i = 0; i < n_words; i++) {
+		cpu->enable_cache[i] = 0;
+		__raw_writel(0, cpu->map_base + reg_enable(intc, i));
+	}
+
+	cpu->parent_irq = irq_of_parse_and_map(dn, idx);
+	if (!cpu->parent_irq) {
+		pr_err("failed to map parent interrupt %d\n", idx);
+		return -EINVAL;
+	}
+	irq_set_chained_handler_and_data(cpu->parent_irq,
+						bcm6345_l1_irq_handle, intc);
+
+	return 0;
+}
+
+static struct irq_chip bcm6345_l1_irq_chip = {
+	.name			= "bcm6345-l1",
+	.irq_mask		= bcm6345_l1_mask,
+	.irq_unmask		= bcm6345_l1_unmask,
+	.irq_set_affinity	= bcm6345_l1_set_affinity,
+};
+
+static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
+			  irq_hw_number_t hw_irq)
+{
+	irq_set_chip_and_handler(virq,
+		&bcm6345_l1_irq_chip, handle_percpu_irq);
+	irq_set_chip_data(virq, d->host_data);
+	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
+	return 0;
+}
+
+static const struct irq_domain_ops bcm6345_l1_domain_ops = {
+	.xlate			= irq_domain_xlate_onecell,
+	.map			= bcm6345_l1_map,
+};
+
+static int __init bcm6345_l1_of_init(struct device_node *dn,
+			      struct device_node *parent)
+{
+	struct bcm6345_l1_chip *intc;
+	unsigned int idx;
+	int ret;
+
+	intc = kzalloc(sizeof(*intc), GFP_KERNEL);
+	if (!intc)
+		return -ENOMEM;
+
+	for_each_possible_cpu(idx) {
+		ret = bcm6345_l1_init_one(dn, idx, intc);
+		if (ret)
+			pr_err("failed to init intc L1 for cpu %d: %d\n",
+				idx, ret);
+		else
+			cpumask_set_cpu(idx, &intc->cpumask);
+	}
+
+	if (!cpumask_weight(&intc->cpumask)) {
+		ret = -ENODEV;
+		goto out_free;
+	}
+
+	raw_spin_lock_init(&intc->lock);
+
+	intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
+					     &bcm6345_l1_domain_ops,
+					     intc);
+	if (!intc->domain) {
+		ret = -ENOMEM;
+		goto out_unmap;
+	}
+
+	pr_info("registered BCM6345 L1 intc (IRQs: %d)\n",
+			IRQS_PER_WORD * intc->n_words);
+	for_each_cpu(idx, &intc->cpumask) {
+		struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
+
+		pr_info("  CPU%u at MMIO 0x%p (irq = %d)\n", idx,
+				cpu->map_base, cpu->parent_irq);
+	}
+
+	return 0;
+
+out_unmap:
+	for_each_possible_cpu(idx) {
+		struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
+
+		if (cpu) {
+			if (cpu->map_base)
+				iounmap(cpu->map_base);
+			kfree(cpu);
+		}
+	}
+out_free:
+	kfree(intc);
+	return ret;
+}
+
+IRQCHIP_DECLARE(bcm6345_l1, "brcm,bcm6345-l1-intc", bcm6345_l1_of_init);
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
new file mode 100644
index 0000000..0f6e30e
--- /dev/null
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -0,0 +1,363 @@
+/*
+ * Broadcom BCM7038 style Level 1 interrupt controller driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ * Author: Kevin Cernekee
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME	": " fmt
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define IRQS_PER_WORD		32
+#define REG_BYTES_PER_IRQ_WORD	(sizeof(u32) * 4)
+#define MAX_WORDS		8
+
+struct bcm7038_l1_cpu;
+
+struct bcm7038_l1_chip {
+	raw_spinlock_t		lock;
+	unsigned int		n_words;
+	struct irq_domain	*domain;
+	struct bcm7038_l1_cpu	*cpus[NR_CPUS];
+	u8			affinity[MAX_WORDS * IRQS_PER_WORD];
+};
+
+struct bcm7038_l1_cpu {
+	void __iomem		*map_base;
+	u32			mask_cache[];
+};
+
+/*
+ * STATUS/MASK_STATUS/MASK_SET/MASK_CLEAR are packed one right after another:
+ *
+ * 7038:
+ *   0x1000_1400: W0_STATUS
+ *   0x1000_1404: W1_STATUS
+ *   0x1000_1408: W0_MASK_STATUS
+ *   0x1000_140c: W1_MASK_STATUS
+ *   0x1000_1410: W0_MASK_SET
+ *   0x1000_1414: W1_MASK_SET
+ *   0x1000_1418: W0_MASK_CLEAR
+ *   0x1000_141c: W1_MASK_CLEAR
+ *
+ * 7445:
+ *   0xf03e_1500: W0_STATUS
+ *   0xf03e_1504: W1_STATUS
+ *   0xf03e_1508: W2_STATUS
+ *   0xf03e_150c: W3_STATUS
+ *   0xf03e_1510: W4_STATUS
+ *   0xf03e_1514: W0_MASK_STATUS
+ *   0xf03e_1518: W1_MASK_STATUS
+ *   [...]
+ */
+
+static inline unsigned int reg_status(struct bcm7038_l1_chip *intc,
+				      unsigned int word)
+{
+	return (0 * intc->n_words + word) * sizeof(u32);
+}
+
+static inline unsigned int reg_mask_status(struct bcm7038_l1_chip *intc,
+					   unsigned int word)
+{
+	return (1 * intc->n_words + word) * sizeof(u32);
+}
+
+static inline unsigned int reg_mask_set(struct bcm7038_l1_chip *intc,
+					unsigned int word)
+{
+	return (2 * intc->n_words + word) * sizeof(u32);
+}
+
+static inline unsigned int reg_mask_clr(struct bcm7038_l1_chip *intc,
+					unsigned int word)
+{
+	return (3 * intc->n_words + word) * sizeof(u32);
+}
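+
+/*
+ * Worked example: with n_words = 2 (the 7038 layout above, based at
+ * 0x1000_1400), word 0 resolves to reg_status = 0x00 (W0_STATUS),
+ * reg_mask_status = 0x08 (W0_MASK_STATUS), reg_mask_set = 0x10
+ * (W0_MASK_SET) and reg_mask_clr = 0x18 (W0_MASK_CLEAR).
+ */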
+
+static inline u32 l1_readl(void __iomem *reg)
+{
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+		return ioread32be(reg);
+	else
+		return readl(reg);
+}
+
+static inline void l1_writel(u32 val, void __iomem *reg)
+{
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+		iowrite32be(val, reg);
+	else
+		writel(val, reg);
+}
+
+static void bcm7038_l1_irq_handle(struct irq_desc *desc)
+{
+	struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc);
+	struct bcm7038_l1_cpu *cpu;
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int idx;
+
+#ifdef CONFIG_SMP
+	cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
+#else
+	cpu = intc->cpus[0];
+#endif
+
+	chained_irq_enter(chip, desc);
+
+	for (idx = 0; idx < intc->n_words; idx++) {
+		int base = idx * IRQS_PER_WORD;
+		unsigned long pending, flags;
+		int hwirq;
+
+		raw_spin_lock_irqsave(&intc->lock, flags);
+		pending = l1_readl(cpu->map_base + reg_status(intc, idx)) &
+			  ~cpu->mask_cache[idx];
+		raw_spin_unlock_irqrestore(&intc->lock, flags);
+
+		for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
+			generic_handle_irq(irq_find_mapping(intc->domain,
+							    base + hwirq));
+		}
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static void __bcm7038_l1_unmask(struct irq_data *d, unsigned int cpu_idx)
+{
+	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+	u32 word = d->hwirq / IRQS_PER_WORD;
+	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+
+	intc->cpus[cpu_idx]->mask_cache[word] &= ~mask;
+	l1_writel(mask, intc->cpus[cpu_idx]->map_base +
+			reg_mask_clr(intc, word));
+}
+
+static void __bcm7038_l1_mask(struct irq_data *d, unsigned int cpu_idx)
+{
+	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+	u32 word = d->hwirq / IRQS_PER_WORD;
+	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+
+	intc->cpus[cpu_idx]->mask_cache[word] |= mask;
+	l1_writel(mask, intc->cpus[cpu_idx]->map_base +
+			reg_mask_set(intc, word));
+}
+
+static void bcm7038_l1_unmask(struct irq_data *d)
+{
+	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&intc->lock, flags);
+	__bcm7038_l1_unmask(d, intc->affinity[d->hwirq]);
+	raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static void bcm7038_l1_mask(struct irq_data *d)
+{
+	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&intc->lock, flags);
+	__bcm7038_l1_mask(d, intc->affinity[d->hwirq]);
+	raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static int bcm7038_l1_set_affinity(struct irq_data *d,
+				   const struct cpumask *dest,
+				   bool force)
+{
+	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+	unsigned long flags;
+	irq_hw_number_t hw = d->hwirq;
+	u32 word = hw / IRQS_PER_WORD;
+	u32 mask = BIT(hw % IRQS_PER_WORD);
+	unsigned int first_cpu = cpumask_any_and(dest, cpu_online_mask);
+	bool was_disabled;
+
+	raw_spin_lock_irqsave(&intc->lock, flags);
+
+	was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] &
+			  mask);
+	__bcm7038_l1_mask(d, intc->affinity[hw]);
+	intc->affinity[hw] = first_cpu;
+	if (!was_disabled)
+		__bcm7038_l1_unmask(d, first_cpu);
+
+	raw_spin_unlock_irqrestore(&intc->lock, flags);
+	irq_data_update_effective_affinity(d, cpumask_of(first_cpu));
+
+	return 0;
+}
+
+#ifdef CONFIG_SMP
+static void bcm7038_l1_cpu_offline(struct irq_data *d)
+{
+	struct cpumask *mask = irq_data_get_affinity_mask(d);
+	int cpu = smp_processor_id();
+	cpumask_t new_affinity;
+
+	/* This CPU was not on the affinity mask */
+	if (!cpumask_test_cpu(cpu, mask))
+		return;
+
+	if (cpumask_weight(mask) > 1) {
+		/*
+		 * Multiple CPU affinity, remove this CPU from the affinity
+		 * mask
+		 */
+		cpumask_copy(&new_affinity, mask);
+		cpumask_clear_cpu(cpu, &new_affinity);
+	} else {
+		/* Only CPU, put on the lowest online CPU */
+		cpumask_clear(&new_affinity);
+		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
+	}
+	irq_set_affinity_locked(d, &new_affinity, false);
+}
+#endif
+
+static int __init bcm7038_l1_init_one(struct device_node *dn,
+				      unsigned int idx,
+				      struct bcm7038_l1_chip *intc)
+{
+	struct resource res;
+	resource_size_t sz;
+	struct bcm7038_l1_cpu *cpu;
+	unsigned int i, n_words, parent_irq;
+
+	if (of_address_to_resource(dn, idx, &res))
+		return -EINVAL;
+	sz = resource_size(&res);
+	n_words = sz / REG_BYTES_PER_IRQ_WORD;
+
+	if (n_words > MAX_WORDS)
+		return -EINVAL;
+	else if (!intc->n_words)
+		intc->n_words = n_words;
+	else if (intc->n_words != n_words)
+		return -EINVAL;
+
+	cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+					GFP_KERNEL);
+	if (!cpu)
+		return -ENOMEM;
+
+	cpu->map_base = ioremap(res.start, sz);
+	if (!cpu->map_base)
+		return -ENOMEM;
+
+	for (i = 0; i < n_words; i++) {
+		l1_writel(0xffffffff, cpu->map_base + reg_mask_set(intc, i));
+		cpu->mask_cache[i] = 0xffffffff;
+	}
+
+	parent_irq = irq_of_parse_and_map(dn, idx);
+	if (!parent_irq) {
+		pr_err("failed to map parent interrupt %d\n", idx);
+		return -EINVAL;
+	}
+	irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
+					 intc);
+
+	return 0;
+}
+
+static struct irq_chip bcm7038_l1_irq_chip = {
+	.name			= "bcm7038-l1",
+	.irq_mask		= bcm7038_l1_mask,
+	.irq_unmask		= bcm7038_l1_unmask,
+	.irq_set_affinity	= bcm7038_l1_set_affinity,
+#ifdef CONFIG_SMP
+	.irq_cpu_offline	= bcm7038_l1_cpu_offline,
+#endif
+};
+
+static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
+			  irq_hw_number_t hw_irq)
+{
+	irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
+	irq_set_chip_data(virq, d->host_data);
+	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
+	return 0;
+}
+
+static const struct irq_domain_ops bcm7038_l1_domain_ops = {
+	.xlate			= irq_domain_xlate_onecell,
+	.map			= bcm7038_l1_map,
+};
+
+int __init bcm7038_l1_of_init(struct device_node *dn,
+			      struct device_node *parent)
+{
+	struct bcm7038_l1_chip *intc;
+	int idx, ret;
+
+	intc = kzalloc(sizeof(*intc), GFP_KERNEL);
+	if (!intc)
+		return -ENOMEM;
+
+	raw_spin_lock_init(&intc->lock);
+	for_each_possible_cpu(idx) {
+		ret = bcm7038_l1_init_one(dn, idx, intc);
+		if (ret < 0) {
+			if (idx)
+				break;
+			pr_err("failed to remap intc L1 registers\n");
+			goto out_free;
+		}
+	}
+
+	intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
+					     &bcm7038_l1_domain_ops,
+					     intc);
+	if (!intc->domain) {
+		ret = -ENOMEM;
+		goto out_unmap;
+	}
+
+	return 0;
+
+out_unmap:
+	for_each_possible_cpu(idx) {
+		struct bcm7038_l1_cpu *cpu = intc->cpus[idx];
+
+		if (cpu) {
+			if (cpu->map_base)
+				iounmap(cpu->map_base);
+			kfree(cpu);
+		}
+	}
+out_free:
+	kfree(intc);
+	return ret;
+}
+
+IRQCHIP_DECLARE(bcm7038_l1, "brcm,bcm7038-l1-intc", bcm7038_l1_of_init);
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
new file mode 100644
index 0000000..8968e5e
--- /dev/null
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -0,0 +1,354 @@
+/*
+ * Broadcom BCM7120 style Level 2 interrupt controller driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME	": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/reboot.h>
+#include <linux/bitops.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+
+/* Register offset in the L2 interrupt controller */
+#define IRQEN		0x00
+#define IRQSTAT		0x04
+
+#define MAX_WORDS	4
+#define MAX_MAPPINGS	(MAX_WORDS * 2)
+#define IRQS_PER_WORD	32
+
+struct bcm7120_l1_intc_data {
+	struct bcm7120_l2_intc_data *b;
+	u32 irq_map_mask[MAX_WORDS];
+};
+
+struct bcm7120_l2_intc_data {
+	unsigned int n_words;
+	void __iomem *map_base[MAX_MAPPINGS];
+	void __iomem *pair_base[MAX_WORDS];
+	int en_offset[MAX_WORDS];
+	int stat_offset[MAX_WORDS];
+	struct irq_domain *domain;
+	bool can_wake;
+	u32 irq_fwd_mask[MAX_WORDS];
+	struct bcm7120_l1_intc_data *l1_data;
+	int num_parent_irqs;
+	const __be32 *map_mask_prop;
+};
+
+static void bcm7120_l2_intc_irq_handle(struct irq_desc *desc)
+{
+	struct bcm7120_l1_intc_data *data = irq_desc_get_handler_data(desc);
+	struct bcm7120_l2_intc_data *b = data->b;
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int idx;
+
+	chained_irq_enter(chip, desc);
+
+	for (idx = 0; idx < b->n_words; idx++) {
+		int base = idx * IRQS_PER_WORD;
+		struct irq_chip_generic *gc =
+			irq_get_domain_generic_chip(b->domain, base);
+		unsigned long pending;
+		int hwirq;
+
+		irq_gc_lock(gc);
+		pending = irq_reg_readl(gc, b->stat_offset[idx]) &
+					    gc->mask_cache &
+					    data->irq_map_mask[idx];
+		irq_gc_unlock(gc);
+
+		for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
+			generic_handle_irq(irq_find_mapping(b->domain,
+					   base + hwirq));
+		}
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static void bcm7120_l2_intc_suspend(struct irq_chip_generic *gc)
+{
+	struct bcm7120_l2_intc_data *b = gc->private;
+	struct irq_chip_type *ct = gc->chip_types;
+
+	irq_gc_lock(gc);
+	if (b->can_wake)
+		irq_reg_writel(gc, gc->mask_cache | gc->wake_active,
+			       ct->regs.mask);
+	irq_gc_unlock(gc);
+}
+
+static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc)
+{
+	struct irq_chip_type *ct = gc->chip_types;
+
+	/* Restore the saved mask */
+	irq_gc_lock(gc);
+	irq_reg_writel(gc, gc->mask_cache, ct->regs.mask);
+	irq_gc_unlock(gc);
+}
+
+static int bcm7120_l2_intc_init_one(struct device_node *dn,
+					struct bcm7120_l2_intc_data *data,
+					int irq, u32 *valid_mask)
+{
+	struct bcm7120_l1_intc_data *l1_data = &data->l1_data[irq];
+	int parent_irq;
+	unsigned int idx;
+
+	parent_irq = irq_of_parse_and_map(dn, irq);
+	if (!parent_irq) {
+		pr_err("failed to map interrupt %d\n", irq);
+		return -EINVAL;
+	}
+
+	/* For multiple parent IRQs with multiple words, this looks like:
+	 * <irq0_w0 irq0_w1 irq1_w0 irq1_w1 ...>
+	 *
+	 * We need to associate a given parent interrupt with its corresponding
+	 * map_mask in order to mask the status register with it because we
+	 * have the same handler being called for multiple parent interrupts.
+	 *
+	 * This is typically something needed on BCM7xxx (STB chips).
+	 */
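+	/*
+	 * For example (hypothetical values), two parent interrupts and
+	 * two words would be described as:
+	 *
+	 *   brcm,int-map-mask = <0x000000ff 0x00000000
+	 *                        0xffffff00 0xffffffff>;
+	 *
+	 * and parent irq 1, word 0 reads cell [1 * n_words + 0], i.e.
+	 * 0xffffff00, exactly as indexed below.
+	 */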
+	for (idx = 0; idx < data->n_words; idx++) {
+		if (data->map_mask_prop) {
+			l1_data->irq_map_mask[idx] |=
+				be32_to_cpup(data->map_mask_prop +
+					     irq * data->n_words + idx);
+		} else {
+			l1_data->irq_map_mask[idx] = 0xffffffff;
+		}
+		valid_mask[idx] |= l1_data->irq_map_mask[idx];
+	}
+
+	l1_data->b = data;
+
+	irq_set_chained_handler_and_data(parent_irq,
+					 bcm7120_l2_intc_irq_handle, l1_data);
+	return 0;
+}
+
+static int __init bcm7120_l2_intc_iomap_7120(struct device_node *dn,
+					     struct bcm7120_l2_intc_data *data)
+{
+	int ret;
+
+	data->map_base[0] = of_iomap(dn, 0);
+	if (!data->map_base[0]) {
+		pr_err("unable to map registers\n");
+		return -ENOMEM;
+	}
+
+	data->pair_base[0] = data->map_base[0];
+	data->en_offset[0] = IRQEN;
+	data->stat_offset[0] = IRQSTAT;
+	data->n_words = 1;
+
+	ret = of_property_read_u32_array(dn, "brcm,int-fwd-mask",
+					 data->irq_fwd_mask, data->n_words);
+	if (ret != 0 && ret != -EINVAL) {
+		/* property exists but has the wrong number of words */
+		pr_err("invalid brcm,int-fwd-mask property\n");
+		return -EINVAL;
+	}
+
+	data->map_mask_prop = of_get_property(dn, "brcm,int-map-mask", &ret);
+	if (!data->map_mask_prop ||
+	    (ret != (sizeof(__be32) * data->num_parent_irqs * data->n_words))) {
+		pr_err("invalid brcm,int-map-mask property\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __init bcm7120_l2_intc_iomap_3380(struct device_node *dn,
+					     struct bcm7120_l2_intc_data *data)
+{
+	unsigned int gc_idx;
+
+	for (gc_idx = 0; gc_idx < MAX_WORDS; gc_idx++) {
+		unsigned int map_idx = gc_idx * 2;
+		void __iomem *en = of_iomap(dn, map_idx + 0);
+		void __iomem *stat = of_iomap(dn, map_idx + 1);
+		void __iomem *base = min(en, stat);
+
+		data->map_base[map_idx + 0] = en;
+		data->map_base[map_idx + 1] = stat;
+
+		if (!base)
+			break;
+
+		data->pair_base[gc_idx] = base;
+		data->en_offset[gc_idx] = en - base;
+		data->stat_offset[gc_idx] = stat - base;
+	}
+
+	if (!gc_idx) {
+		pr_err("unable to map registers\n");
+		return -EINVAL;
+	}
+
+	data->n_words = gc_idx;
+	return 0;
+}
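+
+/*
+ * For example (hypothetical addresses), if word 0's enable and status
+ * registers map 4 bytes apart with the enable register lower, then
+ * pair_base points at the enable register, en_offset = 0 and
+ * stat_offset = 4, so one gc->reg_base can address both registers.
+ */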
+
+static int __init bcm7120_l2_intc_probe(struct device_node *dn,
+				 struct device_node *parent,
+				 int (*iomap_regs_fn)(struct device_node *,
+					struct bcm7120_l2_intc_data *),
+				 const char *intc_name)
+{
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+	struct bcm7120_l2_intc_data *data;
+	struct irq_chip_generic *gc;
+	struct irq_chip_type *ct;
+	int ret = 0;
+	unsigned int idx, irq, flags;
+	u32 valid_mask[MAX_WORDS] = { };
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->num_parent_irqs = of_irq_count(dn);
+	if (data->num_parent_irqs <= 0) {
+		pr_err("invalid number of parent interrupts\n");
+		ret = -EINVAL;
+		goto out_unmap;
+	}
+
+	data->l1_data = kcalloc(data->num_parent_irqs, sizeof(*data->l1_data),
+				GFP_KERNEL);
+	if (!data->l1_data) {
+		ret = -ENOMEM;
+		goto out_free_l1_data;
+	}
+
+	ret = iomap_regs_fn(dn, data);
+	if (ret < 0)
+		goto out_free_l1_data;
+
+	for (irq = 0; irq < data->num_parent_irqs; irq++) {
+		ret = bcm7120_l2_intc_init_one(dn, data, irq, valid_mask);
+		if (ret)
+			goto out_free_l1_data;
+	}
+
+	data->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * data->n_words,
+					     &irq_generic_chip_ops, NULL);
+	if (!data->domain) {
+		ret = -ENOMEM;
+		goto out_free_l1_data;
+	}
+
+	/* MIPS chips strapped for BE will automagically configure the
+	 * peripheral registers for CPU-native byte order.
+	 */
+	flags = IRQ_GC_INIT_MASK_CACHE;
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+		flags |= IRQ_GC_BE_IO;
+
+	ret = irq_alloc_domain_generic_chips(data->domain, IRQS_PER_WORD, 1,
+				dn->full_name, handle_level_irq, clr, 0, flags);
+	if (ret) {
+		pr_err("failed to allocate generic irq chip\n");
+		goto out_free_domain;
+	}
+
+	if (of_property_read_bool(dn, "brcm,irq-can-wake"))
+		data->can_wake = true;
+
+	for (idx = 0; idx < data->n_words; idx++) {
+		irq = idx * IRQS_PER_WORD;
+		gc = irq_get_domain_generic_chip(data->domain, irq);
+
+		gc->unused = 0xffffffff & ~valid_mask[idx];
+		gc->private = data;
+		ct = gc->chip_types;
+
+		gc->reg_base = data->pair_base[idx];
+		ct->regs.mask = data->en_offset[idx];
+
+		/* gc->reg_base is defined and so is gc->writel */
+		irq_reg_writel(gc, data->irq_fwd_mask[idx],
+			       data->en_offset[idx]);
+
+		ct->chip.irq_mask = irq_gc_mask_clr_bit;
+		ct->chip.irq_unmask = irq_gc_mask_set_bit;
+		ct->chip.irq_ack = irq_gc_noop;
+		gc->suspend = bcm7120_l2_intc_suspend;
+		gc->resume = bcm7120_l2_intc_resume;
+
+		/*
+		 * Initialize mask-cache, in case we need it for
+		 * saving/restoring fwd mask even w/o any child interrupts
+		 * installed
+		 */
+		gc->mask_cache = irq_reg_readl(gc, ct->regs.mask);
+
+		if (data->can_wake) {
+			/* This IRQ chip can wake the system, set all
+			 * relevant child interrupts in wake_enabled mask
+			 */
+			gc->wake_enabled = 0xffffffff;
+			gc->wake_enabled &= ~gc->unused;
+			ct->chip.irq_set_wake = irq_gc_set_wake;
+		}
+	}
+
+	return 0;
+
+out_free_domain:
+	irq_domain_remove(data->domain);
+out_free_l1_data:
+	kfree(data->l1_data);
+out_unmap:
+	for (idx = 0; idx < MAX_MAPPINGS; idx++) {
+		if (data->map_base[idx])
+			iounmap(data->map_base[idx]);
+	}
+	kfree(data);
+	return ret;
+}
+
+static int __init bcm7120_l2_intc_probe_7120(struct device_node *dn,
+					     struct device_node *parent)
+{
+	return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_7120,
+				     "BCM7120 L2");
+}
+
+static int __init bcm7120_l2_intc_probe_3380(struct device_node *dn,
+					     struct device_node *parent)
+{
+	return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_3380,
+				     "BCM3380 L2");
+}
+
+IRQCHIP_DECLARE(bcm7120_l2_intc, "brcm,bcm7120-l2-intc",
+		bcm7120_l2_intc_probe_7120);
+
+IRQCHIP_DECLARE(bcm3380_l2_intc, "brcm,bcm3380-l2-intc",
+		bcm7120_l2_intc_probe_3380);
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
new file mode 100644
index 0000000..0e65f60
--- /dev/null
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -0,0 +1,289 @@
+/*
+ * Generic Broadcom Set Top Box Level 2 Interrupt controller driver
+ *
+ * Copyright (C) 2014-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME	": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+
+struct brcmstb_intc_init_params {
+	irq_flow_handler_t handler;
+	int cpu_status;
+	int cpu_clear;
+	int cpu_mask_status;
+	int cpu_mask_set;
+	int cpu_mask_clear;
+};
+
+/* Register offsets in the L2 latched interrupt controller */
+static const struct brcmstb_intc_init_params l2_edge_intc_init = {
+	.handler		= handle_edge_irq,
+	.cpu_status		= 0x00,
+	.cpu_clear		= 0x08,
+	.cpu_mask_status	= 0x0c,
+	.cpu_mask_set		= 0x10,
+	.cpu_mask_clear		= 0x14
+};
+
+/* Register offsets in the L2 level interrupt controller */
+static const struct brcmstb_intc_init_params l2_lvl_intc_init = {
+	.handler		= handle_level_irq,
+	.cpu_status		= 0x00,
+	.cpu_clear		= -1, /* Register not present */
+	.cpu_mask_status	= 0x04,
+	.cpu_mask_set		= 0x08,
+	.cpu_mask_clear		= 0x0C
+};
+
+/* L2 intc private data structure */
+struct brcmstb_l2_intc_data {
+	struct irq_domain *domain;
+	struct irq_chip_generic *gc;
+	int status_offset;
+	int mask_offset;
+	bool can_wake;
+	u32 saved_mask; /* for suspend/resume */
+};
+
+/**
+ * brcmstb_l2_mask_and_ack - Mask and ack pending interrupt
+ * @d: irq_data
+ *
+ * Chip has separate enable/disable registers instead of a single mask
+ * register and pending interrupt is acknowledged by setting a bit.
+ *
+ * Note: This function is generic and could easily be added to the
+ * generic irqchip implementation if there is ever a desire to do so.
+ * Perhaps with a name like irq_gc_mask_disable_and_ack_set().
+ *
+ * e.g.: https://patchwork.kernel.org/patch/9831047/
+ */
+static void brcmstb_l2_mask_and_ack(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+	u32 mask = d->mask;
+
+	irq_gc_lock(gc);
+	irq_reg_writel(gc, mask, ct->regs.disable);
+	*ct->mask_cache &= ~mask;
+	irq_reg_writel(gc, mask, ct->regs.ack);
+	irq_gc_unlock(gc);
+}
+
+static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
+{
+	struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int irq;
+	u32 status;
+
+	chained_irq_enter(chip, desc);
+
+	status = irq_reg_readl(b->gc, b->status_offset) &
+		~(irq_reg_readl(b->gc, b->mask_offset));
+
+	if (status == 0) {
+		raw_spin_lock(&desc->lock);
+		handle_bad_irq(desc);
+		raw_spin_unlock(&desc->lock);
+		goto out;
+	}
+
+	do {
+		irq = ffs(status) - 1;
+		status &= ~(1 << irq);
+		generic_handle_irq(irq_linear_revmap(b->domain, irq));
+	} while (status);
+out:
+	chained_irq_exit(chip, desc);
+}
+
+static void brcmstb_l2_intc_suspend(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+	struct brcmstb_l2_intc_data *b = gc->private;
+
+	irq_gc_lock(gc);
+	/* Save the current mask */
+	b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
+
+	if (b->can_wake) {
+		/* Program the wakeup mask */
+		irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
+		irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
+	}
+	irq_gc_unlock(gc);
+}
+
+static void brcmstb_l2_intc_resume(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+	struct brcmstb_l2_intc_data *b = gc->private;
+
+	irq_gc_lock(gc);
+	if (ct->chip.irq_ack) {
+		/* Clear unmasked non-wakeup interrupts */
+		irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
+				ct->regs.ack);
+	}
+
+	/* Restore the saved mask */
+	irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
+	irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
+	irq_gc_unlock(gc);
+}
+
+static int __init brcmstb_l2_intc_of_init(struct device_node *np,
+					  struct device_node *parent,
+					  const struct brcmstb_intc_init_params
+					  *init_params)
+{
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+	struct brcmstb_l2_intc_data *data;
+	struct irq_chip_type *ct;
+	int ret;
+	unsigned int flags;
+	int parent_irq;
+	void __iomem *base;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	base = of_iomap(np, 0);
+	if (!base) {
+		pr_err("failed to remap intc L2 registers\n");
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	/* Disable all interrupts by default */
+	writel(0xffffffff, base + init_params->cpu_mask_set);
+
+	/* Wakeup interrupts may be retained from S5 (cold boot) */
+	data->can_wake = of_property_read_bool(np, "brcm,irq-can-wake");
+	if (!data->can_wake && (init_params->cpu_clear >= 0))
+		writel(0xffffffff, base + init_params->cpu_clear);
+
+	parent_irq = irq_of_parse_and_map(np, 0);
+	if (!parent_irq) {
+		pr_err("failed to find parent interrupt\n");
+		ret = -EINVAL;
+		goto out_unmap;
+	}
+
+	data->domain = irq_domain_add_linear(np, 32,
+				&irq_generic_chip_ops, NULL);
+	if (!data->domain) {
+		ret = -ENOMEM;
+		goto out_unmap;
+	}
+
+	/* MIPS chips strapped for BE will automagically configure the
+	 * peripheral registers for CPU-native byte order.
+	 */
+	flags = 0;
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+		flags |= IRQ_GC_BE_IO;
+
+	/* Allocate a single Generic IRQ chip for this node */
+	ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
+			np->full_name, init_params->handler, clr, 0, flags);
+	if (ret) {
+		pr_err("failed to allocate generic irq chip\n");
+		goto out_free_domain;
+	}
+
+	/* Set the IRQ chaining logic */
+	irq_set_chained_handler_and_data(parent_irq,
+					 brcmstb_l2_intc_irq_handle, data);
+
+	data->gc = irq_get_domain_generic_chip(data->domain, 0);
+	data->gc->reg_base = base;
+	data->gc->private = data;
+	data->status_offset = init_params->cpu_status;
+	data->mask_offset = init_params->cpu_mask_status;
+
+	ct = data->gc->chip_types;
+
+	if (init_params->cpu_clear >= 0) {
+		ct->regs.ack = init_params->cpu_clear;
+		ct->chip.irq_ack = irq_gc_ack_set_bit;
+		ct->chip.irq_mask_ack = brcmstb_l2_mask_and_ack;
+	} else {
+		/* No Ack - but still slightly more efficient to define this */
+		ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
+	}
+
+	ct->chip.irq_mask = irq_gc_mask_disable_reg;
+	ct->regs.disable = init_params->cpu_mask_set;
+	ct->regs.mask = init_params->cpu_mask_status;
+
+	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+	ct->regs.enable = init_params->cpu_mask_clear;
+
+	ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
+	ct->chip.irq_resume = brcmstb_l2_intc_resume;
+	ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend;
+
+	if (data->can_wake) {
+		/* This IRQ chip can wake the system, set all child interrupts
+		 * in wake_enabled mask
+		 */
+		data->gc->wake_enabled = 0xffffffff;
+		ct->chip.irq_set_wake = irq_gc_set_wake;
+	}
+
+	return 0;
+
+out_free_domain:
+	irq_domain_remove(data->domain);
+out_unmap:
+	iounmap(base);
+out_free:
+	kfree(data);
+	return ret;
+}
+
+int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
+	struct device_node *parent)
+{
+	return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
+}
+IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);
+
+int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
+	struct device_node *parent)
+{
+	return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
+}
+IRQCHIP_DECLARE(bcm7271_l2_intc, "brcm,bcm7271-l2-intc",
+	brcmstb_l2_lvl_intc_of_init);
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
new file mode 100644
index 0000000..f913f4d
--- /dev/null
+++ b/drivers/irqchip/irq-clps711x.c
@@ -0,0 +1,238 @@
+/*
+ *  CLPS711X IRQ driver
+ *
+ *  Copyright (C) 2013 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define CLPS711X_INTSR1	(0x0240)
+#define CLPS711X_INTMR1	(0x0280)
+#define CLPS711X_BLEOI	(0x0600)
+#define CLPS711X_MCEOI	(0x0640)
+#define CLPS711X_TEOI	(0x0680)
+#define CLPS711X_TC1EOI	(0x06c0)
+#define CLPS711X_TC2EOI	(0x0700)
+#define CLPS711X_RTCEOI	(0x0740)
+#define CLPS711X_UMSEOI	(0x0780)
+#define CLPS711X_COEOI	(0x07c0)
+#define CLPS711X_INTSR2	(0x1240)
+#define CLPS711X_INTMR2	(0x1280)
+#define CLPS711X_SRXEOF	(0x1600)
+#define CLPS711X_KBDEOI	(0x1700)
+#define CLPS711X_INTSR3	(0x2240)
+#define CLPS711X_INTMR3	(0x2280)
+
+static const struct {
+#define CLPS711X_FLAG_EN	(1 << 0)
+#define CLPS711X_FLAG_FIQ	(1 << 1)
+	unsigned int	flags;
+	phys_addr_t	eoi;
+} clps711x_irqs[] = {
+	[1]	= { CLPS711X_FLAG_FIQ, CLPS711X_BLEOI, },
+	[3]	= { CLPS711X_FLAG_FIQ, CLPS711X_MCEOI, },
+	[4]	= { CLPS711X_FLAG_EN, CLPS711X_COEOI, },
+	[5]	= { CLPS711X_FLAG_EN, },
+	[6]	= { CLPS711X_FLAG_EN, },
+	[7]	= { CLPS711X_FLAG_EN, },
+	[8]	= { CLPS711X_FLAG_EN, CLPS711X_TC1EOI, },
+	[9]	= { CLPS711X_FLAG_EN, CLPS711X_TC2EOI, },
+	[10]	= { CLPS711X_FLAG_EN, CLPS711X_RTCEOI, },
+	[11]	= { CLPS711X_FLAG_EN, CLPS711X_TEOI, },
+	[12]	= { CLPS711X_FLAG_EN, },
+	[13]	= { CLPS711X_FLAG_EN, },
+	[14]	= { CLPS711X_FLAG_EN, CLPS711X_UMSEOI, },
+	[15]	= { CLPS711X_FLAG_EN, CLPS711X_SRXEOF, },
+	[16]	= { CLPS711X_FLAG_EN, CLPS711X_KBDEOI, },
+	[17]	= { CLPS711X_FLAG_EN, },
+	[18]	= { CLPS711X_FLAG_EN, },
+	[28]	= { CLPS711X_FLAG_EN, },
+	[29]	= { CLPS711X_FLAG_EN, },
+	[32]	= { CLPS711X_FLAG_FIQ, },
+};
+
+static struct {
+	void __iomem		*base;
+	void __iomem		*intmr[3];
+	void __iomem		*intsr[3];
+	struct irq_domain	*domain;
+	struct irq_domain_ops	ops;
+} *clps711x_intc;
+
+static asmlinkage void __exception_irq_entry clps711x_irqh(struct pt_regs *regs)
+{
+	u32 irqstat;
+
+	do {
+		irqstat = readw_relaxed(clps711x_intc->intmr[0]) &
+			  readw_relaxed(clps711x_intc->intsr[0]);
+		if (irqstat)
+			handle_domain_irq(clps711x_intc->domain,
+					  fls(irqstat) - 1, regs);
+
+		irqstat = readw_relaxed(clps711x_intc->intmr[1]) &
+			  readw_relaxed(clps711x_intc->intsr[1]);
+		if (irqstat)
+			handle_domain_irq(clps711x_intc->domain,
+					  fls(irqstat) - 1 + 16, regs);
+	} while (irqstat);
+}
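+
+/*
+ * hwirqs 0-15 live in INTSR1/INTMR1 and hwirqs 16-31 in INTSR2/INTMR2
+ * (hence the "+ 16" above); e.g. hwirq 18 is bit 2 of INTMR2.  The
+ * FIQ sources in the table above are never dispatched from here.
+ */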
+
+static void clps711x_intc_eoi(struct irq_data *d)
+{
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+	writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hwirq].eoi);
+}
+
+static void clps711x_intc_mask(struct irq_data *d)
+{
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+	void __iomem *intmr = clps711x_intc->intmr[hwirq / 16];
+	u32 tmp;
+
+	tmp = readl_relaxed(intmr);
+	tmp &= ~(1 << (hwirq % 16));
+	writel_relaxed(tmp, intmr);
+}
+
+static void clps711x_intc_unmask(struct irq_data *d)
+{
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+	void __iomem *intmr = clps711x_intc->intmr[hwirq / 16];
+	u32 tmp;
+
+	tmp = readl_relaxed(intmr);
+	tmp |= 1 << (hwirq % 16);
+	writel_relaxed(tmp, intmr);
+}
+
+static struct irq_chip clps711x_intc_chip = {
+	.name		= "clps711x-intc",
+	.irq_eoi	= clps711x_intc_eoi,
+	.irq_mask	= clps711x_intc_mask,
+	.irq_unmask	= clps711x_intc_unmask,
+};
+
+static int __init clps711x_intc_irq_map(struct irq_domain *h, unsigned int virq,
+					irq_hw_number_t hw)
+{
+	irq_flow_handler_t handler = handle_level_irq;
+	unsigned int flags = 0;
+
+	if (!clps711x_irqs[hw].flags)
+		return 0;
+
+	if (clps711x_irqs[hw].flags & CLPS711X_FLAG_FIQ) {
+		handler = handle_bad_irq;
+		flags |= IRQ_NOAUTOEN;
+	} else if (clps711x_irqs[hw].eoi) {
+		handler = handle_fasteoi_irq;
+	}
+
+	/* Clear down pending interrupt */
+	if (clps711x_irqs[hw].eoi)
+		writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hw].eoi);
+
+	irq_set_chip_and_handler(virq, &clps711x_intc_chip, handler);
+	irq_modify_status(virq, IRQ_NOPROBE, flags);
+
+	return 0;
+}
+
+static int __init _clps711x_intc_init(struct device_node *np,
+				      phys_addr_t base, resource_size_t size)
+{
+	int err;
+
+	clps711x_intc = kzalloc(sizeof(*clps711x_intc), GFP_KERNEL);
+	if (!clps711x_intc)
+		return -ENOMEM;
+
+	clps711x_intc->base = ioremap(base, size);
+	if (!clps711x_intc->base) {
+		err = -ENOMEM;
+		goto out_kfree;
+	}
+
+	clps711x_intc->intsr[0] = clps711x_intc->base + CLPS711X_INTSR1;
+	clps711x_intc->intmr[0] = clps711x_intc->base + CLPS711X_INTMR1;
+	clps711x_intc->intsr[1] = clps711x_intc->base + CLPS711X_INTSR2;
+	clps711x_intc->intmr[1] = clps711x_intc->base + CLPS711X_INTMR2;
+	clps711x_intc->intsr[2] = clps711x_intc->base + CLPS711X_INTSR3;
+	clps711x_intc->intmr[2] = clps711x_intc->base + CLPS711X_INTMR3;
+
+	/* Mask all interrupts */
+	writel_relaxed(0, clps711x_intc->intmr[0]);
+	writel_relaxed(0, clps711x_intc->intmr[1]);
+	writel_relaxed(0, clps711x_intc->intmr[2]);
+
+	err = irq_alloc_descs(-1, 0, ARRAY_SIZE(clps711x_irqs), numa_node_id());
+	if (err < 0)
+		goto out_iounmap;
+
+	clps711x_intc->ops.map = clps711x_intc_irq_map;
+	clps711x_intc->ops.xlate = irq_domain_xlate_onecell;
+	clps711x_intc->domain =
+		irq_domain_add_legacy(np, ARRAY_SIZE(clps711x_irqs),
+				      0, 0, &clps711x_intc->ops, NULL);
+	if (!clps711x_intc->domain) {
+		err = -ENOMEM;
+		goto out_irqfree;
+	}
+
+	irq_set_default_host(clps711x_intc->domain);
+	set_handle_irq(clps711x_irqh);
+
+#ifdef CONFIG_FIQ
+	init_FIQ(0);
+#endif
+
+	return 0;
+
+out_irqfree:
+	irq_free_descs(0, ARRAY_SIZE(clps711x_irqs));
+
+out_iounmap:
+	iounmap(clps711x_intc->base);
+
+out_kfree:
+	kfree(clps711x_intc);
+
+	return err;
+}
+
+void __init clps711x_intc_init(phys_addr_t base, resource_size_t size)
+{
+	BUG_ON(_clps711x_intc_init(NULL, base, size));
+}
+
+#ifdef CONFIG_IRQCHIP
+static int __init clps711x_intc_init_dt(struct device_node *np,
+					struct device_node *parent)
+{
+	struct resource res;
+	int err;
+
+	err = of_address_to_resource(np, 0, &res);
+	if (err)
+		return err;
+
+	return _clps711x_intc_init(np, res.start, resource_size(&res));
+}
+IRQCHIP_DECLARE(clps711x, "cirrus,ep7209-intc", clps711x_intc_init_dt);
+#endif
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
new file mode 100644
index 0000000..99d97d7
--- /dev/null
+++ b/drivers/irqchip/irq-crossbar.c
@@ -0,0 +1,370 @@
+/*
+ *  drivers/irqchip/irq-crossbar.c
+ *
+ *  Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ *  Author: Sricharan R <r.sricharan@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#define IRQ_FREE	-1
+#define IRQ_RESERVED	-2
+#define IRQ_SKIP	-3
+#define GIC_IRQ_START	32
+
+/**
+ * struct crossbar_device - crossbar device description
+ * @lock: spinlock serializing access to @irq_map
+ * @int_max: maximum number of supported interrupts
+ * @safe_map: safe default value to initialize the crossbar
+ * @max_crossbar_sources: Maximum number of crossbar sources
+ * @irq_map: array of interrupts to crossbar number mapping
+ * @crossbar_base: crossbar base address
+ * @register_offsets: offsets for each irq number
+ * @write: register write function pointer
+ */
+struct crossbar_device {
+	raw_spinlock_t lock;
+	uint int_max;
+	uint safe_map;
+	uint max_crossbar_sources;
+	uint *irq_map;
+	void __iomem *crossbar_base;
+	int *register_offsets;
+	void (*write)(int, int);
+};
+
+static struct crossbar_device *cb;
+
+static void crossbar_writel(int irq_no, int cb_no)
+{
+	writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
+}
+
+static void crossbar_writew(int irq_no, int cb_no)
+{
+	writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
+}
+
+static void crossbar_writeb(int irq_no, int cb_no)
+{
+	writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
+}
+
+static struct irq_chip crossbar_chip = {
+	.name			= "CBAR",
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_set_type		= irq_chip_set_type_parent,
+	.flags			= IRQCHIP_MASK_ON_SUSPEND |
+				  IRQCHIP_SKIP_SET_WAKE,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+#endif
+};
+
+static int allocate_gic_irq(struct irq_domain *domain, unsigned virq,
+			    irq_hw_number_t hwirq)
+{
+	struct irq_fwspec fwspec;
+	int i;
+	int err;
+
+	if (!irq_domain_get_of_node(domain->parent))
+		return -EINVAL;
+
+	raw_spin_lock(&cb->lock);
+	for (i = cb->int_max - 1; i >= 0; i--) {
+		if (cb->irq_map[i] == IRQ_FREE) {
+			cb->irq_map[i] = hwirq;
+			break;
+		}
+	}
+	raw_spin_unlock(&cb->lock);
+
+	if (i < 0)
+		return -ENODEV;
+
+	fwspec.fwnode = domain->parent->fwnode;
+	fwspec.param_count = 3;
+	fwspec.param[0] = 0;	/* SPI */
+	fwspec.param[1] = i;
+	fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
+
+	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+	if (err)
+		cb->irq_map[i] = IRQ_FREE;
+	else
+		cb->write(i, hwirq);
+
+	return err;
+}
+
+static int crossbar_domain_alloc(struct irq_domain *d, unsigned int virq,
+				 unsigned int nr_irqs, void *data)
+{
+	struct irq_fwspec *fwspec = data;
+	irq_hw_number_t hwirq;
+	int i;
+
+	if (fwspec->param_count != 3)
+		return -EINVAL;	/* Not GIC compliant */
+	if (fwspec->param[0] != 0)
+		return -EINVAL;	/* No PPI should point to this domain */
+
+	hwirq = fwspec->param[1];
+	if ((hwirq + nr_irqs) > cb->max_crossbar_sources)
+		return -EINVAL;	/* Can't deal with this */
+
+	for (i = 0; i < nr_irqs; i++) {
+		int err = allocate_gic_irq(d, virq + i, hwirq + i);
+
+		if (err)
+			return err;
+
+		irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
+					      &crossbar_chip, NULL);
+	}
+
+	return 0;
+}
+
+/**
+ * crossbar_domain_free - unmap/free a crossbar<->irq connection
+ * @domain: domain of irq to unmap
+ * @virq: virq number
+ * @nr_irqs: number of irqs to free
+ *
+ * We do not maintain a use count of the total number of map/unmap
+ * calls for a particular irq to find out if an irq can really be
+ * unmapped. This is because unmap is called during irq_dispose_mapping(irq),
+ * after which the irq is unusable anyway, so an explicit map has to be
+ * called after that.
+ */
+static void crossbar_domain_free(struct irq_domain *domain, unsigned int virq,
+				 unsigned int nr_irqs)
+{
+	int i;
+
+	raw_spin_lock(&cb->lock);
+	for (i = 0; i < nr_irqs; i++) {
+		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+
+		irq_domain_reset_irq_data(d);
+		cb->irq_map[d->hwirq] = IRQ_FREE;
+		cb->write(d->hwirq, cb->safe_map);
+	}
+	raw_spin_unlock(&cb->lock);
+}
+
+static int crossbar_domain_translate(struct irq_domain *d,
+				     struct irq_fwspec *fwspec,
+				     unsigned long *hwirq,
+				     unsigned int *type)
+{
+	if (is_of_node(fwspec->fwnode)) {
+		if (fwspec->param_count != 3)
+			return -EINVAL;
+
+		/* No PPI should point to this domain */
+		if (fwspec->param[0] != 0)
+			return -EINVAL;
+
+		*hwirq = fwspec->param[1];
+		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static const struct irq_domain_ops crossbar_domain_ops = {
+	.alloc		= crossbar_domain_alloc,
+	.free		= crossbar_domain_free,
+	.translate	= crossbar_domain_translate,
+};
+
+static int __init crossbar_of_init(struct device_node *node)
+{
+	u32 max = 0, entry, reg_size = 0;
+	int i, size, reserved = 0;
+	const __be32 *irqsr;
+	int ret = -ENOMEM;
+
+	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
+
+	if (!cb)
+		return ret;
+
+	cb->crossbar_base = of_iomap(node, 0);
+	if (!cb->crossbar_base)
+		goto err_cb;
+
+	of_property_read_u32(node, "ti,max-crossbar-sources",
+			     &cb->max_crossbar_sources);
+	if (!cb->max_crossbar_sources) {
+		pr_err("missing 'ti,max-crossbar-sources' property\n");
+		ret = -EINVAL;
+		goto err_base;
+	}
+
+	of_property_read_u32(node, "ti,max-irqs", &max);
+	if (!max) {
+		pr_err("missing 'ti,max-irqs' property\n");
+		ret = -EINVAL;
+		goto err_base;
+	}
+	cb->irq_map = kcalloc(max, sizeof(int), GFP_KERNEL);
+	if (!cb->irq_map)
+		goto err_base;
+
+	cb->int_max = max;
+
+	for (i = 0; i < max; i++)
+		cb->irq_map[i] = IRQ_FREE;
+
+	/* Get and mark reserved irqs */
+	irqsr = of_get_property(node, "ti,irqs-reserved", &size);
+	if (irqsr) {
+		size /= sizeof(__be32);
+
+		for (i = 0; i < size; i++) {
+			of_property_read_u32_index(node,
+						   "ti,irqs-reserved",
+						   i, &entry);
+			if (entry >= max) {
+				pr_err("Invalid reserved entry\n");
+				ret = -EINVAL;
+				goto err_irq_map;
+			}
+			cb->irq_map[entry] = IRQ_RESERVED;
+		}
+	}
+
+	/* Skip irqs hardwired to bypass the crossbar */
+	irqsr = of_get_property(node, "ti,irqs-skip", &size);
+	if (irqsr) {
+		size /= sizeof(__be32);
+
+		for (i = 0; i < size; i++) {
+			of_property_read_u32_index(node,
+						   "ti,irqs-skip",
+						   i, &entry);
+			if (entry >= max) {
+				pr_err("Invalid skip entry\n");
+				ret = -EINVAL;
+				goto err_irq_map;
+			}
+			cb->irq_map[entry] = IRQ_SKIP;
+		}
+	}
+
+
+	cb->register_offsets = kcalloc(max, sizeof(int), GFP_KERNEL);
+	if (!cb->register_offsets)
+		goto err_irq_map;
+
+	of_property_read_u32(node, "ti,reg-size", &reg_size);
+
+	switch (reg_size) {
+	case 1:
+		cb->write = crossbar_writeb;
+		break;
+	case 2:
+		cb->write = crossbar_writew;
+		break;
+	case 4:
+		cb->write = crossbar_writel;
+		break;
+	default:
+		pr_err("Invalid reg-size property\n");
+		ret = -EINVAL;
+		goto err_reg_offset;
+	}
+
+	/*
+	 * Register offsets are not linear because of the
+	 * reserved irqs, so find and store the offsets once.
+	 */
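+	/*
+	 * For example, with reg_size = 2 and irq_map =
+	 * { FREE, RESERVED, FREE }, the loop below yields
+	 * register_offsets = { 0, <unused>, 2 }: reserved entries
+	 * consume no register space.
+	 */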
+	for (i = 0; i < max; i++) {
+		if (cb->irq_map[i] == IRQ_RESERVED)
+			continue;
+
+		cb->register_offsets[i] = reserved;
+		reserved += reg_size;
+	}
+
+	of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
+	/* Initialize the crossbar with safe map to start with */
+	for (i = 0; i < max; i++) {
+		if (cb->irq_map[i] == IRQ_RESERVED ||
+		    cb->irq_map[i] == IRQ_SKIP)
+			continue;
+
+		cb->write(i, cb->safe_map);
+	}
+
+	raw_spin_lock_init(&cb->lock);
+
+	return 0;
+
+err_reg_offset:
+	kfree(cb->register_offsets);
+err_irq_map:
+	kfree(cb->irq_map);
+err_base:
+	iounmap(cb->crossbar_base);
+err_cb:
+	kfree(cb);
+
+	cb = NULL;
+	return ret;
+}
+
+static int __init irqcrossbar_init(struct device_node *node,
+				   struct device_node *parent)
+{
+	struct irq_domain *parent_domain, *domain;
+	int err;
+
+	if (!parent) {
+		pr_err("%pOF: no parent, giving up\n", node);
+		return -ENODEV;
+	}
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain) {
+		pr_err("%pOF: unable to obtain parent domain\n", node);
+		return -ENXIO;
+	}
+
+	err = crossbar_of_init(node);
+	if (err)
+		return err;
+
+	domain = irq_domain_add_hierarchy(parent_domain, 0,
+					  cb->max_crossbar_sources,
+					  node, &crossbar_domain_ops,
+					  NULL);
+	if (!domain) {
+		pr_err("%pOF: failed to allocate domain\n", node);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+IRQCHIP_DECLARE(ti_irqcrossbar, "ti,irq-crossbar", irqcrossbar_init);
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
new file mode 100644
index 0000000..fc38d2d
--- /dev/null
+++ b/drivers/irqchip/irq-digicolor.c
@@ -0,0 +1,119 @@
+/*
+ * Conexant Digicolor SoCs IRQ chip driver
+ *
+ * Author: Baruch Siach <baruch@tkos.co.il>
+ *
+ * Copyright (C) 2014 Paradox Innovation Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include <asm/exception.h>
+
+#define UC_IRQ_CONTROL		0x04
+
+#define IC_FLAG_CLEAR_LO	0x00
+#define IC_FLAG_CLEAR_XLO	0x04
+#define IC_INT0ENABLE_LO	0x10
+#define IC_INT0ENABLE_XLO	0x14
+#define IC_INT0STATUS_LO	0x18
+#define IC_INT0STATUS_XLO	0x1c
+
+static struct irq_domain *digicolor_irq_domain;
+
+static void __exception_irq_entry digicolor_handle_irq(struct pt_regs *regs)
+{
+	struct irq_domain_chip_generic *dgc = digicolor_irq_domain->gc;
+	struct irq_chip_generic *gc = dgc->gc[0];
+	u32 status, hwirq;
+
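+	/* Drain both 32-bit status banks until no interrupt is pending */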
+	do {
+		status = irq_reg_readl(gc, IC_INT0STATUS_LO);
+		if (status) {
+			hwirq = ffs(status) - 1;
+		} else {
+			status = irq_reg_readl(gc, IC_INT0STATUS_XLO);
+			if (status)
+				hwirq = ffs(status) - 1 + 32;
+			else
+				return;
+		}
+
+		handle_domain_irq(digicolor_irq_domain, hwirq, regs);
+	} while (1);
+}
+
+static void __init digicolor_set_gc(void __iomem *reg_base, unsigned irq_base,
+				    unsigned en_reg, unsigned ack_reg)
+{
+	struct irq_chip_generic *gc;
+
+	gc = irq_get_domain_generic_chip(digicolor_irq_domain, irq_base);
+	gc->reg_base = reg_base;
+	gc->chip_types[0].regs.ack = ack_reg;
+	gc->chip_types[0].regs.mask = en_reg;
+	gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
+	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+}
+
+static int __init digicolor_of_init(struct device_node *node,
+				struct device_node *parent)
+{
+	void __iomem *reg_base;
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+	struct regmap *ucregs;
+	int ret;
+
+	reg_base = of_iomap(node, 0);
+	if (!reg_base) {
+		pr_err("%pOF: unable to map IC registers\n", node);
+		return -ENXIO;
+	}
+
+	/* disable all interrupts */
+	writel(0, reg_base + IC_INT0ENABLE_LO);
+	writel(0, reg_base + IC_INT0ENABLE_XLO);
+
+	ucregs = syscon_regmap_lookup_by_phandle(node, "syscon");
+	if (IS_ERR(ucregs)) {
+		pr_err("%pOF: unable to map UC registers\n", node);
+		return PTR_ERR(ucregs);
+	}
+	/* channel 1, regular IRQs */
+	regmap_write(ucregs, UC_IRQ_CONTROL, 1);
+
+	digicolor_irq_domain =
+		irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL);
+	if (!digicolor_irq_domain) {
+		pr_err("%pOF: unable to create IRQ domain\n", node);
+		return -ENOMEM;
+	}
+
+	ret = irq_alloc_domain_generic_chips(digicolor_irq_domain, 32, 1,
+					     "digicolor_irq", handle_level_irq,
+					     clr, 0, 0);
+	if (ret) {
+		pr_err("%pOF: unable to allocate IRQ gc\n", node);
+		return ret;
+	}
+
+	digicolor_set_gc(reg_base, 0, IC_INT0ENABLE_LO, IC_FLAG_CLEAR_LO);
+	digicolor_set_gc(reg_base, 32, IC_INT0ENABLE_XLO, IC_FLAG_CLEAR_XLO);
+
+	set_handle_irq(digicolor_handle_irq);
+
+	return 0;
+}
+IRQCHIP_DECLARE(conexant_digicolor_ic, "cnxt,cx92755-ic", digicolor_of_init);
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
new file mode 100644
index 0000000..0a19618
--- /dev/null
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -0,0 +1,160 @@
+/*
+ * Synopsys DW APB ICTL irqchip driver.
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * based on GPL'ed 2.6 kernel sources
+ *  (c) Marvell International Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define APB_INT_ENABLE_L	0x00
+#define APB_INT_ENABLE_H	0x04
+#define APB_INT_MASK_L		0x08
+#define APB_INT_MASK_H		0x0c
+#define APB_INT_FINALSTATUS_L	0x30
+#define APB_INT_FINALSTATUS_H	0x34
+#define APB_INT_BASE_OFFSET	0x04
+
+static void dw_apb_ictl_handler(struct irq_desc *desc)
+{
+	struct irq_domain *d = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	int n;
+
+	chained_irq_enter(chip, desc);
+
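+	/* Walk each 32-input bank; FINALSTATUS reports pending unmasked irqs */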
+	for (n = 0; n < d->revmap_size; n += 32) {
+		struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, n);
+		u32 stat = readl_relaxed(gc->reg_base + APB_INT_FINALSTATUS_L);
+
+		while (stat) {
+			u32 hwirq = ffs(stat) - 1;
+			u32 virq = irq_find_mapping(d, gc->irq_base + hwirq);
+
+			generic_handle_irq(virq);
+			stat &= ~(1 << hwirq);
+		}
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+#ifdef CONFIG_PM
+static void dw_apb_ictl_resume(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
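+	/* Re-enable all inputs, then restore the saved mask register */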
+	irq_gc_lock(gc);
+	writel_relaxed(~0, gc->reg_base + ct->regs.enable);
+	writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask);
+	irq_gc_unlock(gc);
+}
+#else
+#define dw_apb_ictl_resume	NULL
+#endif /* CONFIG_PM */
+
+static int __init dw_apb_ictl_init(struct device_node *np,
+				   struct device_node *parent)
+{
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+	struct resource r;
+	struct irq_domain *domain;
+	struct irq_chip_generic *gc;
+	void __iomem *iobase;
+	int ret, nrirqs, irq, i;
+	u32 reg;
+
+	/* Map the parent interrupt for the chained handler */
+	irq = irq_of_parse_and_map(np, 0);
+	if (irq <= 0) {
+		pr_err("%pOF: unable to parse irq\n", np);
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(np, 0, &r);
+	if (ret) {
+		pr_err("%pOF: unable to get resource\n", np);
+		return ret;
+	}
+
+	if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
+		pr_err("%pOF: unable to request mem region\n", np);
+		return -ENOMEM;
+	}
+
+	iobase = ioremap(r.start, resource_size(&r));
+	if (!iobase) {
+		pr_err("%pOF: unable to map resource\n", np);
+		ret = -ENOMEM;
+		goto err_release;
+	}
+
+	/*
+	 * The DW IP can be configured to allow 2-64 irqs. We can determine
+	 * the number of irqs supported by writing into the enable register
+	 * and looking for bits not set, as the corresponding flip-flops
+	 * will have been removed by the synthesis tool.
+	 */
+
+	/* mask and enable all interrupts */
+	writel_relaxed(~0, iobase + APB_INT_MASK_L);
+	writel_relaxed(~0, iobase + APB_INT_MASK_H);
+	writel_relaxed(~0, iobase + APB_INT_ENABLE_L);
+	writel_relaxed(~0, iobase + APB_INT_ENABLE_H);
+
+	reg = readl_relaxed(iobase + APB_INT_ENABLE_H);
+	if (reg)
+		nrirqs = 32 + fls(reg);
+	else
+		nrirqs = fls(readl_relaxed(iobase + APB_INT_ENABLE_L));
+
+	domain = irq_domain_add_linear(np, nrirqs,
+				       &irq_generic_chip_ops, NULL);
+	if (!domain) {
+		pr_err("%pOF: unable to add irq domain\n", np);
+		ret = -ENOMEM;
+		goto err_unmap;
+	}
+
+	ret = irq_alloc_domain_generic_chips(domain, 32, 1, np->name,
+					     handle_level_irq, clr, 0,
+					     IRQ_GC_INIT_MASK_CACHE);
+	if (ret) {
+		pr_err("%pOF: unable to alloc irq domain gc\n", np);
+		goto err_unmap;
+	}
+
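+	/*
+	 * Offset each bank's reg_base by 4 so that the _L register
+	 * offsets address the corresponding _H registers for bank 1.
+	 */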
+	for (i = 0; i < DIV_ROUND_UP(nrirqs, 32); i++) {
+		gc = irq_get_domain_generic_chip(domain, i * 32);
+		gc->reg_base = iobase + i * APB_INT_BASE_OFFSET;
+		gc->chip_types[0].regs.mask = APB_INT_MASK_L;
+		gc->chip_types[0].regs.enable = APB_INT_ENABLE_L;
+		gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+		gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume;
+	}
+
+	irq_set_chained_handler_and_data(irq, dw_apb_ictl_handler, domain);
+
+	return 0;
+
+err_unmap:
+	iounmap(iobase);
+err_release:
+	release_mem_region(r.start, resource_size(&r));
+	return ret;
+}
+IRQCHIP_DECLARE(dw_apb_ictl,
+		"snps,dw-apb-ictl", dw_apb_ictl_init);
diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c
new file mode 100644
index 0000000..2a7a388
--- /dev/null
+++ b/drivers/irqchip/irq-eznps.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <soc/nps/common.h>
+
+#define NPS_NR_CPU_IRQS 8  /* number of interrupt lines of NPS400 CPU */
+#define NPS_TIMER0_IRQ  3
+
+/*
+ * The NPS400 core includes Interrupt Controller (IC) support.
+ * All cores can deactivate level irqs at the first level of control,
+ * in the cores' mesh layer called MTM.
+ * For devices outside the chip, e.g. UART and network, there is a
+ * second level called the Global Interrupt Manager (GIM), which can
+ * control both level and edge interrupts.
+ *
+ * NOTE: AUX_IENABLE and CTOP_AUX_IACK are auxiliary registers
+ * with a private HW copy per CPU.
+ */
+
+static void nps400_irq_mask(struct irq_data *irqd)
+{
+	unsigned int ienb;
+	unsigned int irq = irqd_to_hwirq(irqd);
+
+	ienb = read_aux_reg(AUX_IENABLE);
+	ienb &= ~(1 << irq);
+	write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static void nps400_irq_unmask(struct irq_data *irqd)
+{
+	unsigned int ienb;
+	unsigned int irq = irqd_to_hwirq(irqd);
+
+	ienb = read_aux_reg(AUX_IENABLE);
+	ienb |= (1 << irq);
+	write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static void nps400_irq_eoi_global(struct irq_data *irqd)
+{
+	unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);
+
+	write_aux_reg(CTOP_AUX_IACK, 1 << irq);
+
+	/* Don't ack GIC before all device access attempts are done */
+	mb();
+
+	nps_ack_gic();
+}
+
+static void nps400_irq_ack(struct irq_data *irqd)
+{
+	unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);
+
+	write_aux_reg(CTOP_AUX_IACK, 1 << irq);
+}
+
+static struct irq_chip nps400_irq_chip_fasteoi = {
+	.name		= "NPS400 IC Global",
+	.irq_mask	= nps400_irq_mask,
+	.irq_unmask	= nps400_irq_unmask,
+	.irq_eoi	= nps400_irq_eoi_global,
+};
+
+static struct irq_chip nps400_irq_chip_percpu = {
+	.name		= "NPS400 IC",
+	.irq_mask	= nps400_irq_mask,
+	.irq_unmask	= nps400_irq_unmask,
+	.irq_ack	= nps400_irq_ack,
+};
+
+static int nps400_irq_map(struct irq_domain *d, unsigned int virq,
+			  irq_hw_number_t hw)
+{
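+	/* The timer and IPI lines are banked per CPU and use per-cpu handlers */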
+	switch (hw) {
+	case NPS_TIMER0_IRQ:
+#ifdef CONFIG_SMP
+	case NPS_IPI_IRQ:
+#endif
+		irq_set_percpu_devid(virq);
+		irq_set_chip_and_handler(virq, &nps400_irq_chip_percpu,
+					 handle_percpu_devid_irq);
+		break;
+	default:
+		irq_set_chip_and_handler(virq, &nps400_irq_chip_fasteoi,
+					 handle_fasteoi_irq);
+		break;
+	}
+
+	return 0;
+}
+
+static const struct irq_domain_ops nps400_irq_ops = {
+	.xlate = irq_domain_xlate_onecell,
+	.map = nps400_irq_map,
+};
+
+static int __init nps400_of_init(struct device_node *node,
+				 struct device_node *parent)
+{
+	struct irq_domain *nps400_root_domain;
+
+	if (parent) {
+		pr_err("DeviceTree in-core IC is not a root irq controller\n");
+		return -EINVAL;
+	}
+
+	nps400_root_domain = irq_domain_add_linear(node, NPS_NR_CPU_IRQS,
+						   &nps400_irq_ops, NULL);
+
+	if (!nps400_root_domain) {
+		pr_err("nps400 root irq domain not available\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Needed for primary domain lookup to succeed
+	 * This is a primary irqchip, and can never have a parent
+	 */
+	irq_set_default_host(nps400_root_domain);
+
+#ifdef CONFIG_SMP
+	irq_create_mapping(nps400_root_domain, NPS_IPI_IRQ);
+#endif
+
+	return 0;
+}
+IRQCHIP_DECLARE(ezchip_nps400_ic, "ezchip,nps400-ic", nps400_of_init);
diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c
new file mode 100644
index 0000000..0bf9842
--- /dev/null
+++ b/drivers/irqchip/irq-ftintc010.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * irqchip for the Faraday Technology FTINTC010
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-gemini/irq.c
+ * Copyright (C) 2001-2006 Storlink, Corp.
+ * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@gmail.com>
+ */
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/versatile-fpga.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/cpu.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define FT010_NUM_IRQS 32
+
+#define FT010_IRQ_SOURCE(base_addr)	(base_addr + 0x00)
+#define FT010_IRQ_MASK(base_addr)	(base_addr + 0x04)
+#define FT010_IRQ_CLEAR(base_addr)	(base_addr + 0x08)
+/* Selects level- or edge-triggered */
+#define FT010_IRQ_MODE(base_addr)	(base_addr + 0x0C)
+/* Selects active low/high or falling/rising edge */
+#define FT010_IRQ_POLARITY(base_addr)	(base_addr + 0x10)
+#define FT010_IRQ_STATUS(base_addr)	(base_addr + 0x14)
+#define FT010_FIQ_SOURCE(base_addr)	(base_addr + 0x20)
+#define FT010_FIQ_MASK(base_addr)	(base_addr + 0x24)
+#define FT010_FIQ_CLEAR(base_addr)	(base_addr + 0x28)
+#define FT010_FIQ_MODE(base_addr)	(base_addr + 0x2C)
+#define FT010_FIQ_POLARITY(base_addr)	(base_addr + 0x30)
+#define FT010_FIQ_STATUS(base_addr)	(base_addr + 0x34)
+
+/**
+ * struct ft010_irq_data - irq data container for the Faraday IRQ controller
+ * @base: register base in virtual memory
+ * @chip: chip container for this instance
+ * @domain: IRQ domain for this instance
+ */
+struct ft010_irq_data {
+	void __iomem *base;
+	struct irq_chip chip;
+	struct irq_domain *domain;
+};
+
+static void ft010_irq_mask(struct irq_data *d)
+{
+	struct ft010_irq_data *f = irq_data_get_irq_chip_data(d);
+	unsigned int mask;
+
+	mask = readl(FT010_IRQ_MASK(f->base));
+	mask &= ~BIT(irqd_to_hwirq(d));
+	writel(mask, FT010_IRQ_MASK(f->base));
+}
+
+static void ft010_irq_unmask(struct irq_data *d)
+{
+	struct ft010_irq_data *f = irq_data_get_irq_chip_data(d);
+	unsigned int mask;
+
+	mask = readl(FT010_IRQ_MASK(f->base));
+	mask |= BIT(irqd_to_hwirq(d));
+	writel(mask, FT010_IRQ_MASK(f->base));
+}
+
+static void ft010_irq_ack(struct irq_data *d)
+{
+	struct ft010_irq_data *f = irq_data_get_irq_chip_data(d);
+
+	writel(BIT(irqd_to_hwirq(d)), FT010_IRQ_CLEAR(f->base));
+}
+
+static int ft010_irq_set_type(struct irq_data *d, unsigned int trigger)
+{
+	struct ft010_irq_data *f = irq_data_get_irq_chip_data(d);
+	int offset = irqd_to_hwirq(d);
+	u32 mode, polarity;
+
+	mode = readl(FT010_IRQ_MODE(f->base));
+	polarity = readl(FT010_IRQ_POLARITY(f->base));
+
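+	/* mode: 0 = level, 1 = edge; polarity: 1 = low/falling, 0 = high/rising */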
+	if (trigger & (IRQ_TYPE_LEVEL_LOW)) {
+		irq_set_handler_locked(d, handle_level_irq);
+		mode &= ~BIT(offset);
+		polarity |= BIT(offset);
+	} else if (trigger & (IRQ_TYPE_LEVEL_HIGH)) {
+		irq_set_handler_locked(d, handle_level_irq);
+		mode &= ~BIT(offset);
+		polarity &= ~BIT(offset);
+	} else if (trigger & IRQ_TYPE_EDGE_FALLING) {
+		irq_set_handler_locked(d, handle_edge_irq);
+		mode |= BIT(offset);
+		polarity |= BIT(offset);
+	} else if (trigger & IRQ_TYPE_EDGE_RISING) {
+		irq_set_handler_locked(d, handle_edge_irq);
+		mode |= BIT(offset);
+		polarity &= ~BIT(offset);
+	} else {
+		irq_set_handler_locked(d, handle_bad_irq);
+		pr_warn("Faraday IRQ: no supported trigger selected for line %d\n",
+			offset);
+	}
+
+	writel(mode, FT010_IRQ_MODE(f->base));
+	writel(polarity, FT010_IRQ_POLARITY(f->base));
+
+	return 0;
+}
+
+static struct irq_chip ft010_irq_chip = {
+	.name		= "FTINTC010",
+	.irq_ack	= ft010_irq_ack,
+	.irq_mask	= ft010_irq_mask,
+	.irq_unmask	= ft010_irq_unmask,
+	.irq_set_type	= ft010_irq_set_type,
+};
+
+/* Local static for the IRQ entry call */
+static struct ft010_irq_data firq;
+
+asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs)
+{
+	struct ft010_irq_data *f = &firq;
+	int irq;
+	u32 status;
+
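+	/* Service pending interrupts, lowest numbered line first */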
+	while ((status = readl(FT010_IRQ_STATUS(f->base)))) {
+		irq = ffs(status) - 1;
+		handle_domain_irq(f->domain, irq, regs);
+	}
+}
+
+static int ft010_irqdomain_map(struct irq_domain *d, unsigned int irq,
+				irq_hw_number_t hwirq)
+{
+	struct ft010_irq_data *f = d->host_data;
+
+	irq_set_chip_data(irq, f);
+	/* All IRQs should set up their type, flags as bad by default */
+	irq_set_chip_and_handler(irq, &ft010_irq_chip, handle_bad_irq);
+	irq_set_probe(irq);
+
+	return 0;
+}
+
+static void ft010_irqdomain_unmap(struct irq_domain *d, unsigned int irq)
+{
+	irq_set_chip_and_handler(irq, NULL, NULL);
+	irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops ft010_irqdomain_ops = {
+	.map = ft010_irqdomain_map,
+	.unmap = ft010_irqdomain_unmap,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
+int __init ft010_of_init_irq(struct device_node *node,
+			      struct device_node *parent)
+{
+	struct ft010_irq_data *f = &firq;
+
+	/*
+	 * Disable the idle handler by default since it is buggy.
+	 * For more info see arch/arm/mach-gemini/idle.c
+	 */
+	cpu_idle_poll_ctrl(true);
+
+	f->base = of_iomap(node, 0);
+	WARN(!f->base, "unable to map gemini irq registers\n");
+
+	/* Disable all interrupts */
+	writel(0, FT010_IRQ_MASK(f->base));
+	writel(0, FT010_FIQ_MASK(f->base));
+
+	f->domain = irq_domain_add_simple(node, FT010_NUM_IRQS, 0,
+					  &ft010_irqdomain_ops, f);
+	set_handle_irq(ft010_irqchip_handle_irq);
+
+	return 0;
+}
+IRQCHIP_DECLARE(faraday, "faraday,ftintc010",
+		ft010_of_init_irq);
+IRQCHIP_DECLARE(gemini, "cortina,gemini-interrupt-controller",
+		ft010_of_init_irq);
+IRQCHIP_DECLARE(moxa, "moxa,moxart-ic",
+		ft010_of_init_irq);
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
new file mode 100644
index 0000000..01e673c
--- /dev/null
+++ b/drivers/irqchip/irq-gic-common.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include "irq-gic-common.h"
+
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
+static const struct gic_kvm_info *gic_kvm_info;
+
+const struct gic_kvm_info *gic_get_kvm_info(void)
+{
+	return gic_kvm_info;
+}
+
+void gic_set_kvm_info(const struct gic_kvm_info *info)
+{
+	BUG_ON(gic_kvm_info != NULL);
+	gic_kvm_info = info;
+}
+
+void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
+		void *data)
+{
+	for (; quirks->desc; quirks++) {
+		if (quirks->iidr != (quirks->mask & iidr))
+			continue;
+		if (quirks->init(data))
+			pr_info("GIC: enabling workaround for %s\n",
+				quirks->desc);
+	}
+}
+
+int gic_configure_irq(unsigned int irq, unsigned int type,
+		       void __iomem *base, void (*sync_access)(void))
+{
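+	/*
+	 * Each GIC_DIST_CONFIG register covers 16 interrupts, with two
+	 * config bits per interrupt; the upper bit selects edge-triggered.
+	 */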
+	u32 confmask = 0x2 << ((irq % 16) * 2);
+	u32 confoff = (irq / 16) * 4;
+	u32 val, oldval;
+	int ret = 0;
+	unsigned long flags;
+
+	/*
+	 * Read current configuration register, and insert the config
+	 * for "irq", depending on "type".
+	 */
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+	val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
+	if (type & IRQ_TYPE_LEVEL_MASK)
+		val &= ~confmask;
+	else if (type & IRQ_TYPE_EDGE_BOTH)
+		val |= confmask;
+
+	/* If the current configuration is the same, then we are done */
+	if (val == oldval) {
+		raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+		return 0;
+	}
+
+	/*
+	 * Write back the new configuration, and possibly re-enable
+	 * the interrupt. If we fail to write a new configuration for
+	 * an SPI then WARN and return an error. If we fail to write the
+	 * configuration for a PPI this is most likely because the GIC
+	 * does not allow us to set the configuration or we are in a
+	 * non-secure mode, and hence it may not be catastrophic.
+	 */
+	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
+	if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val) {
+		if (WARN_ON(irq >= 32))
+			ret = -EINVAL;
+		else
+			pr_warn("GIC: PPI%d is secure or misconfigured\n",
+				irq - 16);
+	}
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+
+	if (sync_access)
+		sync_access();
+
+	return ret;
+}
+
+void gic_dist_config(void __iomem *base, int gic_irqs,
+		     void (*sync_access)(void))
+{
+	unsigned int i;
+
+	/*
+	 * Set all global interrupts to be level triggered, active low.
+	 */
+	for (i = 32; i < gic_irqs; i += 16)
+		writel_relaxed(GICD_INT_ACTLOW_LVLTRIG,
+					base + GIC_DIST_CONFIG + i / 4);
+
+	/*
+	 * Set priority on all global interrupts.
+	 */
+	for (i = 32; i < gic_irqs; i += 4)
+		writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i);
+
+	/*
+	 * Deactivate and disable all SPIs. Leave the PPI and SGIs
+	 * alone as they are in the redistributor registers on GICv3.
+	 */
+	for (i = 32; i < gic_irqs; i += 32) {
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			       base + GIC_DIST_ACTIVE_CLEAR + i / 8);
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			       base + GIC_DIST_ENABLE_CLEAR + i / 8);
+	}
+
+	if (sync_access)
+		sync_access();
+}
+
+void gic_cpu_config(void __iomem *base, void (*sync_access)(void))
+{
+	int i;
+
+	/*
+	 * Deal with the banked PPI and SGI interrupts - disable all
+	 * PPI interrupts, ensure all SGI interrupts are enabled.
+	 * Make sure everything is deactivated.
+	 */
+	writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ACTIVE_CLEAR);
+	writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR);
+	writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET);
+
+	/*
+	 * Set priority on PPI and SGI interrupts
+	 */
+	for (i = 0; i < 32; i += 4)
+		writel_relaxed(GICD_INT_DEF_PRI_X4,
+					base + GIC_DIST_PRI + i * 4 / 4);
+
+	if (sync_access)
+		sync_access();
+}
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
new file mode 100644
index 0000000..3919cd7
--- /dev/null
+++ b/drivers/irqchip/irq-gic-common.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _IRQ_GIC_COMMON_H
+#define _IRQ_GIC_COMMON_H
+
+#include <linux/of.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/arm-gic-common.h>
+
+struct gic_quirk {
+	const char *desc;
+	bool (*init)(void *data);
+	u32 iidr;
+	u32 mask;
+};
+
+int gic_configure_irq(unsigned int irq, unsigned int type,
+                       void __iomem *base, void (*sync_access)(void));
+void gic_dist_config(void __iomem *base, int gic_irqs,
+		     void (*sync_access)(void));
+void gic_cpu_config(void __iomem *base, void (*sync_access)(void));
+void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
+		void *data);
+
+void gic_set_kvm_info(const struct gic_kvm_info *info);
+
+#endif /* _IRQ_GIC_COMMON_H */
diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c
new file mode 100644
index 0000000..ecafd29
--- /dev/null
+++ b/drivers/irqchip/irq-gic-pm.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2016 NVIDIA CORPORATION, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+struct gic_clk_data {
+	unsigned int num_clocks;
+	const char *const *clocks;
+};
+
+static int gic_runtime_resume(struct device *dev)
+{
+	struct gic_chip_data *gic = dev_get_drvdata(dev);
+	int ret;
+
+	ret = pm_clk_resume(dev);
+	if (ret)
+		return ret;
+
+	/*
+	 * On the very first resume, the pointer to the driver data
+	 * will be NULL and this is intentional, because we do not
+	 * want to restore the GIC on the very first resume. So if
+	 * the pointer is not valid just return.
+	 * the pointer is not valid, just return.
+	if (!gic)
+		return 0;
+
+	gic_dist_restore(gic);
+	gic_cpu_restore(gic);
+
+	return 0;
+}
+
+static int gic_runtime_suspend(struct device *dev)
+{
+	struct gic_chip_data *gic = dev_get_drvdata(dev);
+
+	gic_dist_save(gic);
+	gic_cpu_save(gic);
+
+	return pm_clk_suspend(dev);
+}
+
+static int gic_get_clocks(struct device *dev, const struct gic_clk_data *data)
+{
+	unsigned int i;
+	int ret;
+
+	if (!dev || !data)
+		return -EINVAL;
+
+	ret = pm_clk_create(dev);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < data->num_clocks; i++) {
+		ret = of_pm_clk_add_clk(dev, data->clocks[i]);
+		if (ret) {
+			dev_err(dev, "failed to add clock %s\n",
+				data->clocks[i]);
+			pm_clk_destroy(dev);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int gic_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct gic_clk_data *data;
+	struct gic_chip_data *gic;
+	int ret, irq;
+
+	data = of_device_get_match_data(&pdev->dev);
+	if (!data) {
+		dev_err(&pdev->dev, "no device match found\n");
+		return -ENODEV;
+	}
+
+	irq = irq_of_parse_and_map(dev->of_node, 0);
+	if (!irq) {
+		dev_err(dev, "no parent interrupt found!\n");
+		return -EINVAL;
+	}
+
+	ret = gic_get_clocks(dev, data);
+	if (ret)
+		goto irq_dispose;
+
+	pm_runtime_enable(dev);
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0)
+		goto rpm_disable;
+
+	ret = gic_of_init_child(dev, &gic, irq);
+	if (ret)
+		goto rpm_put;
+
+	platform_set_drvdata(pdev, gic);
+
+	pm_runtime_put(dev);
+
+	dev_info(dev, "GIC IRQ controller registered\n");
+
+	return 0;
+
+rpm_put:
+	pm_runtime_put_sync(dev);
+rpm_disable:
+	pm_runtime_disable(dev);
+	pm_clk_destroy(dev);
+irq_dispose:
+	irq_dispose_mapping(irq);
+
+	return ret;
+}
+
+static const struct dev_pm_ops gic_pm_ops = {
+	SET_RUNTIME_PM_OPS(gic_runtime_suspend,
+			   gic_runtime_resume, NULL)
+};
+
+static const char * const gic400_clocks[] = {
+	"clk",
+};
+
+static const struct gic_clk_data gic400_data = {
+	.num_clocks = ARRAY_SIZE(gic400_clocks),
+	.clocks = gic400_clocks,
+};
+
+static const struct of_device_id gic_match[] = {
+	{ .compatible = "nvidia,tegra210-agic",	.data = &gic400_data },
+	{},
+};
+MODULE_DEVICE_TABLE(of, gic_match);
+
+static struct platform_driver gic_driver = {
+	.probe		= gic_probe,
+	.driver		= {
+		.name	= "gic",
+		.of_match_table	= gic_match,
+		.pm	= &gic_pm_ops,
+	}
+};
+
+builtin_platform_driver(gic_driver);
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
new file mode 100644
index 0000000..b4c1924
--- /dev/null
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Special GIC quirks for the ARM RealView
+ * Copyright (C) 2015 Linus Walleij
+ */
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/bitops.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
+
+#define REALVIEW_SYS_LOCK_OFFSET	0x20
+#define REALVIEW_SYS_PLD_CTRL1		0x74
+#define REALVIEW_EB_REVB_SYS_PLD_CTRL1	0xD8
+#define VERSATILE_LOCK_VAL		0xA05F
+#define PLD_INTMODE_MASK		(BIT(22)|BIT(23)|BIT(24))
+#define PLD_INTMODE_LEGACY		0x0
+#define PLD_INTMODE_NEW_DCC		BIT(22)
+#define PLD_INTMODE_NEW_NO_DCC		BIT(23)
+#define PLD_INTMODE_FIQ_ENABLE		BIT(24)
+
+/* For some reason RealView EB Rev B moved this register */
+static const struct of_device_id syscon_pldset_of_match[] = {
+	{
+		.compatible = "arm,realview-eb11mp-revb-syscon",
+		.data = (void *)REALVIEW_EB_REVB_SYS_PLD_CTRL1,
+	},
+	{
+		.compatible = "arm,realview-eb11mp-revc-syscon",
+		.data = (void *)REALVIEW_SYS_PLD_CTRL1,
+	},
+	{
+		.compatible = "arm,realview-eb-syscon",
+		.data = (void *)REALVIEW_SYS_PLD_CTRL1,
+	},
+	{
+		.compatible = "arm,realview-pb11mp-syscon",
+		.data = (void *)REALVIEW_SYS_PLD_CTRL1,
+	},
+	{},
+};
+
+static int __init
+realview_gic_of_init(struct device_node *node, struct device_node *parent)
+{
+	struct regmap *map;
+	struct device_node *np;
+	const struct of_device_id *gic_id;
+	u32 pld1_ctrl;
+
+	np = of_find_matching_node_and_match(NULL, syscon_pldset_of_match,
+					     &gic_id);
+	if (!np)
+		return -ENODEV;
+	pld1_ctrl = (u32)gic_id->data;
+
+	/* The PB11MPCore GIC needs to be configured in the syscon */
+	map = syscon_node_to_regmap(np);
+	if (!IS_ERR(map)) {
+		/* new irq mode with no DCC */
+		regmap_write(map, REALVIEW_SYS_LOCK_OFFSET,
+			     VERSATILE_LOCK_VAL);
+		regmap_update_bits(map, pld1_ctrl,
+				   PLD_INTMODE_NEW_NO_DCC,
+				   PLD_INTMODE_MASK);
+		regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, 0x0000);
+		pr_info("RealView GIC: set up interrupt controller to NEW mode, no DCC\n");
+	} else {
+		pr_err("RealView GIC setup: could not find syscon\n");
+		return -ENODEV;
+	}
+	return gic_of_init(node, parent);
+}
+IRQCHIP_DECLARE(armtc11mp_gic, "arm,tc11mp-gic", realview_gic_of_init);
+IRQCHIP_DECLARE(armeb11mp_gic, "arm,eb11mp-gic", realview_gic_of_init);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
new file mode 100644
index 0000000..f5fe010
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -0,0 +1,526 @@
+/*
+ * ARM GIC v2m MSI(-X) support
+ * Support for Message Signaled Interrupts for systems that
+ * implement ARM Generic Interrupt Controller: GICv2m.
+ *
+ * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+ *	    Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
+ *	    Brandon Anderson <brandon.anderson@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "GICv2m: " fmt
+
+#include <linux/acpi.h>
+#include <linux/dma-iommu.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irqchip/arm-gic.h>
+
+/*
+ * MSI_TYPER:
+ *     [31:26] Reserved
+ *     [25:16] lowest SPI assigned to MSI
+ *     [15:10] Reserved
+ *     [9:0]   Number of SPIs assigned to MSI
+ */
+#define V2M_MSI_TYPER		       0x008
+#define V2M_MSI_TYPER_BASE_SHIFT       16
+#define V2M_MSI_TYPER_BASE_MASK	       0x3FF
+#define V2M_MSI_TYPER_NUM_MASK	       0x3FF
+#define V2M_MSI_SETSPI_NS	       0x040
+#define V2M_MIN_SPI		       32
+#define V2M_MAX_SPI		       1019
+#define V2M_MSI_IIDR		       0xFCC
+
+#define V2M_MSI_TYPER_BASE_SPI(x)      \
+	       (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)
+
+#define V2M_MSI_TYPER_NUM_SPI(x)       ((x) & V2M_MSI_TYPER_NUM_MASK)
+
+/* APM X-Gene with GICv2m MSI_IIDR register value */
+#define XGENE_GICV2M_MSI_IIDR		0x06000170
+
+/* Broadcom NS2 GICv2m MSI_IIDR register value */
+#define BCM_NS2_GICV2M_MSI_IIDR		0x0000013f
+
+/* List of flags for specific v2m implementation */
+#define GICV2M_NEEDS_SPI_OFFSET		0x00000001
+
+static LIST_HEAD(v2m_nodes);
+static DEFINE_SPINLOCK(v2m_lock);
+
+struct v2m_data {
+	struct list_head entry;
+	struct fwnode_handle *fwnode;
+	struct resource res;	/* GICv2m resource */
+	void __iomem *base;	/* GICv2m virt address */
+	u32 spi_start;		/* The SPI number at which MSIs start */
+	u32 nr_spis;		/* The number of SPIs for MSIs */
+	u32 spi_offset;		/* offset to be subtracted from SPI number */
+	unsigned long *bm;	/* MSI vector bitmap */
+	u32 flags;		/* v2m flags for specific implementation */
+};
+
+static void gicv2m_mask_msi_irq(struct irq_data *d)
+{
+	pci_msi_mask_irq(d);
+	irq_chip_mask_parent(d);
+}
+
+static void gicv2m_unmask_msi_irq(struct irq_data *d)
+{
+	pci_msi_unmask_irq(d);
+	irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip gicv2m_msi_irq_chip = {
+	.name			= "MSI",
+	.irq_mask		= gicv2m_mask_msi_irq,
+	.irq_unmask		= gicv2m_unmask_msi_irq,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_write_msi_msg	= pci_msi_domain_write_msg,
+};
+
+static struct msi_domain_info gicv2m_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+	.chip	= &gicv2m_msi_irq_chip,
+};
+
+static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
+	phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;
+
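+	/* Target the frame's SETSPI_NS doorbell; the data is the SPI number */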
+	msg->address_hi = upper_32_bits(addr);
+	msg->address_lo = lower_32_bits(addr);
+	msg->data = data->hwirq;
+
+	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
+		msg->data -= v2m->spi_offset;
+
+	iommu_dma_map_msi_msg(data->irq, msg);
+}
+
+static struct irq_chip gicv2m_irq_chip = {
+	.name			= "GICv2m",
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
+};
+
+static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
+				       unsigned int virq,
+				       irq_hw_number_t hwirq)
+{
+	struct irq_fwspec fwspec;
+	struct irq_data *d;
+	int err;
+
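+	/* Build a parent fwspec: 3 cells for DT GIC, 2 for an ACPI fwnode */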
+	if (is_of_node(domain->parent->fwnode)) {
+		fwspec.fwnode = domain->parent->fwnode;
+		fwspec.param_count = 3;
+		fwspec.param[0] = 0;
+		fwspec.param[1] = hwirq - 32;
+		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
+		fwspec.fwnode = domain->parent->fwnode;
+		fwspec.param_count = 2;
+		fwspec.param[0] = hwirq;
+		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+	} else {
+		return -EINVAL;
+	}
+
+	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+	if (err)
+		return err;
+
+	/* Configure the interrupt line to be edge */
+	d = irq_domain_get_irq_data(domain->parent, virq);
+	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+	return 0;
+}
+
+static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
+			       int nr_irqs)
+{
+	spin_lock(&v2m_lock);
+	bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
+			      get_count_order(nr_irqs));
+	spin_unlock(&v2m_lock);
+}
+
+static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				   unsigned int nr_irqs, void *args)
+{
+	struct v2m_data *v2m = NULL, *tmp;
+	int hwirq, offset, i, err = 0;
+
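+	/* Find the first v2m frame with a large enough free region of SPIs */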
+	spin_lock(&v2m_lock);
+	list_for_each_entry(tmp, &v2m_nodes, entry) {
+		offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
+						 get_count_order(nr_irqs));
+		if (offset >= 0) {
+			v2m = tmp;
+			break;
+		}
+	}
+	spin_unlock(&v2m_lock);
+
+	if (!v2m)
+		return -ENOSPC;
+
+	hwirq = v2m->spi_start + offset;
+
+	for (i = 0; i < nr_irqs; i++) {
+		err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
+		if (err)
+			goto fail;
+
+		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+					      &gicv2m_irq_chip, v2m);
+	}
+
+	return 0;
+
+fail:
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+	gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
+	return err;
+}
+
+static void gicv2m_irq_domain_free(struct irq_domain *domain,
+				   unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct v2m_data *v2m = irq_data_get_irq_chip_data(d);
+
+	gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops gicv2m_domain_ops = {
+	.alloc			= gicv2m_irq_domain_alloc,
+	.free			= gicv2m_irq_domain_free,
+};
+
+static bool is_msi_spi_valid(u32 base, u32 num)
+{
+	if (base < V2M_MIN_SPI) {
+		pr_err("Invalid MSI base SPI (base:%u)\n", base);
+		return false;
+	}
+
+	if ((num == 0) || (base + num > V2M_MAX_SPI)) {
+		pr_err("Number of SPIs (%u) exceeds maximum (%u)\n",
+		       num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
+		return false;
+	}
+
+	return true;
+}
+
+static struct irq_chip gicv2m_pmsi_irq_chip = {
+	.name			= "pMSI",
+};
+
+static struct msi_domain_ops gicv2m_pmsi_ops = {
+};
+
+static struct msi_domain_info gicv2m_pmsi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+	.ops	= &gicv2m_pmsi_ops,
+	.chip	= &gicv2m_pmsi_irq_chip,
+};
+
+static void gicv2m_teardown(void)
+{
+	struct v2m_data *v2m, *tmp;
+
+	list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
+		list_del(&v2m->entry);
+		kfree(v2m->bm);
+		iounmap(v2m->base);
+		of_node_put(to_of_node(v2m->fwnode));
+		if (is_fwnode_irqchip(v2m->fwnode))
+			irq_domain_free_fwnode(v2m->fwnode);
+		kfree(v2m);
+	}
+}
+
+static int gicv2m_allocate_domains(struct irq_domain *parent)
+{
+	struct irq_domain *inner_domain, *pci_domain, *plat_domain;
+	struct v2m_data *v2m;
+
+	v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
+	if (!v2m)
+		return 0;
+
+	inner_domain = irq_domain_create_tree(v2m->fwnode,
+					      &gicv2m_domain_ops, v2m);
+	if (!inner_domain) {
+		pr_err("Failed to create GICv2m domain\n");
+		return -ENOMEM;
+	}
+
+	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
+	inner_domain->parent = parent;
+	pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
+					       &gicv2m_msi_domain_info,
+					       inner_domain);
+	plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
+						     &gicv2m_pmsi_domain_info,
+						     inner_domain);
+	if (!pci_domain || !plat_domain) {
+		pr_err("Failed to create MSI domains\n");
+		if (plat_domain)
+			irq_domain_remove(plat_domain);
+		if (pci_domain)
+			irq_domain_remove(pci_domain);
+		irq_domain_remove(inner_domain);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
+				  u32 spi_start, u32 nr_spis,
+				  struct resource *res)
+{
+	int ret;
+	struct v2m_data *v2m;
+
+	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
+	if (!v2m) {
+		pr_err("Failed to allocate struct v2m_data.\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&v2m->entry);
+	v2m->fwnode = fwnode;
+
+	memcpy(&v2m->res, res, sizeof(struct resource));
+
+	v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
+	if (!v2m->base) {
+		pr_err("Failed to map GICv2m resource\n");
+		ret = -ENOMEM;
+		goto err_free_v2m;
+	}
+
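+	/* Prefer firmware-provided values; otherwise read the frame's MSI_TYPER */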
+	if (spi_start && nr_spis) {
+		v2m->spi_start = spi_start;
+		v2m->nr_spis = nr_spis;
+	} else {
+		u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
+
+		v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
+		v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
+	}
+
+	if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
+		ret = -EINVAL;
+		goto err_iounmap;
+	}
+
+	/*
+	 * APM X-Gene GICv2m implementation has an erratum where
+	 * the MSI data needs to be the offset from the spi_start
+	 * in order to trigger the correct MSI interrupt. This is
+	 * different from the standard GICv2m implementation where
+	 * the MSI data is the absolute value within the range from
+	 * spi_start to (spi_start + num_spis).
+	 *
+	 * Broadcom NS2 GICv2m implementation has an erratum where the MSI data
+	 * is 'spi_number - 32'
+	 */
+	switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
+	case XGENE_GICV2M_MSI_IIDR:
+		v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
+		v2m->spi_offset = v2m->spi_start;
+		break;
+	case BCM_NS2_GICV2M_MSI_IIDR:
+		v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
+		v2m->spi_offset = 32;
+		break;
+	}
+
+	v2m->bm = kcalloc(BITS_TO_LONGS(v2m->nr_spis), sizeof(long),
+			  GFP_KERNEL);
+	if (!v2m->bm) {
+		ret = -ENOMEM;
+		goto err_iounmap;
+	}
+
+	list_add_tail(&v2m->entry, &v2m_nodes);
+
+	pr_info("range %pR, SPI[%d:%d]\n", res,
+		v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1));
+	return 0;
+
+err_iounmap:
+	iounmap(v2m->base);
+err_free_v2m:
+	kfree(v2m);
+	return ret;
+}
+
+static struct of_device_id gicv2m_device_id[] = {
+	{	.compatible	= "arm,gic-v2m-frame",	},
+	{},
+};
+
+static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
+				 struct irq_domain *parent)
+{
+	int ret = 0;
+	struct device_node *node = to_of_node(parent_handle);
+	struct device_node *child;
+
+	for (child = of_find_matching_node(node, gicv2m_device_id); child;
+	     child = of_find_matching_node(child, gicv2m_device_id)) {
+		u32 spi_start = 0, nr_spis = 0;
+		struct resource res;
+
+		if (!of_find_property(child, "msi-controller", NULL))
+			continue;
+
+		ret = of_address_to_resource(child, 0, &res);
+		if (ret) {
+			pr_err("Failed to allocate v2m resource.\n");
+			break;
+		}
+
+		if (!of_property_read_u32(child, "arm,msi-base-spi",
+					  &spi_start) &&
+		    !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
+			pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
+				spi_start, nr_spis);
+
+		ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis, &res);
+		if (ret) {
+			of_node_put(child);
+			break;
+		}
+	}
+
+	if (!ret)
+		ret = gicv2m_allocate_domains(parent);
+	if (ret)
+		gicv2m_teardown();
+	return ret;
+}
+
+#ifdef CONFIG_ACPI
+static int acpi_num_msi;
+
+static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
+{
+	struct v2m_data *data;
+
+	if (WARN_ON(acpi_num_msi <= 0))
+		return NULL;
+
+	/* We only return the fwnode of the first MSI frame. */
+	data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
+	if (!data)
+		return NULL;
+
+	return data->fwnode;
+}
+
+static int __init
+acpi_parse_madt_msi(struct acpi_subtable_header *header,
+		    const unsigned long end)
+{
+	int ret;
+	struct resource res;
+	u32 spi_start = 0, nr_spis = 0;
+	struct acpi_madt_generic_msi_frame *m;
+	struct fwnode_handle *fwnode;
+
+	m = (struct acpi_madt_generic_msi_frame *)header;
+	if (BAD_MADT_ENTRY(m, end))
+		return -EINVAL;
+
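+	/* Cover the frame's registers with a fixed 4K window */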
+	res.start = m->base_address;
+	res.end = m->base_address + SZ_4K - 1;
+	res.flags = IORESOURCE_MEM;
+
+	if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
+		spi_start = m->spi_base;
+		nr_spis = m->spi_count;
+
+		pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
+			spi_start, nr_spis);
+	}
+
+	fwnode = irq_domain_alloc_fwnode((void *)m->base_address);
+	if (!fwnode) {
+		pr_err("Unable to allocate GICv2m domain token\n");
+		return -EINVAL;
+	}
+
+	ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res);
+	if (ret)
+		irq_domain_free_fwnode(fwnode);
+
+	return ret;
+}
+
+static int __init gicv2m_acpi_init(struct irq_domain *parent)
+{
+	int ret;
+
+	if (acpi_num_msi > 0)
+		return 0;
+
+	acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME,
+				      acpi_parse_madt_msi, 0);
+
+	if (acpi_num_msi <= 0)
+		goto err_out;
+
+	ret = gicv2m_allocate_domains(parent);
+	if (ret)
+		goto err_out;
+
+	pci_msi_register_fwnode_provider(&gicv2m_get_fwnode);
+
+	return 0;
+
+err_out:
+	gicv2m_teardown();
+	return -EINVAL;
+}
+#else /* CONFIG_ACPI */
+static int __init gicv2m_acpi_init(struct irq_domain *parent)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+int __init gicv2m_init(struct fwnode_handle *parent_handle,
+		       struct irq_domain *parent)
+{
+	if (is_of_node(parent_handle))
+		return gicv2m_of_init(parent_handle, parent);
+
+	return gicv2m_acpi_init(parent);
+}
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
new file mode 100644
index 0000000..606efa6
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale Management Complex (MC) bus driver MSI support
+ *
+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/fsl/mc.h>
+
+static struct irq_chip its_msi_irq_chip = {
+	.name = "ITS-fMSI",
+	.irq_mask = irq_chip_mask_parent,
+	.irq_unmask = irq_chip_unmask_parent,
+	.irq_eoi = irq_chip_eoi_parent,
+	.irq_set_affinity = msi_domain_set_affinity
+};
+
+static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
+				  struct device *dev,
+				  int nvec, msi_alloc_info_t *info)
+{
+	struct fsl_mc_device *mc_bus_dev;
+	struct msi_domain_info *msi_info;
+
+	if (!dev_is_fsl_mc(dev))
+		return -EINVAL;
+
+	mc_bus_dev = to_fsl_mc_device(dev);
+	if (!(mc_bus_dev->flags & FSL_MC_IS_DPRC))
+		return -EINVAL;
+
+	/*
+	 * Set the device Id to be passed to the GIC-ITS:
+	 *
+	 * NOTE: This device id corresponds to the IOMMU stream ID
+	 * associated with the DPRC object (ICID).
+	 */
+	info->scratchpad[0].ul = mc_bus_dev->icid;
+	msi_info = msi_get_domain_info(msi_domain->parent);
+
+	/* Allocate at least 32 MSIs, and always as a power of 2 */
+	nvec = max_t(int, 32, roundup_pow_of_two(nvec));
+	return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
+}
+
+static struct msi_domain_ops its_fsl_mc_msi_ops __ro_after_init = {
+	.msi_prepare = its_fsl_mc_msi_prepare,
+};
+
+static struct msi_domain_info its_fsl_mc_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+	.ops	= &its_fsl_mc_msi_ops,
+	.chip	= &its_msi_irq_chip,
+};
+
+static const struct of_device_id its_device_id[] = {
+	{	.compatible	= "arm,gic-v3-its",	},
+	{},
+};
+
+static int __init its_fsl_mc_msi_init(void)
+{
+	struct device_node *np;
+	struct irq_domain *parent;
+	struct irq_domain *mc_msi_domain;
+
+	for (np = of_find_matching_node(NULL, its_device_id); np;
+	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
+		if (!of_property_read_bool(np, "msi-controller"))
+			continue;
+
+		parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS);
+		if (!parent || !msi_get_domain_info(parent)) {
+			pr_err("%pOF: unable to locate ITS domain\n", np);
+			continue;
+		}
+
+		mc_msi_domain = fsl_mc_msi_create_irq_domain(
+						 of_node_to_fwnode(np),
+						 &its_fsl_mc_msi_domain_info,
+						 parent);
+		if (!mc_msi_domain) {
+			pr_err("%pOF: unable to create fsl-mc domain\n", np);
+			continue;
+		}
+
+		pr_info("fsl-mc MSI: %pOF domain created\n", np);
+	}
+
+	return 0;
+}
+
+early_initcall(its_fsl_mc_msi_init);
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
new file mode 100644
index 0000000..8d6d009
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/acpi_iort.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+
+static void its_mask_msi_irq(struct irq_data *d)
+{
+	pci_msi_mask_irq(d);
+	irq_chip_mask_parent(d);
+}
+
+static void its_unmask_msi_irq(struct irq_data *d)
+{
+	pci_msi_unmask_irq(d);
+	irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip its_msi_irq_chip = {
+	.name			= "ITS-MSI",
+	.irq_unmask		= its_unmask_msi_irq,
+	.irq_mask		= its_mask_msi_irq,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_write_msi_msg	= pci_msi_domain_write_msg,
+};
+
+static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data)
+{
+	int msi, msix, *count = data;
+
+	msi = max(pci_msi_vec_count(pdev), 0);
+	msix = max(pci_msix_vec_count(pdev), 0);
+	*count += max(msi, msix);
+
+	return 0;
+}
+
+static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+	struct pci_dev **alias_dev = data;
+
+	*alias_dev = pdev;
+
+	return 0;
+}
+
+static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
+			       int nvec, msi_alloc_info_t *info)
+{
+	struct pci_dev *pdev, *alias_dev;
+	struct msi_domain_info *msi_info;
+	int alias_count = 0, minnvec = 1;
+
+	if (!dev_is_pci(dev))
+		return -EINVAL;
+
+	msi_info = msi_get_domain_info(domain->parent);
+
+	pdev = to_pci_dev(dev);
+	/*
+	 * If pdev is downstream of any aliasing bridges, take an upper
+	 * bound of how many other vectors could map to the same DevID.
+	 */
+	pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev);
+	if (alias_dev != pdev && alias_dev->subordinate)
+		pci_walk_bus(alias_dev->subordinate, its_pci_msi_vec_count,
+			     &alias_count);
+
+	/* ITS specific DeviceID, as the core ITS ignores dev. */
+	info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
+
+	/*
+	 * Always allocate a power of 2, and special case device 0 for
+	 * broken systems where the DevID is not wired (and all devices
+	 * appear as DevID 0). For that reason, we generously allocate a
+	 * minimum of 32 MSIs for DevID 0. If you want more because all
+	 * your devices are aliasing to DevID 0, consider fixing your HW.
+	 */
+	nvec = max(nvec, alias_count);
+	if (!info->scratchpad[0].ul)
+		minnvec = 32;
+	nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
+	return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
+}
+
+static struct msi_domain_ops its_pci_msi_ops = {
+	.msi_prepare	= its_pci_msi_prepare,
+};
+
+static struct msi_domain_info its_pci_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+	.ops	= &its_pci_msi_ops,
+	.chip	= &its_msi_irq_chip,
+};
+
+static struct of_device_id its_device_id[] = {
+	{	.compatible	= "arm,gic-v3-its",	},
+	{},
+};
+
+static int __init its_pci_msi_init_one(struct fwnode_handle *handle,
+				       const char *name)
+{
+	struct irq_domain *parent;
+
+	parent = irq_find_matching_fwnode(handle, DOMAIN_BUS_NEXUS);
+	if (!parent || !msi_get_domain_info(parent)) {
+		pr_err("%s: Unable to locate ITS domain\n", name);
+		return -ENXIO;
+	}
+
+	if (!pci_msi_create_irq_domain(handle, &its_pci_msi_domain_info,
+				       parent)) {
+		pr_err("%s: Unable to create PCI domain\n", name);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int __init its_pci_of_msi_init(void)
+{
+	struct device_node *np;
+
+	for (np = of_find_matching_node(NULL, its_device_id); np;
+	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
+		if (!of_property_read_bool(np, "msi-controller"))
+			continue;
+
+		if (its_pci_msi_init_one(of_node_to_fwnode(np), np->full_name))
+			continue;
+
+		pr_info("PCI/MSI: %pOF domain created\n", np);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_ACPI
+
+static int __init
+its_pci_msi_parse_madt(struct acpi_subtable_header *header,
+		       const unsigned long end)
+{
+	struct acpi_madt_generic_translator *its_entry;
+	struct fwnode_handle *dom_handle;
+	const char *node_name;
+	int err = -ENXIO;
+
+	its_entry = (struct acpi_madt_generic_translator *)header;
+	node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx",
+			      (long)its_entry->base_address);
+	dom_handle = iort_find_domain_token(its_entry->translation_id);
+	if (!dom_handle) {
+		pr_err("%s: Unable to locate ITS domain handle\n", node_name);
+		goto out;
+	}
+
+	err = its_pci_msi_init_one(dom_handle, node_name);
+	if (!err)
+		pr_info("PCI/MSI: %s domain created\n", node_name);
+
+out:
+	kfree(node_name);
+	return err;
+}
+
+static int __init its_pci_acpi_msi_init(void)
+{
+	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+			      its_pci_msi_parse_madt, 0);
+	return 0;
+}
+#else
+static int __init its_pci_acpi_msi_init(void)
+{
+	return 0;
+}
+#endif
+
+static int __init its_pci_msi_init(void)
+{
+	its_pci_of_msi_init();
+	its_pci_acpi_msi_init();
+
+	return 0;
+}
+early_initcall(its_pci_msi_init);
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
new file mode 100644
index 0000000..7b8e87b
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/acpi_iort.h>
+#include <linux/device.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+static struct irq_chip its_pmsi_irq_chip = {
+	.name			= "ITS-pMSI",
+};
+
+static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
+				  u32 *dev_id)
+{
+	int ret, index = 0;
+
+	/* Suck the DeviceID out of the msi-parent property */
+	do {
+		struct of_phandle_args args;
+
+		ret = of_parse_phandle_with_args(dev->of_node,
+						 "msi-parent", "#msi-cells",
+						 index, &args);
+		if (args.np == irq_domain_get_of_node(domain)) {
+			if (WARN_ON(args.args_count != 1))
+				return -EINVAL;
+			*dev_id = args.args[0];
+			break;
+		}
+		index++;
+	} while (!ret);
+
+	return ret;
+}
+
+int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
+{
+	return -1;
+}
+
+static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
+			    int nvec, msi_alloc_info_t *info)
+{
+	struct msi_domain_info *msi_info;
+	u32 dev_id;
+	int ret;
+
+	msi_info = msi_get_domain_info(domain->parent);
+
+	if (dev->of_node)
+		ret = of_pmsi_get_dev_id(domain, dev, &dev_id);
+	else
+		ret = iort_pmsi_get_dev_id(dev, &dev_id);
+	if (ret)
+		return ret;
+
+	/* ITS specific DeviceID, as the core ITS ignores dev. */
+	info->scratchpad[0].ul = dev_id;
+
+	/* Allocate at least 32 MSIs, and always as a power of 2 */
+	nvec = max_t(int, 32, roundup_pow_of_two(nvec));
+	return msi_info->ops->msi_prepare(domain->parent,
+					  dev, nvec, info);
+}
+
+static struct msi_domain_ops its_pmsi_ops = {
+	.msi_prepare	= its_pmsi_prepare,
+};
+
+static struct msi_domain_info its_pmsi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+	.ops	= &its_pmsi_ops,
+	.chip	= &its_pmsi_irq_chip,
+};
+
+static const struct of_device_id its_device_id[] = {
+	{	.compatible	= "arm,gic-v3-its",	},
+	{},
+};
+
+static int __init its_pmsi_init_one(struct fwnode_handle *fwnode,
+				const char *name)
+{
+	struct irq_domain *parent;
+
+	parent = irq_find_matching_fwnode(fwnode, DOMAIN_BUS_NEXUS);
+	if (!parent || !msi_get_domain_info(parent)) {
+		pr_err("%s: unable to locate ITS domain\n", name);
+		return -ENXIO;
+	}
+
+	if (!platform_msi_create_irq_domain(fwnode, &its_pmsi_domain_info,
+					    parent)) {
+		pr_err("%s: unable to create platform domain\n", name);
+		return -ENXIO;
+	}
+
+	pr_info("Platform MSI: %s domain created\n", name);
+	return 0;
+}
+
+#ifdef CONFIG_ACPI
+static int __init
+its_pmsi_parse_madt(struct acpi_subtable_header *header,
+			const unsigned long end)
+{
+	struct acpi_madt_generic_translator *its_entry;
+	struct fwnode_handle *domain_handle;
+	const char *node_name;
+	int err = -ENXIO;
+
+	its_entry = (struct acpi_madt_generic_translator *)header;
+	node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx",
+			      (long)its_entry->base_address);
+	domain_handle = iort_find_domain_token(its_entry->translation_id);
+	if (!domain_handle) {
+		pr_err("%s: Unable to locate ITS domain handle\n", node_name);
+		goto out;
+	}
+
+	err = its_pmsi_init_one(domain_handle, node_name);
+
+out:
+	kfree(node_name);
+	return err;
+}
+
+static void __init its_pmsi_acpi_init(void)
+{
+	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+			      its_pmsi_parse_madt, 0);
+}
+#else
+static inline void its_pmsi_acpi_init(void) { }
+#endif
+
+static void __init its_pmsi_of_init(void)
+{
+	struct device_node *np;
+
+	for (np = of_find_matching_node(NULL, its_device_id); np;
+	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
+		if (!of_property_read_bool(np, "msi-controller"))
+			continue;
+
+		its_pmsi_init_one(of_node_to_fwnode(np), np->full_name);
+	}
+}
+
+static int __init its_pmsi_init(void)
+{
+	its_pmsi_of_init();
+	its_pmsi_acpi_init();
+	return 0;
+}
+early_initcall(its_pmsi_init);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
new file mode 100644
index 0000000..c2df341
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -0,0 +1,3794 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
+#include <linux/bitmap.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/dma-iommu.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/log2.h>
+#include <linux/mm.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqchip/arm-gic-v4.h>
+
+#include <asm/cputype.h>
+#include <asm/exception.h>
+
+#include "irq-gic-common.h"
+
+#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
+#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)
+
+#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
+
+static u32 lpi_id_bits;
+
+/*
+ * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
+ * deal with (one configuration byte per interrupt). PENDBASE has to
+ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
+ */
+#define LPI_NRBITS		lpi_id_bits
+#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
+#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
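+
+/*
+ * Worked example (illustrative): with lpi_id_bits == 16,
+ * LPI_PROPBASE_SZ is ALIGN(65536, 64K) == 64kB (one configuration
+ * byte per ID), and LPI_PENDBASE_SZ is ALIGN(65536 / 8, 64K) == 64kB,
+ * the 8kB of pending bits (one per INTID, including the first 8192)
+ * being rounded up to the architectural 64kB.
+ */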
+
+#define LPI_PROP_DEFAULT_PRIO	0xa0
+
+/*
+ * Collection structure - just an ID, and a redistributor address to
+ * ping. We use one per CPU as a bag of interrupts assigned to this
+ * CPU.
+ */
+struct its_collection {
+	u64			target_address;
+	u16			col_id;
+};
+
+/*
+ * The ITS_BASER structure - contains memory information, cached
+ * value of BASER register configuration and ITS page size.
+ */
+struct its_baser {
+	void		*base;
+	u64		val;
+	u32		order;
+	u32		psz;
+};
+
+struct its_device;
+
+/*
+ * The ITS structure - contains most of the infrastructure, with the
+ * top-level MSI domain, the command queue, the collections, and the
+ * list of devices writing to it.
+ */
+struct its_node {
+	raw_spinlock_t		lock;
+	struct list_head	entry;
+	void __iomem		*base;
+	phys_addr_t		phys_base;
+	struct its_cmd_block	*cmd_base;
+	struct its_cmd_block	*cmd_write;
+	struct its_baser	tables[GITS_BASER_NR_REGS];
+	struct its_collection	*collections;
+	struct fwnode_handle	*fwnode_handle;
+	u64			(*get_msi_base)(struct its_device *its_dev);
+	u64			cbaser_save;
+	u32			ctlr_save;
+	struct list_head	its_device_list;
+	u64			flags;
+	unsigned long		list_nr;
+	u32			ite_size;
+	u32			device_ids;
+	int			numa_node;
+	unsigned int		msi_domain_flags;
+	u32			pre_its_base; /* for Socionext Synquacer */
+	bool			is_v4;
+	int			vlpi_redist_offset;
+};
+
+#define ITS_ITT_ALIGN		SZ_256
+
+/* The maximum number of VPEID bits supported by VLPI commands */
+#define ITS_MAX_VPEID_BITS	(16)
+#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))
+
+/* Convert page order to size in bytes */
+#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))
+
+struct event_lpi_map {
+	unsigned long		*lpi_map;
+	u16			*col_map;
+	irq_hw_number_t		lpi_base;
+	int			nr_lpis;
+	struct mutex		vlpi_lock;
+	struct its_vm		*vm;
+	struct its_vlpi_map	*vlpi_maps;
+	int			nr_vlpis;
+};
+
+/*
+ * The ITS view of a device - belongs to an ITS, owns an interrupt
+ * translation table, and a list of interrupts. If some of its
+ * LPIs are injected into a guest (GICv4), the event_map.vm field
+ * indicates which one.
+ */
+struct its_device {
+	struct list_head	entry;
+	struct its_node		*its;
+	struct event_lpi_map	event_map;
+	void			*itt;
+	u32			nr_ites;
+	u32			device_id;
+};
+
+static struct {
+	raw_spinlock_t		lock;
+	struct its_device	*dev;
+	struct its_vpe		**vpes;
+	int			next_victim;
+} vpe_proxy;
+
+static LIST_HEAD(its_nodes);
+static DEFINE_RAW_SPINLOCK(its_lock);
+static struct rdists *gic_rdists;
+static struct irq_domain *its_parent;
+
+static unsigned long its_list_map;
+static u16 vmovp_seq_num;
+static DEFINE_RAW_SPINLOCK(vmovp_lock);
+
+static DEFINE_IDA(its_vpeid_ida);
+
+#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
+#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
+#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
+
+static struct its_collection *dev_event_to_col(struct its_device *its_dev,
+					       u32 event)
+{
+	struct its_node *its = its_dev->its;
+
+	return its->collections + its_dev->event_map.col_map[event];
+}
+
+static struct its_collection *valid_col(struct its_collection *col)
+{
+	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
+		return NULL;
+
+	return col;
+}
+
+static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
+{
+	if (valid_col(its->collections + vpe->col_idx))
+		return vpe;
+
+	return NULL;
+}
+
+/*
+ * ITS command descriptors - parameters to be encoded in a command
+ * block.
+ */
+struct its_cmd_desc {
+	union {
+		struct {
+			struct its_device *dev;
+			u32 event_id;
+		} its_inv_cmd;
+
+		struct {
+			struct its_device *dev;
+			u32 event_id;
+		} its_clear_cmd;
+
+		struct {
+			struct its_device *dev;
+			u32 event_id;
+		} its_int_cmd;
+
+		struct {
+			struct its_device *dev;
+			int valid;
+		} its_mapd_cmd;
+
+		struct {
+			struct its_collection *col;
+			int valid;
+		} its_mapc_cmd;
+
+		struct {
+			struct its_device *dev;
+			u32 phys_id;
+			u32 event_id;
+		} its_mapti_cmd;
+
+		struct {
+			struct its_device *dev;
+			struct its_collection *col;
+			u32 event_id;
+		} its_movi_cmd;
+
+		struct {
+			struct its_device *dev;
+			u32 event_id;
+		} its_discard_cmd;
+
+		struct {
+			struct its_collection *col;
+		} its_invall_cmd;
+
+		struct {
+			struct its_vpe *vpe;
+		} its_vinvall_cmd;
+
+		struct {
+			struct its_vpe *vpe;
+			struct its_collection *col;
+			bool valid;
+		} its_vmapp_cmd;
+
+		struct {
+			struct its_vpe *vpe;
+			struct its_device *dev;
+			u32 virt_id;
+			u32 event_id;
+			bool db_enabled;
+		} its_vmapti_cmd;
+
+		struct {
+			struct its_vpe *vpe;
+			struct its_device *dev;
+			u32 event_id;
+			bool db_enabled;
+		} its_vmovi_cmd;
+
+		struct {
+			struct its_vpe *vpe;
+			struct its_collection *col;
+			u16 seq_num;
+			u16 its_list;
+		} its_vmovp_cmd;
+	};
+};
+
+/*
+ * The ITS command block, which is what the ITS actually parses.
+ */
+struct its_cmd_block {
+	u64	raw_cmd[4];
+};
+
+#define ITS_CMD_QUEUE_SZ		SZ_64K
+#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
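+
+/* Illustrative: 64kB of 32-byte (4 x u64) commands gives 2048 entries. */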
+
+typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
+						    struct its_cmd_block *,
+						    struct its_cmd_desc *);
+
+typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
+					      struct its_cmd_block *,
+					      struct its_cmd_desc *);
+
+static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
+{
+	u64 mask = GENMASK_ULL(h, l);
+	*raw_cmd &= ~mask;
+	*raw_cmd |= (val << l) & mask;
+}
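+
+/*
+ * Illustrative: its_encode_devid(cmd, 0x1234) below computes
+ * mask = GENMASK_ULL(63, 32), clears bits [63:32] of raw_cmd[0] and
+ * ORs in (0x1234ULL << 32) & mask, leaving the opcode in bits [7:0]
+ * untouched.
+ */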
+
+static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
+{
+	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
+}
+
+static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
+{
+	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
+}
+
+static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
+{
+	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
+}
+
+static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
+{
+	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
+}
+
+static void its_encode_size(struct its_cmd_block *cmd, u8 size)
+{
+	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
+}
+
+static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
+{
+	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
+}
+
+static void its_encode_valid(struct its_cmd_block *cmd, int valid)
+{
+	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
+}
+
+static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
+{
+	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
+}
+
+static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
+{
+	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
+}
+
+static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
+{
+	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
+}
+
+static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
+{
+	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
+}
+
+static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
+{
+	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
+}
+
+static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
+{
+	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
+}
+
+static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
+{
+	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
+}
+
+static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
+{
+	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
+}
+
+static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
+{
+	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
+}
+
+static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
+{
+	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
+}
+
+static inline void its_fixup_cmd(struct its_cmd_block *cmd)
+{
+	/* Let's fixup BE commands */
+	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
+	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
+	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
+	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
+}
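+
+/*
+ * cpu_to_le64() is a no-op on a little-endian kernel; on a big-endian
+ * one it byte-swaps each 64-bit word, since the ITS always parses the
+ * command queue as little-endian.
+ */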
+
+static struct its_collection *its_build_mapd_cmd(struct its_node *its,
+						 struct its_cmd_block *cmd,
+						 struct its_cmd_desc *desc)
+{
+	unsigned long itt_addr;
+	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
+
+	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
+	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
+
+	its_encode_cmd(cmd, GITS_CMD_MAPD);
+	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
+	its_encode_size(cmd, size - 1);
+	its_encode_itt(cmd, itt_addr);
+	its_encode_valid(cmd, desc->its_mapd_cmd.valid);
+
+	its_fixup_cmd(cmd);
+
+	return NULL;
+}
+
+static struct its_collection *its_build_mapc_cmd(struct its_node *its,
+						 struct its_cmd_block *cmd,
+						 struct its_cmd_desc *desc)
+{
+	its_encode_cmd(cmd, GITS_CMD_MAPC);
+	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
+	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
+	its_encode_valid(cmd, desc->its_mapc_cmd.valid);
+
+	its_fixup_cmd(cmd);
+
+	return desc->its_mapc_cmd.col;
+}
+
+static struct its_collection *its_build_mapti_cmd(struct its_node *its,
+						  struct its_cmd_block *cmd,
+						  struct its_cmd_desc *desc)
+{
+	struct its_collection *col;
+
+	col = dev_event_to_col(desc->its_mapti_cmd.dev,
+			       desc->its_mapti_cmd.event_id);
+
+	its_encode_cmd(cmd, GITS_CMD_MAPTI);
+	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
+	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
+	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
+	its_encode_collection(cmd, col->col_id);
+
+	its_fixup_cmd(cmd);
+
+	return valid_col(col);
+}
+
+static struct its_collection *its_build_movi_cmd(struct its_node *its,
+						 struct its_cmd_block *cmd,
+						 struct its_cmd_desc *desc)
+{
+	struct its_collection *col;
+
+	col = dev_event_to_col(desc->its_movi_cmd.dev,
+			       desc->its_movi_cmd.event_id);
+
+	its_encode_cmd(cmd, GITS_CMD_MOVI);
+	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
+	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
+	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
+
+	its_fixup_cmd(cmd);
+
+	return valid_col(col);
+}
+
+static struct its_collection *its_build_discard_cmd(struct its_node *its,
+						    struct its_cmd_block *cmd,
+						    struct its_cmd_desc *desc)
+{
+	struct its_collection *col;
+
+	col = dev_event_to_col(desc->its_discard_cmd.dev,
+			       desc->its_discard_cmd.event_id);
+
+	its_encode_cmd(cmd, GITS_CMD_DISCARD);
+	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
+	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
+
+	its_fixup_cmd(cmd);
+
+	return valid_col(col);
+}
+
+static struct its_collection *its_build_inv_cmd(struct its_node *its,
+						struct its_cmd_block *cmd,
+						struct its_cmd_desc *desc)
+{
+	struct its_collection *col;
+
+	col = dev_event_to_col(desc->its_inv_cmd.dev,
+			       desc->its_inv_cmd.event_id);
+
+	its_encode_cmd(cmd, GITS_CMD_INV);
+	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
+	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
+
+	its_fixup_cmd(cmd);
+
+	return valid_col(col);
+}
+
+static struct its_collection *its_build_int_cmd(struct its_node *its,
+						struct its_cmd_block *cmd,
+						struct its_cmd_desc *desc)
+{
+	struct its_collection *col;
+
+	col = dev_event_to_col(desc->its_int_cmd.dev,
+			       desc->its_int_cmd.event_id);
+
+	its_encode_cmd(cmd, GITS_CMD_INT);
+	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
+	its_encode_event_id(cmd, desc->its_int_cmd.event_id);
+
+	its_fixup_cmd(cmd);
+
+	return valid_col(col);
+}
+
+static struct its_collection *its_build_clear_cmd(struct its_node *its,
+						  struct its_cmd_block *cmd,
+						  struct its_cmd_desc *desc)
+{
+	struct its_collection *col;
+
+	col = dev_event_to_col(desc->its_clear_cmd.dev,
+			       desc->its_clear_cmd.event_id);
+
+	its_encode_cmd(cmd, GITS_CMD_CLEAR);
+	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
+	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
+
+	its_fixup_cmd(cmd);
+
+	return valid_col(col);
+}
+
+static struct its_collection *its_build_invall_cmd(struct its_node *its,
+						   struct its_cmd_block *cmd,
+						   struct its_cmd_desc *desc)
+{
+	its_encode_cmd(cmd, GITS_CMD_INVALL);
+	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
+
+	its_fixup_cmd(cmd);
+
+	return NULL;
+}
+
+static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
+					     struct its_cmd_block *cmd,
+					     struct its_cmd_desc *desc)
+{
+	its_encode_cmd(cmd, GITS_CMD_VINVALL);
+	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
+
+	its_fixup_cmd(cmd);
+
+	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
+}
+
+static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
+					   struct its_cmd_block *cmd,
+					   struct its_cmd_desc *desc)
+{
+	unsigned long vpt_addr;
+	u64 target;
+
+	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
+	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
+
+	its_encode_cmd(cmd, GITS_CMD_VMAPP);
+	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
+	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
+	its_encode_target(cmd, target);
+	its_encode_vpt_addr(cmd, vpt_addr);
+	its_encode_vpt_size(cmd, LPI_NRBITS - 1);
+
+	its_fixup_cmd(cmd);
+
+	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
+}
+
+static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
+					    struct its_cmd_block *cmd,
+					    struct its_cmd_desc *desc)
+{
+	u32 db;
+
+	if (desc->its_vmapti_cmd.db_enabled)
+		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
+	else
+		db = 1023;
+
+	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
+	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
+	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
+	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
+	its_encode_db_phys_id(cmd, db);
+	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
+
+	its_fixup_cmd(cmd);
+
+	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
+}
+
+static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
+					   struct its_cmd_block *cmd,
+					   struct its_cmd_desc *desc)
+{
+	u32 db;
+
+	if (desc->its_vmovi_cmd.db_enabled)
+		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
+	else
+		db = 1023;
+
+	its_encode_cmd(cmd, GITS_CMD_VMOVI);
+	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
+	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
+	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
+	its_encode_db_phys_id(cmd, db);
+	its_encode_db_valid(cmd, true);
+
+	its_fixup_cmd(cmd);
+
+	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
+}
+
+static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
+					   struct its_cmd_block *cmd,
+					   struct its_cmd_desc *desc)
+{
+	u64 target;
+
+	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
+	its_encode_cmd(cmd, GITS_CMD_VMOVP);
+	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
+	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
+	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
+	its_encode_target(cmd, target);
+
+	its_fixup_cmd(cmd);
+
+	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
+}
+
+static u64 its_cmd_ptr_to_offset(struct its_node *its,
+				 struct its_cmd_block *ptr)
+{
+	return (ptr - its->cmd_base) * sizeof(*ptr);
+}
+
+static int its_queue_full(struct its_node *its)
+{
+	int widx;
+	int ridx;
+
+	widx = its->cmd_write - its->cmd_base;
+	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
+
+	/* This is incredibly unlikely to happen, unless the ITS locks up. */
+	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
+		return 1;
+
+	return 0;
+}
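+
+/*
+ * Illustrative: with 2048 entries, widx == 2046 and ridx == 2047 reads
+ * as full; one slot is deliberately kept unused so that a full queue
+ * and an empty one (widx == ridx) remain distinguishable.
+ */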
+
+static struct its_cmd_block *its_allocate_entry(struct its_node *its)
+{
+	struct its_cmd_block *cmd;
+	u32 count = 1000000;	/* 1s! */
+
+	while (its_queue_full(its)) {
+		count--;
+		if (!count) {
+			pr_err_ratelimited("ITS queue not draining\n");
+			return NULL;
+		}
+		cpu_relax();
+		udelay(1);
+	}
+
+	cmd = its->cmd_write++;
+
+	/* Handle queue wrapping */
+	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
+		its->cmd_write = its->cmd_base;
+
+	/* Clear command  */
+	cmd->raw_cmd[0] = 0;
+	cmd->raw_cmd[1] = 0;
+	cmd->raw_cmd[2] = 0;
+	cmd->raw_cmd[3] = 0;
+
+	return cmd;
+}
+
+static struct its_cmd_block *its_post_commands(struct its_node *its)
+{
+	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
+
+	writel_relaxed(wr, its->base + GITS_CWRITER);
+
+	return its->cmd_write;
+}
+
+static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
+{
+	/*
+	 * Make sure the commands written to memory are observable by
+	 * the ITS.
+	 */
+	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
+		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
+	else
+		dsb(ishst);
+}
+
+static int its_wait_for_range_completion(struct its_node *its,
+					 struct its_cmd_block *from,
+					 struct its_cmd_block *to)
+{
+	u64 rd_idx, from_idx, to_idx;
+	u32 count = 1000000;	/* 1s! */
+
+	from_idx = its_cmd_ptr_to_offset(its, from);
+	to_idx = its_cmd_ptr_to_offset(its, to);
+
+	while (1) {
+		rd_idx = readl_relaxed(its->base + GITS_CREADR);
+
+		/* Direct case */
+		if (from_idx < to_idx && rd_idx >= to_idx)
+			break;
+
+		/* Wrapped case */
+		if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
+			break;
+
+		count--;
+		if (!count) {
+			pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
+					   from_idx, to_idx, rd_idx);
+			return -1;
+		}
+		cpu_relax();
+		udelay(1);
+	}
+
+	return 0;
+}
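+
+/*
+ * Illustrative: in a 64kB queue, from_idx == 0xff80 with to_idx == 0x40
+ * is the wrapped case; the loop exits once GITS_CREADR reports an
+ * offset in [to_idx, from_idx), i.e. past the wrap but not yet back at
+ * the starting point.
+ */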
+
+/* Warning, macro hell follows */
+#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
+void name(struct its_node *its,						\
+	  buildtype builder,						\
+	  struct its_cmd_desc *desc)					\
+{									\
+	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
+	synctype *sync_obj;						\
+	unsigned long flags;						\
+									\
+	raw_spin_lock_irqsave(&its->lock, flags);			\
+									\
+	cmd = its_allocate_entry(its);					\
+	if (!cmd) {		/* We're soooooo screwed... */		\
+		raw_spin_unlock_irqrestore(&its->lock, flags);		\
+		return;							\
+	}								\
+	sync_obj = builder(its, cmd, desc);				\
+	its_flush_cmd(its, cmd);					\
+									\
+	if (sync_obj) {							\
+		sync_cmd = its_allocate_entry(its);			\
+		if (!sync_cmd)						\
+			goto post;					\
+									\
+		buildfn(its, sync_cmd, sync_obj);			\
+		its_flush_cmd(its, sync_cmd);				\
+	}								\
+									\
+post:									\
+	next_cmd = its_post_commands(its);				\
+	raw_spin_unlock_irqrestore(&its->lock, flags);			\
+									\
+	if (its_wait_for_range_completion(its, cmd, next_cmd))		\
+		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
+}
+
+static void its_build_sync_cmd(struct its_node *its,
+			       struct its_cmd_block *sync_cmd,
+			       struct its_collection *sync_col)
+{
+	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
+	its_encode_target(sync_cmd, sync_col->target_address);
+
+	its_fixup_cmd(sync_cmd);
+}
+
+static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
+			     struct its_collection, its_build_sync_cmd)
+
+static void its_build_vsync_cmd(struct its_node *its,
+				struct its_cmd_block *sync_cmd,
+				struct its_vpe *sync_vpe)
+{
+	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
+	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
+
+	its_fixup_cmd(sync_cmd);
+}
+
+static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
+			     struct its_vpe, its_build_vsync_cmd)
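+
+/*
+ * The two instantiations above expand to its_send_single_command() and
+ * its_send_single_vcommand(): queue the builder's command, chase it
+ * with a SYNC (resp. VSYNC) aimed at the collection (resp. vPE) the
+ * builder returned, then busy-wait for GITS_CREADR to pass both.
+ */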
+
+static void its_send_int(struct its_device *dev, u32 event_id)
+{
+	struct its_cmd_desc desc;
+
+	desc.its_int_cmd.dev = dev;
+	desc.its_int_cmd.event_id = event_id;
+
+	its_send_single_command(dev->its, its_build_int_cmd, &desc);
+}
+
+static void its_send_clear(struct its_device *dev, u32 event_id)
+{
+	struct its_cmd_desc desc;
+
+	desc.its_clear_cmd.dev = dev;
+	desc.its_clear_cmd.event_id = event_id;
+
+	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
+}
+
+static void its_send_inv(struct its_device *dev, u32 event_id)
+{
+	struct its_cmd_desc desc;
+
+	desc.its_inv_cmd.dev = dev;
+	desc.its_inv_cmd.event_id = event_id;
+
+	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
+}
+
+static void its_send_mapd(struct its_device *dev, int valid)
+{
+	struct its_cmd_desc desc;
+
+	desc.its_mapd_cmd.dev = dev;
+	desc.its_mapd_cmd.valid = !!valid;
+
+	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
+}
+
+static void its_send_mapc(struct its_node *its, struct its_collection *col,
+			  int valid)
+{
+	struct its_cmd_desc desc;
+
+	desc.its_mapc_cmd.col = col;
+	desc.its_mapc_cmd.valid = !!valid;
+
+	its_send_single_command(its, its_build_mapc_cmd, &desc);
+}
+
+static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
+{
+	struct its_cmd_desc desc;
+
+	desc.its_mapti_cmd.dev = dev;
+	desc.its_mapti_cmd.phys_id = irq_id;
+	desc.its_mapti_cmd.event_id = id;
+
+	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
+}
+
+static void its_send_movi(struct its_device *dev,
+			  struct its_collection *col, u32 id)
+{
+	struct its_cmd_desc desc;
+
+	desc.its_movi_cmd.dev = dev;
+	desc.its_movi_cmd.col = col;
+	desc.its_movi_cmd.event_id = id;
+
+	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
+}
+
+static void its_send_discard(struct its_device *dev, u32 id)
+{
+	struct its_cmd_desc desc;
+
+	desc.its_discard_cmd.dev = dev;
+	desc.its_discard_cmd.event_id = id;
+
+	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
+}
+
+static void its_send_invall(struct its_node *its, struct its_collection *col)
+{
+	struct its_cmd_desc desc;
+
+	desc.its_invall_cmd.col = col;
+
+	its_send_single_command(its, its_build_invall_cmd, &desc);
+}
+
+static void its_send_vmapti(struct its_device *dev, u32 id)
+{
+	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+	struct its_cmd_desc desc;
+
+	desc.its_vmapti_cmd.vpe = map->vpe;
+	desc.its_vmapti_cmd.dev = dev;
+	desc.its_vmapti_cmd.virt_id = map->vintid;
+	desc.its_vmapti_cmd.event_id = id;
+	desc.its_vmapti_cmd.db_enabled = map->db_enabled;
+
+	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
+}
+
+static void its_send_vmovi(struct its_device *dev, u32 id)
+{
+	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+	struct its_cmd_desc desc;
+
+	desc.its_vmovi_cmd.vpe = map->vpe;
+	desc.its_vmovi_cmd.dev = dev;
+	desc.its_vmovi_cmd.event_id = id;
+	desc.its_vmovi_cmd.db_enabled = map->db_enabled;
+
+	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
+}
+
+static void its_send_vmapp(struct its_node *its,
+			   struct its_vpe *vpe, bool valid)
+{
+	struct its_cmd_desc desc;
+
+	desc.its_vmapp_cmd.vpe = vpe;
+	desc.its_vmapp_cmd.valid = valid;
+	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
+
+	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
+}
+
+static void its_send_vmovp(struct its_vpe *vpe)
+{
+	struct its_cmd_desc desc;
+	struct its_node *its;
+	unsigned long flags;
+	int col_id = vpe->col_idx;
+
+	desc.its_vmovp_cmd.vpe = vpe;
+	desc.its_vmovp_cmd.its_list = (u16)its_list_map;
+
+	if (!its_list_map) {
+		its = list_first_entry(&its_nodes, struct its_node, entry);
+		desc.its_vmovp_cmd.seq_num = 0;
+		desc.its_vmovp_cmd.col = &its->collections[col_id];
+		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
+		return;
+	}
+
+	/*
+	 * Yet another marvel of the architecture. If using the
+	 * its_list "feature", we need to make sure that all ITSs
+	 * receive all VMOVP commands in the same order. The only way
+	 * to guarantee this is to make vmovp a serialization point.
+	 *
+	 * Wall <-- Head.
+	 */
+	raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
+
+	/* Emit VMOVPs */
+	list_for_each_entry(its, &its_nodes, entry) {
+		if (!its->is_v4)
+			continue;
+
+		if (!vpe->its_vm->vlpi_count[its->list_nr])
+			continue;
+
+		desc.its_vmovp_cmd.col = &its->collections[col_id];
+		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
+	}
+
+	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
+static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
+{
+	struct its_cmd_desc desc;
+
+	desc.its_vinvall_cmd.vpe = vpe;
+	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
+}
+
+/*
+ * irqchip functions - assumes MSI, mostly.
+ */
+
+static inline u32 its_get_event_id(struct irq_data *d)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	return d->hwirq - its_dev->event_map.lpi_base;
+}
+
+static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
+{
+	irq_hw_number_t hwirq;
+	struct page *prop_page;
+	u8 *cfg;
+
+	if (irqd_is_forwarded_to_vcpu(d)) {
+		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+		u32 event = its_get_event_id(d);
+		struct its_vlpi_map *map;
+
+		prop_page = its_dev->event_map.vm->vprop_page;
+		map = &its_dev->event_map.vlpi_maps[event];
+		hwirq = map->vintid;
+
+		/* Remember the updated property */
+		map->properties &= ~clr;
+		map->properties |= set | LPI_PROP_GROUP1;
+	} else {
+		prop_page = gic_rdists->prop_page;
+		hwirq = d->hwirq;
+	}
+
+	cfg = page_address(prop_page) + hwirq - 8192;
+	*cfg &= ~clr;
+	*cfg |= set | LPI_PROP_GROUP1;
+
+	/*
+	 * Make the above write visible to the redistributors.
+	 * And yes, we're flushing exactly: One. Single. Byte.
+	 * Humpf...
+	 */
+	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
+		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
+	else
+		dsb(ishst);
+}
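+
+/*
+ * Illustrative: LPI INTIDs start at 8192, so "hwirq - 8192" above
+ * indexes the single configuration byte for this interrupt within the
+ * property page; callers passing clr == 0xff rewrite the whole byte.
+ */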
+
+static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+	lpi_write_config(d, clr, set);
+	its_send_inv(its_dev, its_get_event_id(d));
+}
+
+static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+
+	if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
+		return;
+
+	its_dev->event_map.vlpi_maps[event].db_enabled = enable;
+
+	/*
+	 * More fun with the architecture:
+	 *
+	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
+	 * value or to 1023, depending on the enable bit. But that
+	 * would be issuing a mapping for an /existing/ DevID+EventID
+	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
+	 * to the /same/ vPE, using this opportunity to adjust the
+	 * doorbell. Mouahahahaha. We loves it, Precious.
+	 */
+	its_send_vmovi(its_dev, event);
+}
+
+static void its_mask_irq(struct irq_data *d)
+{
+	if (irqd_is_forwarded_to_vcpu(d))
+		its_vlpi_set_doorbell(d, false);
+
+	lpi_update_config(d, LPI_PROP_ENABLED, 0);
+}
+
+static void its_unmask_irq(struct irq_data *d)
+{
+	if (irqd_is_forwarded_to_vcpu(d))
+		its_vlpi_set_doorbell(d, true);
+
+	lpi_update_config(d, 0, LPI_PROP_ENABLED);
+}
+
+static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+			    bool force)
+{
+	unsigned int cpu;
+	const struct cpumask *cpu_mask = cpu_online_mask;
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	struct its_collection *target_col;
+	u32 id = its_get_event_id(d);
+
+	/* A forwarded interrupt should use irq_set_vcpu_affinity */
+	if (irqd_is_forwarded_to_vcpu(d))
+		return -EINVAL;
+
+	/* LPIs cannot be routed to a redistributor on a foreign node */
+	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+		if (its_dev->its->numa_node >= 0) {
+			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+			if (!cpumask_intersects(mask_val, cpu_mask))
+				return -EINVAL;
+		}
+	}
+
+	cpu = cpumask_any_and(mask_val, cpu_mask);
+
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	/* don't set the affinity when the target cpu is the same as the current one */
+	if (cpu != its_dev->event_map.col_map[id]) {
+		target_col = &its_dev->its->collections[cpu];
+		its_send_movi(its_dev, target_col, id);
+		its_dev->event_map.col_map[id] = cpu;
+		irq_data_update_effective_affinity(d, cpumask_of(cpu));
+	}
+
+	return IRQ_SET_MASK_OK_DONE;
+}
+
+static u64 its_irq_get_msi_base(struct its_device *its_dev)
+{
+	struct its_node *its = its_dev->its;
+
+	return its->phys_base + GITS_TRANSLATER;
+}
+
+static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	struct its_node *its;
+	u64 addr;
+
+	its = its_dev->its;
+	addr = its->get_msi_base(its_dev);
+
+	msg->address_lo		= lower_32_bits(addr);
+	msg->address_hi		= upper_32_bits(addr);
+	msg->data		= its_get_event_id(d);
+
+	iommu_dma_map_msi_msg(d->irq, msg);
+}
+
+static int its_irq_set_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which,
+				     bool state)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+
+	if (which != IRQCHIP_STATE_PENDING)
+		return -EINVAL;
+
+	if (state)
+		its_send_int(its_dev, event);
+	else
+		its_send_clear(its_dev, event);
+
+	return 0;
+}
+
+static void its_map_vm(struct its_node *its, struct its_vm *vm)
+{
+	unsigned long flags;
+
+	/* Not using the ITS list? Everything is always mapped. */
+	if (!its_list_map)
+		return;
+
+	raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+	/*
+	 * If the VM wasn't mapped yet, iterate over the vpes and get
+	 * them mapped now.
+	 */
+	vm->vlpi_count[its->list_nr]++;
+
+	if (vm->vlpi_count[its->list_nr] == 1) {
+		int i;
+
+		for (i = 0; i < vm->nr_vpes; i++) {
+			struct its_vpe *vpe = vm->vpes[i];
+			struct irq_data *d = irq_get_irq_data(vpe->irq);
+
+			/* Map the VPE to the first possible CPU */
+			vpe->col_idx = cpumask_first(cpu_online_mask);
+			its_send_vmapp(its, vpe, true);
+			its_send_vinvall(its, vpe);
+			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+		}
+	}
+
+	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
+static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
+{
+	unsigned long flags;
+
+	/* Not using the ITS list? Everything is always mapped. */
+	if (!its_list_map)
+		return;
+
+	raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+	if (!--vm->vlpi_count[its->list_nr]) {
+		int i;
+
+		for (i = 0; i < vm->nr_vpes; i++)
+			its_send_vmapp(its, vm->vpes[i], false);
+	}
+
+	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
+static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+	int ret = 0;
+
+	if (!info->map)
+		return -EINVAL;
+
+	mutex_lock(&its_dev->event_map.vlpi_lock);
+
+	if (!its_dev->event_map.vm) {
+		struct its_vlpi_map *maps;
+
+		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
+			       GFP_KERNEL);
+		if (!maps) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		its_dev->event_map.vm = info->map->vm;
+		its_dev->event_map.vlpi_maps = maps;
+	} else if (its_dev->event_map.vm != info->map->vm) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Get our private copy of the mapping information */
+	its_dev->event_map.vlpi_maps[event] = *info->map;
+
+	if (irqd_is_forwarded_to_vcpu(d)) {
+		/* Already mapped, move it around */
+		its_send_vmovi(its_dev, event);
+	} else {
+		/* Ensure all the VPEs are mapped on this ITS */
+		its_map_vm(its_dev->its, info->map->vm);
+
+		/*
+		 * Flag the interrupt as forwarded so that we can
+		 * start poking the virtual property table.
+		 */
+		irqd_set_forwarded_to_vcpu(d);
+
+		/* Write out the property to the prop table */
+		lpi_write_config(d, 0xff, info->map->properties);
+
+		/* Drop the physical mapping */
+		its_send_discard(its_dev, event);
+
+		/* and install the virtual one */
+		its_send_vmapti(its_dev, event);
+
+		/* Increment the number of VLPIs */
+		its_dev->event_map.nr_vlpis++;
+	}
+
+out:
+	mutex_unlock(&its_dev->event_map.vlpi_lock);
+	return ret;
+}
+
+static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+	int ret = 0;
+
+	mutex_lock(&its_dev->event_map.vlpi_lock);
+
+	if (!its_dev->event_map.vm ||
+	    !its_dev->event_map.vlpi_maps[event].vm) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Copy our mapping information to the incoming request */
+	*info->map = its_dev->event_map.vlpi_maps[event];
+
+out:
+	mutex_unlock(&its_dev->event_map.vlpi_lock);
+	return ret;
+}
+
+static int its_vlpi_unmap(struct irq_data *d)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+	int ret = 0;
+
+	mutex_lock(&its_dev->event_map.vlpi_lock);
+
+	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Drop the virtual mapping */
+	its_send_discard(its_dev, event);
+
+	/* and restore the physical one */
+	irqd_clr_forwarded_to_vcpu(d);
+	its_send_mapti(its_dev, d->hwirq, event);
+	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
+				    LPI_PROP_ENABLED |
+				    LPI_PROP_GROUP1));
+
+	/* Potentially unmap the VM from this ITS */
+	its_unmap_vm(its_dev->its, its_dev->event_map.vm);
+
+	/*
+	 * Drop the refcount and make the device available again if
+	 * this was the last VLPI.
+	 */
+	if (!--its_dev->event_map.nr_vlpis) {
+		its_dev->event_map.vm = NULL;
+		kfree(its_dev->event_map.vlpi_maps);
+	}
+
+out:
+	mutex_unlock(&its_dev->event_map.vlpi_lock);
+	return ret;
+}
+
+static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
+		return -EINVAL;
+
+	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
+		lpi_update_config(d, 0xff, info->config);
+	else
+		lpi_write_config(d, 0xff, info->config);
+	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
+
+	return 0;
+}
+
+static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	struct its_cmd_info *info = vcpu_info;
+
+	/* Need a v4 ITS */
+	if (!its_dev->its->is_v4)
+		return -EINVAL;
+
+	/* Unmap request? */
+	if (!info)
+		return its_vlpi_unmap(d);
+
+	switch (info->cmd_type) {
+	case MAP_VLPI:
+		return its_vlpi_map(d, info);
+
+	case GET_VLPI:
+		return its_vlpi_get(d, info);
+
+	case PROP_UPDATE_VLPI:
+	case PROP_UPDATE_AND_INV_VLPI:
+		return its_vlpi_prop_update(d, info);
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static struct irq_chip its_irq_chip = {
+	.name			= "ITS",
+	.irq_mask		= its_mask_irq,
+	.irq_unmask		= its_unmask_irq,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_affinity	= its_set_affinity,
+	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
+	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
+	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
+};
+
+
+/*
+ * How we allocate LPIs:
+ *
+ * lpi_range_list contains ranges of LPIs that are available to
+ * allocate from. To allocate LPIs, just pick the first range that
+ * fits the required allocation, and reduce it by the required
+ * amount. Once empty, remove the range from the list.
+ *
+ * To free a range of LPIs, add a free range to the list, sort it and
+ * merge the result if the new range happens to be adjacent to an
+ * already free block.
+ *
+ * The consequence of the above is that allocation cost is low, but
+ * freeing is expensive. We assume that freeing rarely occurs.
+ */
+#define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */
+
+static DEFINE_MUTEX(lpi_range_lock);
+static LIST_HEAD(lpi_range_list);
+
+struct lpi_range {
+	struct list_head	entry;
+	u32			base_id;
+	u32			span;
+};
+
+static struct lpi_range *mk_lpi_range(u32 base, u32 span)
+{
+	struct lpi_range *range;
+
+	range = kzalloc(sizeof(*range), GFP_KERNEL);
+	if (range) {
+		INIT_LIST_HEAD(&range->entry);
+		range->base_id = base;
+		range->span = span;
+	}
+
+	return range;
+}
+
+static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct lpi_range *ra, *rb;
+
+	ra = container_of(a, struct lpi_range, entry);
+	rb = container_of(b, struct lpi_range, entry);
+
+	return rb->base_id - ra->base_id;
+}
+
+static void merge_lpi_ranges(void)
+{
+	struct lpi_range *range, *tmp;
+
+	list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+		if (!list_is_last(&range->entry, &lpi_range_list) &&
+		    (tmp->base_id == (range->base_id + range->span))) {
+			tmp->base_id = range->base_id;
+			tmp->span += range->span;
+			list_del(&range->entry);
+			kfree(range);
+		}
+	}
+}
+
+static int alloc_lpi_range(u32 nr_lpis, u32 *base)
+{
+	struct lpi_range *range, *tmp;
+	int err = -ENOSPC;
+
+	mutex_lock(&lpi_range_lock);
+
+	list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+		if (range->span >= nr_lpis) {
+			*base = range->base_id;
+			range->base_id += nr_lpis;
+			range->span -= nr_lpis;
+
+			if (range->span == 0) {
+				list_del(&range->entry);
+				kfree(range);
+			}
+
+			err = 0;
+			break;
+		}
+	}
+
+	mutex_unlock(&lpi_range_lock);
+
+	pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
+	return err;
+}
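+
+/*
+ * Illustrative: starting from a single free range [8192, 8192 + N),
+ * alloc_lpi_range(32, &base) returns base == 8192 and shrinks the
+ * range to [8224, 8192 + N); a later free_lpi_range(8192, 32) adds the
+ * block back and merge_lpi_ranges() coalesces the two neighbours.
+ */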
+
+static int free_lpi_range(u32 base, u32 nr_lpis)
+{
+	struct lpi_range *new;
+	int err = 0;
+
+	mutex_lock(&lpi_range_lock);
+
+	new = mk_lpi_range(base, nr_lpis);
+	if (!new) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	list_add(&new->entry, &lpi_range_list);
+	list_sort(NULL, &lpi_range_list, lpi_range_cmp);
+	merge_lpi_ranges();
+out:
+	mutex_unlock(&lpi_range_lock);
+	return err;
+}
+
+static int __init its_lpi_init(u32 id_bits)
+{
+	u32 lpis = (1UL << id_bits) - 8192;
+	u32 numlpis;
+	int err;
+
+	numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
+
+	if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
+		lpis = numlpis;
+		pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
+			lpis);
+	}
+
+	/*
+	 * Initializing the allocator is just the same as freeing the
+	 * full range of LPIs.
+	 */
+	err = free_lpi_range(8192, lpis);
+	pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
+	return err;
+}
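+
+/*
+ * Illustrative: with id_bits == 16 the allocator is seeded with
+ * 65536 - 8192 == 57344 LPIs (everything above the SGI/PPI/SPI space),
+ * unless GICD_TYPER advertises a smaller hypervisor-enforced count.
+ */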
+
+static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
+{
+	unsigned long *bitmap = NULL;
+	int err = 0;
+
+	do {
+		err = alloc_lpi_range(nr_irqs, base);
+		if (!err)
+			break;
+
+		nr_irqs /= 2;
+	} while (nr_irqs > 0);
+
+	if (err)
+		goto out;
+
+	bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof(long), GFP_ATOMIC);
+	if (!bitmap)
+		goto out;
+
+	*nr_ids = nr_irqs;
+
+out:
+	if (!bitmap)
+		*base = *nr_ids = 0;
+
+	return bitmap;
+}
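+
+/*
+ * Illustrative: a request for 96 LPIs that no free range can satisfy
+ * is retried at 48, then 24, and so on; *nr_ids tells the caller how
+ * many events were actually obtained.
+ */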
+
+static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
+{
+	WARN_ON(free_lpi_range(base, nr_ids));
+	kfree(bitmap);
+}
+
+static struct page *its_allocate_prop_table(gfp_t gfp_flags)
+{
+	struct page *prop_page;
+
+	prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
+	if (!prop_page)
+		return NULL;
+
+	/* Priority 0xa0, Group-1, disabled */
+	memset(page_address(prop_page),
+	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
+	       LPI_PROPBASE_SZ);
+
+	/* Make sure the GIC will observe the written configuration */
+	gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);
+
+	return prop_page;
+}
+
+static void its_free_prop_table(struct page *prop_page)
+{
+	free_pages((unsigned long)page_address(prop_page),
+		   get_order(LPI_PROPBASE_SZ));
+}
+
+static int __init its_alloc_lpi_tables(void)
+{
+	phys_addr_t paddr;
+
+	lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
+				ITS_MAX_LPI_NRBITS);
+	gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
+	if (!gic_rdists->prop_page) {
+		pr_err("Failed to allocate PROPBASE\n");
+		return -ENOMEM;
+	}
+
+	paddr = page_to_phys(gic_rdists->prop_page);
+	pr_info("GIC: using LPI property table @%pa\n", &paddr);
+
+	return its_lpi_init(lpi_id_bits);
+}
+
+static const char *its_base_type_string[] = {
+	[GITS_BASER_TYPE_DEVICE]	= "Devices",
+	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
+	[GITS_BASER_TYPE_RESERVED3]	= "Reserved (3)",
+	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
+	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
+	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
+	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
+};
+
+static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
+{
+	u32 idx = baser - its->tables;
+
+	return gits_read_baser(its->base + GITS_BASER + (idx << 3));
+}
+
+static void its_write_baser(struct its_node *its, struct its_baser *baser,
+			    u64 val)
+{
+	u32 idx = baser - its->tables;
+
+	gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
+	baser->val = its_read_baser(its, baser);
+}
+
+static int its_setup_baser(struct its_node *its, struct its_baser *baser,
+			   u64 cache, u64 shr, u32 psz, u32 order,
+			   bool indirect)
+{
+	u64 val = its_read_baser(its, baser);
+	u64 esz = GITS_BASER_ENTRY_SIZE(val);
+	u64 type = GITS_BASER_TYPE(val);
+	u64 baser_phys, tmp;
+	u32 alloc_pages;
+	void *base;
+
+retry_alloc_baser:
+	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
+	if (alloc_pages > GITS_BASER_PAGES_MAX) {
+		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
+			&its->phys_base, its_base_type_string[type],
+			alloc_pages, GITS_BASER_PAGES_MAX);
+		alloc_pages = GITS_BASER_PAGES_MAX;
+		order = get_order(GITS_BASER_PAGES_MAX * psz);
+	}
+
+	base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!base)
+		return -ENOMEM;
+
+	baser_phys = virt_to_phys(base);
+
+	/* Check if the physical address of the memory is above 48bits */
+	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
+
+		/* 52bit PA is supported only when PageSize=64K */
+		if (psz != SZ_64K) {
+			pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
+			free_pages((unsigned long)base, order);
+			return -ENXIO;
+		}
+
+		/* Convert 52bit PA to 48bit field */
+		baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
+	}
+
+retry_baser:
+	val = (baser_phys					 |
+		(type << GITS_BASER_TYPE_SHIFT)			 |
+		((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)	 |
+		((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)	 |
+		cache						 |
+		shr						 |
+		GITS_BASER_VALID);
+
+	val |=	indirect ? GITS_BASER_INDIRECT : 0x0;
+
+	switch (psz) {
+	case SZ_4K:
+		val |= GITS_BASER_PAGE_SIZE_4K;
+		break;
+	case SZ_16K:
+		val |= GITS_BASER_PAGE_SIZE_16K;
+		break;
+	case SZ_64K:
+		val |= GITS_BASER_PAGE_SIZE_64K;
+		break;
+	}
+
+	its_write_baser(its, baser, val);
+	tmp = baser->val;
+
+	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
+		/*
+		 * Shareability didn't stick. Just use
+		 * whatever the read reported, which is likely
+		 * to be the only thing this ITS
+		 * supports. If that's zero, make it
+		 * non-cacheable as well.
+		 */
+		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+		if (!shr) {
+			cache = GITS_BASER_nC;
+			gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
+		}
+		goto retry_baser;
+	}
+
+	if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
+		/*
+		 * Page size didn't stick. Let's try a smaller
+		 * size and retry. If we reach 4K, then
+		 * something is horribly wrong...
+		 */
+		free_pages((unsigned long)base, order);
+		baser->base = NULL;
+
+		switch (psz) {
+		case SZ_16K:
+			psz = SZ_4K;
+			goto retry_alloc_baser;
+		case SZ_64K:
+			psz = SZ_16K;
+			goto retry_alloc_baser;
+		}
+	}
+
+	if (val != tmp) {
+		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
+		       &its->phys_base, its_base_type_string[type],
+		       val, tmp);
+		free_pages((unsigned long)base, order);
+		return -ENXIO;
+	}
+
+	baser->order = order;
+	baser->base = base;
+	baser->psz = psz;
+	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
+
+	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
+		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
+		its_base_type_string[type],
+		(unsigned long)virt_to_phys(base),
+		indirect ? "indirect" : "flat", (int)esz,
+		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
+
+	return 0;
+}
+
+static bool its_parse_indirect_baser(struct its_node *its,
+				     struct its_baser *baser,
+				     u32 psz, u32 *order, u32 ids)
+{
+	u64 tmp = its_read_baser(its, baser);
+	u64 type = GITS_BASER_TYPE(tmp);
+	u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
+	u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
+	u32 new_order = *order;
+	bool indirect = false;
+
+	/* No need to enable indirection if memory requirement < (psz * 2) bytes */
+	if ((esz << ids) > (psz * 2)) {
+		/*
+		 * Find out whether hw supports a single or two-level table
+		 * by reading bit at offset '62' after writing '1' to it.
+		 */
+		its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
+		indirect = !!(baser->val & GITS_BASER_INDIRECT);
+
+		if (indirect) {
+			/*
+			 * The size of the lvl2 table is equal to the ITS
+			 * page size, 'psz'. To compute the lvl1 table size,
+			 * subtract from 'ids' (as reported by the ITS
+			 * hardware) the ID bits covered by a single lvl2
+			 * table; lvl1 then needs one GITS_LVL1_ENTRY_SIZE
+			 * entry per remaining ID.
+			 */
+			ids -= ilog2(psz / (int)esz);
+			esz = GITS_LVL1_ENTRY_SIZE;
+		}
+	}
+
+	/*
+	 * Allocate as many entries as required to fit the
+	 * range of device IDs that the ITS can grok... The ID
+	 * space being incredibly sparse, this results in a
+	 * massive waste of memory if the two-level device table
+	 * feature is not supported by the hardware.
+	 */
+	new_order = max_t(u32, get_order(esz << ids), new_order);
+	if (new_order >= MAX_ORDER) {
+		new_order = MAX_ORDER - 1;
+		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
+		pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
+			&its->phys_base, its_base_type_string[type],
+			its->device_ids, ids);
+	}
+
+	*order = new_order;
+
+	return indirect;
+}
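+
+/*
+ * Illustrative sizing: with psz == 64kB, esz == 8 and ids == 20, a
+ * flat table would need 8 << 20 == 8MB, so indirection is attempted.
+ * One 64kB lvl2 page then covers 8192 == 2^13 IDs, leaving
+ * 20 - 13 == 7 bits for lvl1, i.e. 128 eight-byte lvl1 entries in a
+ * single page.
+ */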
+
+static void its_free_tables(struct its_node *its)
+{
+	int i;
+
+	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+		if (its->tables[i].base) {
+			free_pages((unsigned long)its->tables[i].base,
+				   its->tables[i].order);
+			its->tables[i].base = NULL;
+		}
+	}
+}
+
+static int its_alloc_tables(struct its_node *its)
+{
+	u64 shr = GITS_BASER_InnerShareable;
+	u64 cache = GITS_BASER_RaWaWb;
+	u32 psz = SZ_64K;
+	int err, i;
+
+	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
+		/* erratum 24313: ignore memory access type */
+		cache = GITS_BASER_nCnB;
+
+	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+		struct its_baser *baser = its->tables + i;
+		u64 val = its_read_baser(its, baser);
+		u64 type = GITS_BASER_TYPE(val);
+		u32 order = get_order(psz);
+		bool indirect = false;
+
+		switch (type) {
+		case GITS_BASER_TYPE_NONE:
+			continue;
+
+		case GITS_BASER_TYPE_DEVICE:
+			indirect = its_parse_indirect_baser(its, baser,
+							    psz, &order,
+							    its->device_ids);
+			break;
+
+		case GITS_BASER_TYPE_VCPU:
+			indirect = its_parse_indirect_baser(its, baser,
+							    psz, &order,
+							    ITS_MAX_VPEID_BITS);
+			break;
+		}
+
+		err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
+		if (err < 0) {
+			its_free_tables(its);
+			return err;
+		}
+
+		/* Update settings which will be used for next BASERn */
+		psz = baser->psz;
+		cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
+		shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
+	}
+
+	return 0;
+}
+
+static int its_alloc_collections(struct its_node *its)
+{
+	int i;
+
+	its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
+				   GFP_KERNEL);
+	if (!its->collections)
+		return -ENOMEM;
+
+	for (i = 0; i < nr_cpu_ids; i++)
+		its->collections[i].target_address = ~0ULL;
+
+	return 0;
+}
+
+static struct page *its_allocate_pending_table(gfp_t gfp_flags)
+{
+	struct page *pend_page;
+	/*
+	 * The pending pages have to be at least 64kB aligned,
+	 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
+	 */
+	pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
+				get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+	if (!pend_page)
+		return NULL;
+
+	/* Make sure the GIC will observe the zero-ed page */
+	gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
+
+	return pend_page;
+}
+
+static void its_free_pending_table(struct page *pt)
+{
+	free_pages((unsigned long)page_address(pt),
+		   get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+}
+
+static void its_cpu_init_lpis(void)
+{
+	void __iomem *rbase = gic_data_rdist_rd_base();
+	struct page *pend_page;
+	u64 val, tmp;
+
+	/* If we didn't allocate the pending table yet, do it now */
+	pend_page = gic_data_rdist()->pend_page;
+	if (!pend_page) {
+		phys_addr_t paddr;
+
+		pend_page = its_allocate_pending_table(GFP_NOWAIT);
+		if (!pend_page) {
+			pr_err("Failed to allocate PENDBASE for CPU%d\n",
+			       smp_processor_id());
+			return;
+		}
+
+		paddr = page_to_phys(pend_page);
+		pr_info("CPU%d: using LPI pending table @%pa\n",
+			smp_processor_id(), &paddr);
+		gic_data_rdist()->pend_page = pend_page;
+	}
+
+	/* set PROPBASE */
+	val = (page_to_phys(gic_rdists->prop_page) |
+	       GICR_PROPBASER_InnerShareable |
+	       GICR_PROPBASER_RaWaWb |
+	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
+
+	gicr_write_propbaser(val, rbase + GICR_PROPBASER);
+	tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
+
+	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
+		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
+			/*
+			 * The HW reports non-shareable, we must
+			 * remove the cacheability attributes as
+			 * well.
+			 */
+			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
+				 GICR_PROPBASER_CACHEABILITY_MASK);
+			val |= GICR_PROPBASER_nC;
+			gicr_write_propbaser(val, rbase + GICR_PROPBASER);
+		}
+		pr_info_once("GIC: using cache flushing for LPI property table\n");
+		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
+	}
+
+	/* set PENDBASE */
+	val = (page_to_phys(pend_page) |
+	       GICR_PENDBASER_InnerShareable |
+	       GICR_PENDBASER_RaWaWb);
+
+	gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
+	tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
+
+	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
+		/*
+		 * The HW reports non-shareable, we must remove the
+		 * cacheability attributes as well.
+		 */
+		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
+			 GICR_PENDBASER_CACHEABILITY_MASK);
+		val |= GICR_PENDBASER_nC;
+		gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
+	}
+
+	/* Enable LPIs */
+	val = readl_relaxed(rbase + GICR_CTLR);
+	val |= GICR_CTLR_ENABLE_LPIS;
+	writel_relaxed(val, rbase + GICR_CTLR);
+
+	/* Make sure the GIC has seen the above */
+	dsb(sy);
+}
+
+static void its_cpu_init_collection(struct its_node *its)
+{
+	int cpu = smp_processor_id();
+	u64 target;
+
+	/* avoid cross-node collections and their mapping */
+	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+		struct device_node *cpu_node;
+
+		cpu_node = of_get_cpu_node(cpu, NULL);
+		if (its->numa_node != NUMA_NO_NODE &&
+			its->numa_node != of_node_to_nid(cpu_node))
+			return;
+	}
+
+	/*
+	 * We now have to bind each collection to its target
+	 * redistributor.
+	 */
+	if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
+		/*
+		 * This ITS wants the physical address of the
+		 * redistributor.
+		 */
+		target = gic_data_rdist()->phys_base;
+	} else {
+		/* This ITS wants a linear CPU number. */
+		target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
+		target = GICR_TYPER_CPU_NUMBER(target) << 16;
+	}
+
+	/* Perform collection mapping */
+	its->collections[cpu].target_address = target;
+	its->collections[cpu].col_id = cpu;
+
+	its_send_mapc(its, &its->collections[cpu], 1);
+	its_send_invall(its, &its->collections[cpu]);
+}
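+
+/*
+ * Illustrative: with GITS_TYPER.PTA == 0, redistributor 3 yields a
+ * collection target of 3 << 16; either way the low 16 bits of
+ * target_address stay zero, which is exactly what valid_col() checks.
+ */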
+
+static void its_cpu_init_collections(void)
+{
+	struct its_node *its;
+
+	raw_spin_lock(&its_lock);
+
+	list_for_each_entry(its, &its_nodes, entry)
+		its_cpu_init_collection(its);
+
+	raw_spin_unlock(&its_lock);
+}
+
+static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
+{
+	struct its_device *its_dev = NULL, *tmp;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&its->lock, flags);
+
+	list_for_each_entry(tmp, &its->its_device_list, entry) {
+		if (tmp->device_id == dev_id) {
+			its_dev = tmp;
+			break;
+		}
+	}
+
+	raw_spin_unlock_irqrestore(&its->lock, flags);
+
+	return its_dev;
+}
+
+static struct its_baser *its_get_baser(struct its_node *its, u32 type)
+{
+	int i;
+
+	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+		if (GITS_BASER_TYPE(its->tables[i].val) == type)
+			return &its->tables[i];
+	}
+
+	return NULL;
+}
+
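+/*
+ * Make sure @id fits in the table described by @baser. For a flat
+ * table this is a simple bounds check; for a two-level table, the
+ * level-2 page is allocated (and flushed) on demand and hooked into
+ * the matching level-1 entry.
+ */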
+static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
+{
+	struct page *page;
+	u32 esz, idx;
+	__le64 *table;
+
+	/* Don't allow device id that exceeds single, flat table limit */
+	esz = GITS_BASER_ENTRY_SIZE(baser->val);
+	if (!(baser->val & GITS_BASER_INDIRECT))
+		return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
+
+	/* Compute 1st level table index & check if that exceeds table limit */
+	idx = id >> ilog2(baser->psz / esz);
+	if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
+		return false;
+
+	table = baser->base;
+
+	/* Allocate memory for 2nd level table */
+	if (!table[idx]) {
+		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
+		if (!page)
+			return false;
+
+		/* Flush Lvl2 table to PoC if hw doesn't support coherency */
+		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
+			gic_flush_dcache_to_poc(page_address(page), baser->psz);
+
+		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
+
+		/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
+		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
+			gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
+
+		/* Ensure updated table contents are visible to ITS hardware */
+		dsb(sy);
+	}
+
+	return true;
+}
+
+static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
+{
+	struct its_baser *baser;
+
+	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
+
+	/* Don't allow device id that exceeds ITS hardware limit */
+	if (!baser)
+		return (ilog2(dev_id) < its->device_ids);
+
+	return its_alloc_table_entry(baser, dev_id);
+}
+
+static bool its_alloc_vpe_table(u32 vpe_id)
+{
+	struct its_node *its;
+
+	/*
+	 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
+	 * could try and only do it on ITSs corresponding to devices
+	 * that have interrupts targeted at this VPE, but the
+	 * complexity becomes crazy (and you have tons of memory
+	 * anyway, right?).
+	 */
+	list_for_each_entry(its, &its_nodes, entry) {
+		struct its_baser *baser;
+
+		if (!its->is_v4)
+			continue;
+
+		baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
+		if (!baser)
+			return false;
+
+		if (!its_alloc_table_entry(baser, vpe_id))
+			return false;
+	}
+
+	return true;
+}
+
+static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
+					    int nvecs, bool alloc_lpis)
+{
+	struct its_device *dev;
+	unsigned long *lpi_map = NULL;
+	unsigned long flags;
+	u16 *col_map = NULL;
+	void *itt;
+	int lpi_base;
+	int nr_lpis;
+	int nr_ites;
+	int sz;
+
+	if (!its_alloc_device_table(its, dev_id))
+		return NULL;
+
+	if (WARN_ON(!is_power_of_2(nvecs)))
+		nvecs = roundup_pow_of_two(nvecs);
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	/*
+	 * Even if the device wants a single LPI, the ITT must be
+	 * sized as a power of two (and you need at least one bit...).
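+	 * A request for a single LPI thus still gets a two-entry ITT.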
+	 */
+	nr_ites = max(2, nvecs);
+	sz = nr_ites * its->ite_size;
+	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
+	itt = kzalloc(sz, GFP_KERNEL);
+	if (alloc_lpis) {
+		lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
+		if (lpi_map)
+			col_map = kcalloc(nr_lpis, sizeof(*col_map),
+					  GFP_KERNEL);
+	} else {
+		col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
+		nr_lpis = 0;
+		lpi_base = 0;
+	}
+
+	if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
+		kfree(dev);
+		kfree(itt);
+		kfree(lpi_map);
+		kfree(col_map);
+		return NULL;
+	}
+
+	gic_flush_dcache_to_poc(itt, sz);
+
+	dev->its = its;
+	dev->itt = itt;
+	dev->nr_ites = nr_ites;
+	dev->event_map.lpi_map = lpi_map;
+	dev->event_map.col_map = col_map;
+	dev->event_map.lpi_base = lpi_base;
+	dev->event_map.nr_lpis = nr_lpis;
+	mutex_init(&dev->event_map.vlpi_lock);
+	dev->device_id = dev_id;
+	INIT_LIST_HEAD(&dev->entry);
+
+	raw_spin_lock_irqsave(&its->lock, flags);
+	list_add(&dev->entry, &its->its_device_list);
+	raw_spin_unlock_irqrestore(&its->lock, flags);
+
+	/* Map device to its ITT */
+	its_send_mapd(dev, 1);
+
+	return dev;
+}
+
+static void its_free_device(struct its_device *its_dev)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
+	list_del(&its_dev->entry);
+	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
+	kfree(its_dev->itt);
+	kfree(its_dev);
+}
+
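+/* Grab the first free event for this device and return the matching hwirq */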
+static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
+{
+	int idx;
+
+	idx = find_first_zero_bit(dev->event_map.lpi_map,
+				  dev->event_map.nr_lpis);
+	if (idx == dev->event_map.nr_lpis)
+		return -ENOSPC;
+
+	*hwirq = dev->event_map.lpi_base + idx;
+	set_bit(idx, dev->event_map.lpi_map);
+
+	return 0;
+}
+
+static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
+			   int nvec, msi_alloc_info_t *info)
+{
+	struct its_node *its;
+	struct its_device *its_dev;
+	struct msi_domain_info *msi_info;
+	u32 dev_id;
+
+	/*
+	 * We ignore "dev" entierely, and rely on the dev_id that has
+	 * been passed via the scratchpad. This limits this domain's
+	 * usefulness to upper layers that definitely know that they
+	 * are built on top of the ITS.
+	 */
+	dev_id = info->scratchpad[0].ul;
+
+	msi_info = msi_get_domain_info(domain);
+	its = msi_info->data;
+
+	if (!gic_rdists->has_direct_lpi &&
+	    vpe_proxy.dev &&
+	    vpe_proxy.dev->its == its &&
+	    dev_id == vpe_proxy.dev->device_id) {
+		/* Bad luck. Get yourself a better implementation */
+		WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
+			  dev_id);
+		return -EINVAL;
+	}
+
+	its_dev = its_find_device(its, dev_id);
+	if (its_dev) {
+		/*
+		 * We already have seen this ID, probably through
+		 * another alias (PCI bridge of some sort). No need to
+		 * create the device.
+		 */
+		pr_debug("Reusing ITT for devID %x\n", dev_id);
+		goto out;
+	}
+
+	its_dev = its_create_device(its, dev_id, nvec, true);
+	if (!its_dev)
+		return -ENOMEM;
+
+	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
+out:
+	info->scratchpad[0].ptr = its_dev;
+	return 0;
+}
+
+static struct msi_domain_ops its_msi_domain_ops = {
+	.msi_prepare	= its_msi_prepare,
+};
+
+static int its_irq_gic_domain_alloc(struct irq_domain *domain,
+				    unsigned int virq,
+				    irq_hw_number_t hwirq)
+{
+	struct irq_fwspec fwspec;
+
+	if (irq_domain_get_of_node(domain->parent)) {
+		fwspec.fwnode = domain->parent->fwnode;
+		fwspec.param_count = 3;
+		fwspec.param[0] = GIC_IRQ_TYPE_LPI;
+		fwspec.param[1] = hwirq;
+		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
+		fwspec.fwnode = domain->parent->fwnode;
+		fwspec.param_count = 2;
+		fwspec.param[0] = hwirq;
+		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+	} else {
+		return -EINVAL;
+	}
+
+	return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+}
+
+static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				unsigned int nr_irqs, void *args)
+{
+	msi_alloc_info_t *info = args;
+	struct its_device *its_dev = info->scratchpad[0].ptr;
+	irq_hw_number_t hwirq;
+	int err;
+	int i;
+
+	for (i = 0; i < nr_irqs; i++) {
+		err = its_alloc_device_irq(its_dev, &hwirq);
+		if (err)
+			return err;
+
+		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
+		if (err)
+			return err;
+
+		irq_domain_set_hwirq_and_chip(domain, virq + i,
+					      hwirq, &its_irq_chip, its_dev);
+		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
+		pr_debug("ID:%d pID:%d vID:%d\n",
+			 (int)(hwirq - its_dev->event_map.lpi_base),
+			 (int) hwirq, virq + i);
+	}
+
+	return 0;
+}
+
+static int its_irq_domain_activate(struct irq_domain *domain,
+				   struct irq_data *d, bool reserve)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+	const struct cpumask *cpu_mask = cpu_online_mask;
+	int cpu;
+
+	/* get the cpu_mask of local node */
+	if (its_dev->its->numa_node >= 0)
+		cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+
+	/* Bind the LPI to the first possible CPU */
+	cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
+	if (cpu >= nr_cpu_ids) {
+		if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
+			return -EINVAL;
+
+		cpu = cpumask_first(cpu_online_mask);
+	}
+
+	its_dev->event_map.col_map[event] = cpu;
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	/* Map the GIC IRQ and event to the device */
+	its_send_mapti(its_dev, d->hwirq, event);
+	return 0;
+}
+
+static void its_irq_domain_deactivate(struct irq_domain *domain,
+				      struct irq_data *d)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+
+	/* Stop the delivery of interrupts */
+	its_send_discard(its_dev, event);
+}
+
+static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+				unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	int i;
+
+	for (i = 0; i < nr_irqs; i++) {
+		struct irq_data *data = irq_domain_get_irq_data(domain,
+								virq + i);
+		u32 event = its_get_event_id(data);
+
+		/* Mark interrupt index as unused */
+		clear_bit(event, its_dev->event_map.lpi_map);
+
+		/* Nuke the entry in the domain */
+		irq_domain_reset_irq_data(data);
+	}
+
+	/* If all interrupts have been freed, start mopping the floor */
+	if (bitmap_empty(its_dev->event_map.lpi_map,
+			 its_dev->event_map.nr_lpis)) {
+		its_lpi_free(its_dev->event_map.lpi_map,
+			     its_dev->event_map.lpi_base,
+			     its_dev->event_map.nr_lpis);
+		kfree(its_dev->event_map.col_map);
+
+		/* Unmap device/itt */
+		its_send_mapd(its_dev, 0);
+		its_free_device(its_dev);
+	}
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops its_domain_ops = {
+	.alloc			= its_irq_domain_alloc,
+	.free			= its_irq_domain_free,
+	.activate		= its_irq_domain_activate,
+	.deactivate		= its_irq_domain_deactivate,
+};
+
+/*
+ * This is insane.
+ *
+ * If a GICv4 doesn't implement Direct LPIs (which is extremely
+ * likely), the only way to perform an invalidate is to use a fake
+ * device to issue an INV command, implying that the LPI has first
+ * been mapped to some event on that device. Since this is not exactly
+ * cheap, we try to keep that mapping around as long as possible, and
+ * only issue an UNMAP if we're short on available slots.
+ *
+ * Broken by design(tm).
+ */
+static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
+{
+	/* Already unmapped? */
+	if (vpe->vpe_proxy_event == -1)
+		return;
+
+	its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
+	vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
+
+	/*
+	 * We don't track empty slots at all, so let's move the
+	 * next_victim pointer if we can quickly reuse that slot
+	 * instead of nuking an existing entry. Not clear that this is
+	 * always a win though, and this might just generate a ripple
+	 * effect... Let's just hope VPEs don't migrate too often.
+	 */
+	if (vpe_proxy.vpes[vpe_proxy.next_victim])
+		vpe_proxy.next_victim = vpe->vpe_proxy_event;
+
+	vpe->vpe_proxy_event = -1;
+}
+
+static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
+{
+	if (!gic_rdists->has_direct_lpi) {
+		unsigned long flags;
+
+		raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
+		its_vpe_db_proxy_unmap_locked(vpe);
+		raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
+	}
+}
+
+static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
+{
+	/* Already mapped? */
+	if (vpe->vpe_proxy_event != -1)
+		return;
+
+	/* This slot was already allocated. Kick the other VPE out. */
+	if (vpe_proxy.vpes[vpe_proxy.next_victim])
+		its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
+
+	/* Map the new VPE instead */
+	vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
+	vpe->vpe_proxy_event = vpe_proxy.next_victim;
+	vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
+
+	vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
+	its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
+}
+
+static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
+{
+	unsigned long flags;
+	struct its_collection *target_col;
+
+	if (gic_rdists->has_direct_lpi) {
+		void __iomem *rdbase;
+
+		rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
+		gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
+		while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
+			cpu_relax();
+
+		return;
+	}
+
+	raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
+
+	its_vpe_db_proxy_map_locked(vpe);
+
+	target_col = &vpe_proxy.dev->its->collections[to];
+	its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
+	vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
+
+	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
+}
+
+static int its_vpe_set_affinity(struct irq_data *d,
+				const struct cpumask *mask_val,
+				bool force)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	int cpu = cpumask_first(mask_val);
+
+	/*
+	 * Changing affinity is mega expensive, so let's be as lazy as
+	 * we can and only do it if we really have to. Also, if mapped
+	 * into the proxy device, we need to move the doorbell
+	 * interrupt to its new location.
+	 */
+	if (vpe->col_idx != cpu) {
+		int from = vpe->col_idx;
+
+		vpe->col_idx = cpu;
+		its_send_vmovp(vpe);
+		its_vpe_db_proxy_move(vpe, from, cpu);
+	}
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK_DONE;
+}
+
+static void its_vpe_schedule(struct its_vpe *vpe)
+{
+	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+	u64 val;
+
+	/* Schedule the VPE */
+	val  = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
+		GENMASK_ULL(51, 12);
+	val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
+	val |= GICR_VPROPBASER_RaWb;
+	val |= GICR_VPROPBASER_InnerShareable;
+	gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+
+	val  = virt_to_phys(page_address(vpe->vpt_page)) &
+		GENMASK_ULL(51, 16);
+	val |= GICR_VPENDBASER_RaWaWb;
+	val |= GICR_VPENDBASER_NonShareable;
+	/*
+	 * There is no good way of finding out if the pending table is
+	 * empty as we can race against the doorbell interrupt very
+	 * easily. So in the end, vpe->pending_last is only an
+	 * indication that the vcpu has something pending, not one
+	 * that the pending table is empty. A good implementation
+	 * would be able to read its coarse map pretty quickly anyway,
+	 * making this a tolerable issue.
+	 */
+	val |= GICR_VPENDBASER_PendingLast;
+	val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
+	val |= GICR_VPENDBASER_Valid;
+	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+}
+
+static void its_vpe_deschedule(struct its_vpe *vpe)
+{
+	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+	u32 count = 1000000;	/* 1s! */
+	bool clean;
+	u64 val;
+
+	/* We're being scheduled out */
+	val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+	val &= ~GICR_VPENDBASER_Valid;
+	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+	do {
+		val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+		clean = !(val & GICR_VPENDBASER_Dirty);
+		if (!clean) {
+			count--;
+			cpu_relax();
+			udelay(1);
+		}
+	} while (!clean && count);
+
+	if (unlikely(!clean && !count)) {
+		pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+		vpe->idai = false;
+		vpe->pending_last = true;
+	} else {
+		vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
+		vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+	}
+}
+
+static void its_vpe_invall(struct its_vpe *vpe)
+{
+	struct its_node *its;
+
+	list_for_each_entry(its, &its_nodes, entry) {
+		if (!its->is_v4)
+			continue;
+
+		if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
+			continue;
+
+		/*
+		 * Sending a VINVALL to a single ITS is enough, as all
+		 * we need is to reach the redistributors.
+		 */
+		its_send_vinvall(its, vpe);
+		return;
+	}
+}
+
+static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	struct its_cmd_info *info = vcpu_info;
+
+	switch (info->cmd_type) {
+	case SCHEDULE_VPE:
+		its_vpe_schedule(vpe);
+		return 0;
+
+	case DESCHEDULE_VPE:
+		its_vpe_deschedule(vpe);
+		return 0;
+
+	case INVALL_VPE:
+		its_vpe_invall(vpe);
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
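+/*
+ * Issue a command on the proxy device on behalf of a VPE, mapping its
+ * doorbell to a proxy event on demand. Only used when DirectLPI is not
+ * available.
+ */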
+static void its_vpe_send_cmd(struct its_vpe *vpe,
+			     void (*cmd)(struct its_device *, u32))
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
+
+	its_vpe_db_proxy_map_locked(vpe);
+	cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
+
+	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
+}
+
+static void its_vpe_send_inv(struct irq_data *d)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+	if (gic_rdists->has_direct_lpi) {
+		void __iomem *rdbase;
+
+		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+		gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
+		while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
+			cpu_relax();
+	} else {
+		its_vpe_send_cmd(vpe, its_send_inv);
+	}
+}
+
+static void its_vpe_mask_irq(struct irq_data *d)
+{
+	/*
+	 * We need to mask the LPI, which is described by the parent
+	 * irq_data. Instead of calling into the parent (which won't
+	 * exactly do the right thing), let's simply use the
+	 * parent_data pointer. Yes, I'm naughty.
+	 */
+	lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
+	its_vpe_send_inv(d);
+}
+
+static void its_vpe_unmask_irq(struct irq_data *d)
+{
+	/* Same hack as above... */
+	lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
+	its_vpe_send_inv(d);
+}
+
+static int its_vpe_set_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which,
+				     bool state)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+	if (which != IRQCHIP_STATE_PENDING)
+		return -EINVAL;
+
+	if (gic_rdists->has_direct_lpi) {
+		void __iomem *rdbase;
+
+		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+		if (state) {
+			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
+		} else {
+			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
+			while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
+				cpu_relax();
+		}
+	} else {
+		if (state)
+			its_vpe_send_cmd(vpe, its_send_int);
+		else
+			its_vpe_send_cmd(vpe, its_send_clear);
+	}
+
+	return 0;
+}
+
+static struct irq_chip its_vpe_irq_chip = {
+	.name			= "GICv4-vpe",
+	.irq_mask		= its_vpe_mask_irq,
+	.irq_unmask		= its_vpe_unmask_irq,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_affinity	= its_vpe_set_affinity,
+	.irq_set_irqchip_state	= its_vpe_set_irqchip_state,
+	.irq_set_vcpu_affinity	= its_vpe_set_vcpu_affinity,
+};
+
+static int its_vpe_id_alloc(void)
+{
+	return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
+}
+
+static void its_vpe_id_free(u16 id)
+{
+	ida_simple_remove(&its_vpeid_ida, id);
+}
+
+static int its_vpe_init(struct its_vpe *vpe)
+{
+	struct page *vpt_page;
+	int vpe_id;
+
+	/* Allocate vpe_id */
+	vpe_id = its_vpe_id_alloc();
+	if (vpe_id < 0)
+		return vpe_id;
+
+	/* Allocate VPT */
+	vpt_page = its_allocate_pending_table(GFP_KERNEL);
+	if (!vpt_page) {
+		its_vpe_id_free(vpe_id);
+		return -ENOMEM;
+	}
+
+	if (!its_alloc_vpe_table(vpe_id)) {
+		its_vpe_id_free(vpe_id);
+		its_free_pending_table(vpt_page);
+		return -ENOMEM;
+	}
+
+	vpe->vpe_id = vpe_id;
+	vpe->vpt_page = vpt_page;
+	vpe->vpe_proxy_event = -1;
+
+	return 0;
+}
+
+static void its_vpe_teardown(struct its_vpe *vpe)
+{
+	its_vpe_db_proxy_unmap(vpe);
+	its_vpe_id_free(vpe->vpe_id);
+	its_free_pending_table(vpe->vpt_page);
+}
+
+static void its_vpe_irq_domain_free(struct irq_domain *domain,
+				    unsigned int virq,
+				    unsigned int nr_irqs)
+{
+	struct its_vm *vm = domain->host_data;
+	int i;
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+
+	for (i = 0; i < nr_irqs; i++) {
+		struct irq_data *data = irq_domain_get_irq_data(domain,
+								virq + i);
+		struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
+
+		BUG_ON(vm != vpe->its_vm);
+
+		clear_bit(data->hwirq, vm->db_bitmap);
+		its_vpe_teardown(vpe);
+		irq_domain_reset_irq_data(data);
+	}
+
+	if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
+		its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
+		its_free_prop_table(vm->vprop_page);
+	}
+}
+
+static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				    unsigned int nr_irqs, void *args)
+{
+	struct its_vm *vm = args;
+	unsigned long *bitmap;
+	struct page *vprop_page;
+	int base, nr_ids, i, err = 0;
+
+	BUG_ON(!vm);
+
+	bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
+	if (!bitmap)
+		return -ENOMEM;
+
+	if (nr_ids < nr_irqs) {
+		its_lpi_free(bitmap, base, nr_ids);
+		return -ENOMEM;
+	}
+
+	vprop_page = its_allocate_prop_table(GFP_KERNEL);
+	if (!vprop_page) {
+		its_lpi_free(bitmap, base, nr_ids);
+		return -ENOMEM;
+	}
+
+	vm->db_bitmap = bitmap;
+	vm->db_lpi_base = base;
+	vm->nr_db_lpis = nr_ids;
+	vm->vprop_page = vprop_page;
+
+	for (i = 0; i < nr_irqs; i++) {
+		vm->vpes[i]->vpe_db_lpi = base + i;
+		err = its_vpe_init(vm->vpes[i]);
+		if (err)
+			break;
+		err = its_irq_gic_domain_alloc(domain, virq + i,
+					       vm->vpes[i]->vpe_db_lpi);
+		if (err)
+			break;
+		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
+					      &its_vpe_irq_chip, vm->vpes[i]);
+		set_bit(i, bitmap);
+	}
+
+	if (err) {
+		if (i > 0)
+			its_vpe_irq_domain_free(domain, virq, i);
+
+		its_lpi_free(bitmap, base, nr_ids);
+		its_free_prop_table(vprop_page);
+	}
+
+	return err;
+}
+
+static int its_vpe_irq_domain_activate(struct irq_domain *domain,
+				       struct irq_data *d, bool reserve)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	struct its_node *its;
+
+	/* If we use the list map, we issue VMAPP on demand... */
+	if (its_list_map)
+		return 0;
+
+	/* Map the VPE to the first possible CPU */
+	vpe->col_idx = cpumask_first(cpu_online_mask);
+
+	list_for_each_entry(its, &its_nodes, entry) {
+		if (!its->is_v4)
+			continue;
+
+		its_send_vmapp(its, vpe, true);
+		its_send_vinvall(its, vpe);
+	}
+
+	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+
+	return 0;
+}
+
+static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
+					  struct irq_data *d)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	struct its_node *its;
+
+	/*
+	 * If we use the list map, we unmap the VPE once no VLPIs are
+	 * associated with the VM.
+	 */
+	if (its_list_map)
+		return;
+
+	list_for_each_entry(its, &its_nodes, entry) {
+		if (!its->is_v4)
+			continue;
+
+		its_send_vmapp(its, vpe, false);
+	}
+}
+
+static const struct irq_domain_ops its_vpe_domain_ops = {
+	.alloc			= its_vpe_irq_domain_alloc,
+	.free			= its_vpe_irq_domain_free,
+	.activate		= its_vpe_irq_domain_activate,
+	.deactivate		= its_vpe_irq_domain_deactivate,
+};
+
+static int its_force_quiescent(void __iomem *base)
+{
+	u32 count = 1000000;	/* 1s */
+	u32 val;
+
+	val = readl_relaxed(base + GITS_CTLR);
+	/*
+	 * GIC architecture specification requires the ITS to be both
+	 * disabled and quiescent for writes to GITS_BASER<n> or
+	 * GITS_CBASER to not have UNPREDICTABLE results.
+	 */
+	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
+		return 0;
+
+	/* Disable the generation of all interrupts to this ITS */
+	val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
+	writel_relaxed(val, base + GITS_CTLR);
+
+	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
+	while (1) {
+		val = readl_relaxed(base + GITS_CTLR);
+		if (val & GITS_CTLR_QUIESCENT)
+			return 0;
+
+		count--;
+		if (!count)
+			return -EBUSY;
+
+		cpu_relax();
+		udelay(1);
+	}
+}
+
+static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
+{
+	struct its_node *its = data;
+
+	/* erratum 22375: only alloc 8MB table size */
+	its->device_ids = 0x14;		/* 20 bits, 8MB */
+	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
+
+	return true;
+}
+
+static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
+{
+	struct its_node *its = data;
+
+	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+
+	return true;
+}
+
+static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
+{
+	struct its_node *its = data;
+
+	/* On QDF2400, the size of the ITE is 16 bytes */
+	its->ite_size = 16;
+
+	return true;
+}
+
+static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
+{
+	struct its_node *its = its_dev->its;
+
+	/*
+	 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
+	 * which maps 32-bit writes targeted at a separate window of
+	 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
+	 * with device ID taken from bits [device_id_bits + 1:2] of
+	 * the window offset.
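+	 *
+	 * For example, a write on behalf of device ID 0x1234 lands at
+	 * window offset 0x1234 << 2 == 0x48d0.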
+	 */
+	return its->pre_its_base + (its_dev->device_id << 2);
+}
+
+static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
+{
+	struct its_node *its = data;
+	u32 pre_its_window[2];
+	u32 ids;
+
+	if (!fwnode_property_read_u32_array(its->fwnode_handle,
+					   "socionext,synquacer-pre-its",
+					   pre_its_window,
+					   ARRAY_SIZE(pre_its_window))) {
+
+		its->pre_its_base = pre_its_window[0];
+		its->get_msi_base = its_irq_get_msi_base_pre_its;
+
+		ids = ilog2(pre_its_window[1]) - 2;
+		if (its->device_ids > ids)
+			its->device_ids = ids;
+
+		/* the pre-ITS breaks isolation, so disable MSI remapping */
+		its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
+		return true;
+	}
+	return false;
+}
+
+static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
+{
+	struct its_node *its = data;
+
+	/*
+	 * Hip07 insists on using the wrong address for the VLPI
+	 * page. Trick it into doing the right thing...
+	 */
+	its->vlpi_redist_offset = SZ_128K;
+	return true;
+}
+
+static const struct gic_quirk its_quirks[] = {
+#ifdef CONFIG_CAVIUM_ERRATUM_22375
+	{
+		.desc	= "ITS: Cavium errata 22375, 24313",
+		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
+		.mask	= 0xffff0fff,
+		.init	= its_enable_quirk_cavium_22375,
+	},
+#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23144
+	{
+		.desc	= "ITS: Cavium erratum 23144",
+		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
+		.mask	= 0xffff0fff,
+		.init	= its_enable_quirk_cavium_23144,
+	},
+#endif
+#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
+	{
+		.desc	= "ITS: QDF2400 erratum 0065",
+		.iidr	= 0x00001070, /* QDF2400 ITS rev 1.x */
+		.mask	= 0xffffffff,
+		.init	= its_enable_quirk_qdf2400_e0065,
+	},
+#endif
+#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
+	{
+		/*
+		 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
+		 * implementation, but with a 'pre-ITS' added that requires
+		 * special handling in software.
+		 */
+		.desc	= "ITS: Socionext Synquacer pre-ITS",
+		.iidr	= 0x0001143b,
+		.mask	= 0xffffffff,
+		.init	= its_enable_quirk_socionext_synquacer,
+	},
+#endif
+#ifdef CONFIG_HISILICON_ERRATUM_161600802
+	{
+		.desc	= "ITS: Hip07 erratum 161600802",
+		.iidr	= 0x00000004,
+		.mask	= 0xffffffff,
+		.init	= its_enable_quirk_hip07_161600802,
+	},
+#endif
+	{
+	}
+};
+
+static void its_enable_quirks(struct its_node *its)
+{
+	u32 iidr = readl_relaxed(its->base + GITS_IIDR);
+
+	gic_enable_quirks(iidr, its_quirks, its);
+}
+
+static int its_save_disable(void)
+{
+	struct its_node *its;
+	int err = 0;
+
+	raw_spin_lock(&its_lock);
+	list_for_each_entry(its, &its_nodes, entry) {
+		void __iomem *base;
+
+		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
+			continue;
+
+		base = its->base;
+		its->ctlr_save = readl_relaxed(base + GITS_CTLR);
+		err = its_force_quiescent(base);
+		if (err) {
+			pr_err("ITS@%pa: failed to quiesce: %d\n",
+			       &its->phys_base, err);
+			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
+			goto err;
+		}
+
+		its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
+	}
+
+err:
+	if (err) {
+		list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
+			void __iomem *base;
+
+			if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
+				continue;
+
+			base = its->base;
+			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
+		}
+	}
+	raw_spin_unlock(&its_lock);
+
+	return err;
+}
+
+static void its_restore_enable(void)
+{
+	struct its_node *its;
+	int ret;
+
+	raw_spin_lock(&its_lock);
+	list_for_each_entry(its, &its_nodes, entry) {
+		void __iomem *base;
+		int i;
+
+		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
+			continue;
+
+		base = its->base;
+
+		/*
+		 * Make sure that the ITS is disabled. If it fails to quiesce,
+		 * don't restore it since writing to CBASER or BASER<n>
+		 * registers is undefined according to the GIC v3 ITS
+		 * Specification.
+		 */
+		ret = its_force_quiescent(base);
+		if (ret) {
+			pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
+			       &its->phys_base, ret);
+			continue;
+		}
+
+		gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
+
+		/*
+		 * Writing CBASER resets CREADR to 0, so make CWRITER and
+		 * cmd_write line up with it.
+		 */
+		its->cmd_write = its->cmd_base;
+		gits_write_cwriter(0, base + GITS_CWRITER);
+
+		/* Restore GITS_BASER from the value cache. */
+		for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+			struct its_baser *baser = &its->tables[i];
+
+			if (!(baser->val & GITS_BASER_VALID))
+				continue;
+
+			its_write_baser(its, baser, baser->val);
+		}
+		writel_relaxed(its->ctlr_save, base + GITS_CTLR);
+
+		/*
+		 * Reinit the collection if it's stored in the ITS. This is
+		 * indicated by the col_id being less than the HCC field.
+		 * CID < HCC as specified in the GIC v3 Documentation.
+		 */
+		if (its->collections[smp_processor_id()].col_id <
+		    GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
+			its_cpu_init_collection(its);
+	}
+	raw_spin_unlock(&its_lock);
+}
+
+static struct syscore_ops its_syscore_ops = {
+	.suspend = its_save_disable,
+	.resume = its_restore_enable,
+};
+
+static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
+{
+	struct irq_domain *inner_domain;
+	struct msi_domain_info *info;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
+	if (!inner_domain) {
+		kfree(info);
+		return -ENOMEM;
+	}
+
+	inner_domain->parent = its_parent;
+	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
+	inner_domain->flags |= its->msi_domain_flags;
+	info->ops = &its_msi_domain_ops;
+	info->data = its;
+	inner_domain->host_data = info;
+
+	return 0;
+}
+
+static int its_init_vpe_domain(void)
+{
+	struct its_node *its;
+	u32 devid;
+	int entries;
+
+	if (gic_rdists->has_direct_lpi) {
+		pr_info("ITS: Using DirectLPI for VPE invalidation\n");
+		return 0;
+	}
+
+	/* Any ITS will do, even if not v4 */
+	its = list_first_entry(&its_nodes, struct its_node, entry);
+
+	entries = roundup_pow_of_two(nr_cpu_ids);
+	vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
+				 GFP_KERNEL);
+	if (!vpe_proxy.vpes) {
+		pr_err("ITS: Can't allocate GICv4 proxy device array\n");
+		return -ENOMEM;
+	}
+
+	/* Use the last possible DevID */
+	devid = GENMASK(its->device_ids - 1, 0);
+	vpe_proxy.dev = its_create_device(its, devid, entries, false);
+	if (!vpe_proxy.dev) {
+		kfree(vpe_proxy.vpes);
+		pr_err("ITS: Can't allocate GICv4 proxy device\n");
+		return -ENOMEM;
+	}
+
+	BUG_ON(entries > vpe_proxy.dev->nr_ites);
+
+	raw_spin_lock_init(&vpe_proxy.lock);
+	vpe_proxy.next_victim = 0;
+	pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
+		devid, vpe_proxy.dev->nr_ites);
+
+	return 0;
+}
+
+static int __init its_compute_its_list_map(struct resource *res,
+					   void __iomem *its_base)
+{
+	int its_number;
+	u32 ctlr;
+
+	/*
+	 * This is assumed to be done early enough that we're
+	 * guaranteed to be single-threaded, hence no
+	 * locking. Should this change, locking will have to be
+	 * added.
+	 */
+	its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
+	if (its_number >= GICv4_ITS_LIST_MAX) {
+		pr_err("ITS@%pa: No ITSList entry available!\n",
+		       &res->start);
+		return -EINVAL;
+	}
+
+	ctlr = readl_relaxed(its_base + GITS_CTLR);
+	ctlr &= ~GITS_CTLR_ITS_NUMBER;
+	ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
+	writel_relaxed(ctlr, its_base + GITS_CTLR);
+	ctlr = readl_relaxed(its_base + GITS_CTLR);
+	if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
+		its_number = ctlr & GITS_CTLR_ITS_NUMBER;
+		its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
+	}
+
+	if (test_and_set_bit(its_number, &its_list_map)) {
+		pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
+		       &res->start, its_number);
+		return -EINVAL;
+	}
+
+	return its_number;
+}
+
+static int __init its_probe_one(struct resource *res,
+				struct fwnode_handle *handle, int numa_node)
+{
+	struct its_node *its;
+	void __iomem *its_base;
+	u32 val, ctlr;
+	u64 baser, tmp, typer;
+	int err;
+
+	its_base = ioremap(res->start, resource_size(res));
+	if (!its_base) {
+		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
+		return -ENOMEM;
+	}
+
+	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
+	if (val != 0x30 && val != 0x40) {
+		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
+		err = -ENODEV;
+		goto out_unmap;
+	}
+
+	err = its_force_quiescent(its_base);
+	if (err) {
+		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
+		goto out_unmap;
+	}
+
+	pr_info("ITS %pR\n", res);
+
+	its = kzalloc(sizeof(*its), GFP_KERNEL);
+	if (!its) {
+		err = -ENOMEM;
+		goto out_unmap;
+	}
+
+	raw_spin_lock_init(&its->lock);
+	INIT_LIST_HEAD(&its->entry);
+	INIT_LIST_HEAD(&its->its_device_list);
+	typer = gic_read_typer(its_base + GITS_TYPER);
+	its->base = its_base;
+	its->phys_base = res->start;
+	its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
+	its->device_ids = GITS_TYPER_DEVBITS(typer);
+	its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
+	if (its->is_v4) {
+		if (!(typer & GITS_TYPER_VMOVP)) {
+			err = its_compute_its_list_map(res, its_base);
+			if (err < 0)
+				goto out_free_its;
+
+			its->list_nr = err;
+
+			pr_info("ITS@%pa: Using ITS number %d\n",
+				&res->start, err);
+		} else {
+			pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
+		}
+	}
+
+	its->numa_node = numa_node;
+
+	its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+						get_order(ITS_CMD_QUEUE_SZ));
+	if (!its->cmd_base) {
+		err = -ENOMEM;
+		goto out_free_its;
+	}
+	its->cmd_write = its->cmd_base;
+	its->fwnode_handle = handle;
+	its->get_msi_base = its_irq_get_msi_base;
+	its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
+
+	its_enable_quirks(its);
+
+	err = its_alloc_tables(its);
+	if (err)
+		goto out_free_cmd;
+
+	err = its_alloc_collections(its);
+	if (err)
+		goto out_free_tables;
+
+	baser = (virt_to_phys(its->cmd_base)	|
+		 GITS_CBASER_RaWaWb		|
+		 GITS_CBASER_InnerShareable	|
+		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
+		 GITS_CBASER_VALID);
+
+	gits_write_cbaser(baser, its->base + GITS_CBASER);
+	tmp = gits_read_cbaser(its->base + GITS_CBASER);
+
+	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
+		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
+			/*
+			 * The HW reports non-shareable, we must
+			 * remove the cacheability attributes as
+			 * well.
+			 */
+			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
+				   GITS_CBASER_CACHEABILITY_MASK);
+			baser |= GITS_CBASER_nC;
+			gits_write_cbaser(baser, its->base + GITS_CBASER);
+		}
+		pr_info("ITS: using cache flushing for cmd queue\n");
+		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
+	}
+
+	gits_write_cwriter(0, its->base + GITS_CWRITER);
+	ctlr = readl_relaxed(its->base + GITS_CTLR);
+	ctlr |= GITS_CTLR_ENABLE;
+	if (its->is_v4)
+		ctlr |= GITS_CTLR_ImDe;
+	writel_relaxed(ctlr, its->base + GITS_CTLR);
+
+	if (GITS_TYPER_HCC(typer))
+		its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
+
+	err = its_init_domain(handle, its);
+	if (err)
+		goto out_free_tables;
+
+	raw_spin_lock(&its_lock);
+	list_add(&its->entry, &its_nodes);
+	raw_spin_unlock(&its_lock);
+
+	return 0;
+
+out_free_tables:
+	its_free_tables(its);
+out_free_cmd:
+	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
+out_free_its:
+	kfree(its);
+out_unmap:
+	iounmap(its_base);
+	pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
+	return err;
+}
+
+static bool gic_rdists_supports_plpis(void)
+{
+	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
+}
+
+static int redist_disable_lpis(void)
+{
+	void __iomem *rbase = gic_data_rdist_rd_base();
+	u64 timeout = USEC_PER_SEC;
+	u64 val;
+
+	/*
+	 * If coming via a CPU hotplug event, we don't need to disable
+	 * LPIs before trying to re-enable them. They are already
+	 * configured and all is well in the world. Detect this case
+	 * by checking the allocation of the pending table for the
+	 * current CPU.
+	 */
+	if (gic_data_rdist()->pend_page)
+		return 0;
+
+	if (!gic_rdists_supports_plpis()) {
+		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
+		return -ENXIO;
+	}
+
+	val = readl_relaxed(rbase + GICR_CTLR);
+	if (!(val & GICR_CTLR_ENABLE_LPIS))
+		return 0;
+
+	pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
+		smp_processor_id());
+	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
+
+	/* Disable LPIs */
+	val &= ~GICR_CTLR_ENABLE_LPIS;
+	writel_relaxed(val, rbase + GICR_CTLR);
+
+	/* Make sure any change to GICR_CTLR is observable by the GIC */
+	dsb(sy);
+
+	/*
+	 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
+	 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
+	 * Error out if we time out waiting for RWP to clear.
+	 */
+	while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
+		if (!timeout) {
+			pr_err("CPU%d: Timeout while disabling LPIs\n",
+			       smp_processor_id());
+			return -ETIMEDOUT;
+		}
+		udelay(1);
+		timeout--;
+	}
+
+	/*
+	 * After it has been written to 1, it is IMPLEMENTATION
+	 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
+	 * cleared to 0. Error out if clearing the bit failed.
+	 */
+	if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
+		pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+int its_cpu_init(void)
+{
+	if (!list_empty(&its_nodes)) {
+		int ret;
+
+		ret = redist_disable_lpis();
+		if (ret)
+			return ret;
+
+		its_cpu_init_lpis();
+		its_cpu_init_collections();
+	}
+
+	return 0;
+}
+
+static const struct of_device_id its_device_id[] = {
+	{	.compatible	= "arm,gic-v3-its",	},
+	{},
+};
+
+static int __init its_of_probe(struct device_node *node)
+{
+	struct device_node *np;
+	struct resource res;
+
+	for (np = of_find_matching_node(node, its_device_id); np;
+	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
+		if (!of_property_read_bool(np, "msi-controller")) {
+			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
+				np);
+			continue;
+		}
+
+		if (of_address_to_resource(np, 0, &res)) {
+			pr_warn("%pOF: no regs?\n", np);
+			continue;
+		}
+
+		its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
+	}
+	return 0;
+}
+
+#ifdef CONFIG_ACPI
+
+#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
+
+#ifdef CONFIG_ACPI_NUMA
+struct its_srat_map {
+	/* numa node id */
+	u32	numa_node;
+	/* GIC ITS ID */
+	u32	its_id;
+};
+
+static struct its_srat_map *its_srat_maps __initdata;
+static int its_in_srat __initdata;
+
+static int __init acpi_get_its_numa_node(u32 its_id)
+{
+	int i;
+
+	for (i = 0; i < its_in_srat; i++) {
+		if (its_id == its_srat_maps[i].its_id)
+			return its_srat_maps[i].numa_node;
+	}
+	return NUMA_NO_NODE;
+}
+
+static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
+					  const unsigned long end)
+{
+	return 0;
+}
+
+static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
+			 const unsigned long end)
+{
+	int node;
+	struct acpi_srat_gic_its_affinity *its_affinity;
+
+	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
+	if (!its_affinity)
+		return -EINVAL;
+
+	if (its_affinity->header.length < sizeof(*its_affinity)) {
+		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
+			its_affinity->header.length);
+		return -EINVAL;
+	}
+
+	node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
+
+	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
+		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
+		return 0;
+	}
+
+	its_srat_maps[its_in_srat].numa_node = node;
+	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
+	its_in_srat++;
+	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
+		its_affinity->proximity_domain, its_affinity->its_id, node);
+
+	return 0;
+}
+
+static void __init acpi_table_parse_srat_its(void)
+{
+	int count;
+
+	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
+			sizeof(struct acpi_table_srat),
+			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
+			gic_acpi_match_srat_its, 0);
+	if (count <= 0)
+		return;
+
+	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
+				      GFP_KERNEL);
+	if (!its_srat_maps) {
+		pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
+		return;
+	}
+
+	acpi_table_parse_entries(ACPI_SIG_SRAT,
+			sizeof(struct acpi_table_srat),
+			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
+			gic_acpi_parse_srat_its, 0);
+}
+
+/* free the its_srat_maps after ITS probing */
+static void __init acpi_its_srat_maps_free(void)
+{
+	kfree(its_srat_maps);
+}
+#else
+static void __init acpi_table_parse_srat_its(void)	{ }
+static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
+static void __init acpi_its_srat_maps_free(void) { }
+#endif
+
+static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
+					  const unsigned long end)
+{
+	struct acpi_madt_generic_translator *its_entry;
+	struct fwnode_handle *dom_handle;
+	struct resource res;
+	int err;
+
+	its_entry = (struct acpi_madt_generic_translator *)header;
+	memset(&res, 0, sizeof(res));
+	res.start = its_entry->base_address;
+	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
+	res.flags = IORESOURCE_MEM;
+
+	dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
+	if (!dom_handle) {
+		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
+		       &res.start);
+		return -ENOMEM;
+	}
+
+	err = iort_register_domain_token(its_entry->translation_id, res.start,
+					 dom_handle);
+	if (err) {
+		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
+		       &res.start, its_entry->translation_id);
+		goto dom_err;
+	}
+
+	err = its_probe_one(&res, dom_handle,
+			acpi_get_its_numa_node(its_entry->translation_id));
+	if (!err)
+		return 0;
+
+	iort_deregister_domain_token(its_entry->translation_id);
+dom_err:
+	irq_domain_free_fwnode(dom_handle);
+	return err;
+}
+
+static void __init its_acpi_probe(void)
+{
+	acpi_table_parse_srat_its();
+	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+			      gic_acpi_parse_madt_its, 0);
+	acpi_its_srat_maps_free();
+}
+#else
+static void __init its_acpi_probe(void) { }
+#endif
+
+int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
+		    struct irq_domain *parent_domain)
+{
+	struct device_node *of_node;
+	struct its_node *its;
+	bool has_v4 = false;
+	int err;
+
+	its_parent = parent_domain;
+	of_node = to_of_node(handle);
+	if (of_node)
+		its_of_probe(of_node);
+	else
+		its_acpi_probe();
+
+	if (list_empty(&its_nodes)) {
+		pr_warn("ITS: No ITS available, not enabling LPIs\n");
+		return -ENXIO;
+	}
+
+	gic_rdists = rdists;
+	err = its_alloc_lpi_tables();
+	if (err)
+		return err;
+
+	list_for_each_entry(its, &its_nodes, entry)
+		has_v4 |= its->is_v4;
+
+	if (has_v4 && rdists->has_vlpis) {
+		if (its_init_vpe_domain() ||
+		    its_init_v4(parent_domain, &its_vpe_domain_ops)) {
+			rdists->has_vlpis = false;
+			pr_err("ITS: Disabling GICv4 support\n");
+		}
+	}
+
+	register_syscore_ops(&its_syscore_ops);
+
+	return 0;
+}
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
new file mode 100644
index 0000000..ad70e7c
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ */
+
+#define pr_fmt(fmt) "GICv3: " fmt
+
+#include <linux/dma-iommu.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+
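+/* A contiguous block of SPIs that can be signalled through the MBI frame */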
+struct mbi_range {
+	u32			spi_start;
+	u32			nr_spis;
+	unsigned long		*bm;
+};
+
+static DEFINE_MUTEX(mbi_lock);
+static phys_addr_t		mbi_phys_base;
+static struct mbi_range		*mbi_ranges;
+static unsigned int		mbi_range_nr;
+
+static struct irq_chip mbi_irq_chip = {
+	.name			= "MBI",
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_type		= irq_chip_set_type_parent,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+};
+
+static int mbi_irq_gic_domain_alloc(struct irq_domain *domain,
+				       unsigned int virq,
+				       irq_hw_number_t hwirq)
+{
+	struct irq_fwspec fwspec;
+	struct irq_data *d;
+	int err;
+
+	/*
+	 * Using ACPI? There is no MBI support in the spec, you
+	 * shouldn't even be here.
+	 */
+	if (!is_of_node(domain->parent->fwnode))
+		return -EINVAL;
+
+	/*
+	 * Let's default to edge. This is consistent with traditional
+	 * MSIs, and systems requiring level signaling will just
+	 * enforce the trigger on their own.
+	 */
+	fwspec.fwnode = domain->parent->fwnode;
+	fwspec.param_count = 3;
+	fwspec.param[0] = 0;
+	fwspec.param[1] = hwirq - 32;
+	fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+	if (err)
+		return err;
+
+	d = irq_domain_get_irq_data(domain->parent, virq);
+	return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+}
+
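+/* Give an allocated block of SPIs back to its MBI range's bitmap */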
+static void mbi_free_msi(struct mbi_range *mbi, unsigned int hwirq,
+			 int nr_irqs)
+{
+	mutex_lock(&mbi_lock);
+	bitmap_release_region(mbi->bm, hwirq - mbi->spi_start,
+			      get_count_order(nr_irqs));
+	mutex_unlock(&mbi_lock);
+}
+
+static int mbi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				   unsigned int nr_irqs, void *args)
+{
+	struct mbi_range *mbi = NULL;
+	int hwirq, offset, i, err = 0;
+
+	mutex_lock(&mbi_lock);
+	for (i = 0; i < mbi_range_nr; i++) {
+		offset = bitmap_find_free_region(mbi_ranges[i].bm,
+						 mbi_ranges[i].nr_spis,
+						 get_count_order(nr_irqs));
+		if (offset >= 0) {
+			mbi = &mbi_ranges[i];
+			break;
+		}
+	}
+	mutex_unlock(&mbi_lock);
+
+	if (!mbi)
+		return -ENOSPC;
+
+	hwirq = mbi->spi_start + offset;
+
+	for (i = 0; i < nr_irqs; i++) {
+		err = mbi_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
+		if (err)
+			goto fail;
+
+		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+					      &mbi_irq_chip, mbi);
+	}
+
+	return 0;
+
+fail:
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+	mbi_free_msi(mbi, hwirq, nr_irqs);
+	return err;
+}
+
+static void mbi_irq_domain_free(struct irq_domain *domain,
+				unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct mbi_range *mbi = irq_data_get_irq_chip_data(d);
+
+	mbi_free_msi(mbi, d->hwirq, nr_irqs);
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops mbi_domain_ops = {
+	.alloc			= mbi_irq_domain_alloc,
+	.free			= mbi_irq_domain_free,
+};
+
+static void mbi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	msg[0].address_hi = upper_32_bits(mbi_phys_base + GICD_SETSPI_NSR);
+	msg[0].address_lo = lower_32_bits(mbi_phys_base + GICD_SETSPI_NSR);
+	msg[0].data = data->parent_data->hwirq;
+
+	iommu_dma_map_msi_msg(data->irq, msg);
+}
+
+#ifdef CONFIG_PCI_MSI
+/* PCI-specific irqchip */
+static void mbi_mask_msi_irq(struct irq_data *d)
+{
+	pci_msi_mask_irq(d);
+	irq_chip_mask_parent(d);
+}
+
+static void mbi_unmask_msi_irq(struct irq_data *d)
+{
+	pci_msi_unmask_irq(d);
+	irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip mbi_msi_irq_chip = {
+	.name			= "MSI",
+	.irq_mask		= mbi_mask_msi_irq,
+	.irq_unmask		= mbi_unmask_msi_irq,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_compose_msi_msg	= mbi_compose_msi_msg,
+	.irq_write_msi_msg	= pci_msi_domain_write_msg,
+};
+
+static struct msi_domain_info mbi_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+	.chip	= &mbi_msi_irq_chip,
+};
+
+static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain,
+				   struct irq_domain **pci_domain)
+{
+	*pci_domain = pci_msi_create_irq_domain(nexus_domain->parent->fwnode,
+						&mbi_msi_domain_info,
+						nexus_domain);
+	if (!*pci_domain)
+		return -ENOMEM;
+
+	return 0;
+}
+#else
+static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain,
+				   struct irq_domain **pci_domain)
+{
+	*pci_domain = NULL;
+	return 0;
+}
+#endif
+
+static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	mbi_compose_msi_msg(data, msg);
+
+	msg[1].address_hi = upper_32_bits(mbi_phys_base + GICD_CLRSPI_NSR);
+	msg[1].address_lo = lower_32_bits(mbi_phys_base + GICD_CLRSPI_NSR);
+	msg[1].data = data->parent_data->hwirq;
+
+	iommu_dma_map_msi_msg(data->irq, &msg[1]);
+}
+
+/* Platform-MSI specific irqchip */
+static struct irq_chip mbi_pmsi_irq_chip = {
+	.name			= "pMSI",
+	.irq_set_type		= irq_chip_set_type_parent,
+	.irq_compose_msi_msg	= mbi_compose_mbi_msg,
+	.flags			= IRQCHIP_SUPPORTS_LEVEL_MSI,
+};
+
+static struct msi_domain_ops mbi_pmsi_ops = {
+};
+
+static struct msi_domain_info mbi_pmsi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_LEVEL_CAPABLE),
+	.ops	= &mbi_pmsi_ops,
+	.chip	= &mbi_pmsi_irq_chip,
+};
+
+static int mbi_allocate_domains(struct irq_domain *parent)
+{
+	struct irq_domain *nexus_domain, *pci_domain, *plat_domain;
+	int err;
+
+	nexus_domain = irq_domain_create_tree(parent->fwnode,
+					      &mbi_domain_ops, NULL);
+	if (!nexus_domain)
+		return -ENOMEM;
+
+	irq_domain_update_bus_token(nexus_domain, DOMAIN_BUS_NEXUS);
+	nexus_domain->parent = parent;
+
+	err = mbi_allocate_pci_domain(nexus_domain, &pci_domain);
+
+	plat_domain = platform_msi_create_irq_domain(parent->fwnode,
+						     &mbi_pmsi_domain_info,
+						     nexus_domain);
+
+	if (err || !plat_domain) {
+		if (plat_domain)
+			irq_domain_remove(plat_domain);
+		if (pci_domain)
+			irq_domain_remove(pci_domain);
+		irq_domain_remove(nexus_domain);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
+{
+	struct device_node *np;
+	const __be32 *reg;
+	int ret, n;
+
+	np = to_of_node(fwnode);
+
+	if (!of_property_read_bool(np, "msi-controller"))
+		return 0;
+
+	n = of_property_count_elems_of_size(np, "mbi-ranges", sizeof(u32));
+	if (n <= 0 || n % 2)
+		return -EINVAL;
+
+	mbi_range_nr = n / 2;
+	mbi_ranges = kcalloc(mbi_range_nr, sizeof(*mbi_ranges), GFP_KERNEL);
+	if (!mbi_ranges)
+		return -ENOMEM;
+
+	for (n = 0; n < mbi_range_nr; n++) {
+		ret = of_property_read_u32_index(np, "mbi-ranges", n * 2,
+						 &mbi_ranges[n].spi_start);
+		if (ret)
+			goto err_free_mbi;
+		ret = of_property_read_u32_index(np, "mbi-ranges", n * 2 + 1,
+						 &mbi_ranges[n].nr_spis);
+		if (ret)
+			goto err_free_mbi;
+
+		mbi_ranges[n].bm = kcalloc(BITS_TO_LONGS(mbi_ranges[n].nr_spis),
+					   sizeof(long), GFP_KERNEL);
+		if (!mbi_ranges[n].bm) {
+			ret = -ENOMEM;
+			goto err_free_mbi;
+		}
+		pr_info("MBI range [%d:%d]\n", mbi_ranges[n].spi_start,
+			mbi_ranges[n].spi_start + mbi_ranges[n].nr_spis - 1);
+	}
+
+	reg = of_get_property(np, "mbi-alias", NULL);
+	if (reg) {
+		mbi_phys_base = of_translate_address(np, reg);
+		if (mbi_phys_base == OF_BAD_ADDR) {
+			ret = -ENXIO;
+			goto err_free_mbi;
+		}
+	} else {
+		struct resource res;
+
+		if (of_address_to_resource(np, 0, &res)) {
+			ret = -ENXIO;
+			goto err_free_mbi;
+		}
+
+		mbi_phys_base = res.start;
+	}
+
+	pr_info("Using MBI frame %pa\n", &mbi_phys_base);
+
+	ret = mbi_allocate_domains(parent);
+	if (ret)
+		goto err_free_mbi;
+
+	return 0;
+
+err_free_mbi:
+	if (mbi_ranges) {
+		for (n = 0; n < mbi_range_nr; n++)
+			kfree(mbi_ranges[n].bm);
+		kfree(mbi_ranges);
+	}
+
+	return ret;
+}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
new file mode 100644
index 0000000..d5912f1
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -0,0 +1,1654 @@
+/*
+ * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"GICv3: " fmt
+
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic-common.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqchip/irq-partition-percpu.h>
+
+#include <asm/cputype.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+#include <asm/virt.h>
+
+#include "irq-gic-common.h"
+
+struct redist_region {
+	void __iomem		*redist_base;
+	phys_addr_t		phys_base;
+	bool			single_redist;
+};
+
+struct gic_chip_data {
+	struct fwnode_handle	*fwnode;
+	void __iomem		*dist_base;
+	struct redist_region	*redist_regions;
+	struct rdists		rdists;
+	struct irq_domain	*domain;
+	u64			redist_stride;
+	u32			nr_redist_regions;
+	bool			has_rss;
+	unsigned int		irq_nr;
+	struct partition_desc	*ppi_descs[16];
+};
+
+static struct gic_chip_data gic_data __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
+
+static struct gic_kvm_info gic_v3_kvm_info;
+static DEFINE_PER_CPU(bool, has_rss);
+
+#define MPIDR_RS(mpidr)			(((mpidr) & 0xF0UL) >> 4)
+#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
+#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
+#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)
+
+/* Our default, arbitrary priority value. Linux only uses one anyway. */
+#define DEFAULT_PMR_VALUE	0xf0
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+	return d->hwirq;
+}
+
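+/* SGIs and PPIs (hwirq < 32) are handled by the redistributor */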
+static inline int gic_irq_in_rdist(struct irq_data *d)
+{
+	return gic_irq(d) < 32;
+}
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+	if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
+		return gic_data_rdist_sgi_base();
+
+	if (d->hwirq <= 1023)		/* SPI -> dist_base */
+		return gic_data.dist_base;
+
+	return NULL;
+}
+
+static void gic_do_wait_for_rwp(void __iomem *base)
+{
+	u32 count = 1000000;	/* 1s! */
+
+	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
+		count--;
+		if (!count) {
+			pr_err_ratelimited("RWP timeout, gone fishing\n");
+			return;
+		}
+		cpu_relax();
+		udelay(1);
+	}
+}
+
+/* Wait for completion of a distributor change */
+static void gic_dist_wait_for_rwp(void)
+{
+	gic_do_wait_for_rwp(gic_data.dist_base);
+}
+
+/* Wait for completion of a redistributor change */
+static void gic_redist_wait_for_rwp(void)
+{
+	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
+}
+
+#ifdef CONFIG_ARM64
+
+static u64 __maybe_unused gic_read_iar(void)
+{
+	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
+		return gic_read_iar_cavium_thunderx();
+	else
+		return gic_read_iar_common();
+}
+#endif
+
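+/* Wake this CPU's redistributor up (or put it to sleep) via GICR_WAKER */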
+static void gic_enable_redist(bool enable)
+{
+	void __iomem *rbase;
+	u32 count = 1000000;	/* 1s! */
+	u32 val;
+
+	rbase = gic_data_rdist_rd_base();
+
+	val = readl_relaxed(rbase + GICR_WAKER);
+	if (enable)
+		/* Wake up this CPU redistributor */
+		val &= ~GICR_WAKER_ProcessorSleep;
+	else
+		val |= GICR_WAKER_ProcessorSleep;
+	writel_relaxed(val, rbase + GICR_WAKER);
+
+	if (!enable) {		/* Check that GICR_WAKER is writeable */
+		val = readl_relaxed(rbase + GICR_WAKER);
+		if (!(val & GICR_WAKER_ProcessorSleep))
+			return;	/* No PM support in this redistributor */
+	}
+
+	while (--count) {
+		val = readl_relaxed(rbase + GICR_WAKER);
+		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
+			break;
+		cpu_relax();
+		udelay(1);
+	}
+	if (!count)
+		pr_err_ratelimited("redistributor failed to %s...\n",
+				   enable ? "wakeup" : "sleep");
+}
+
+/*
+ * Routines to disable, enable, EOI and route interrupts
+ */
+static int gic_peek_irq(struct irq_data *d, u32 offset)
+{
+	u32 mask = 1 << (gic_irq(d) % 32);
+	void __iomem *base;
+
+	if (gic_irq_in_rdist(d))
+		base = gic_data_rdist_sgi_base();
+	else
+		base = gic_data.dist_base;
+
+	return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
+}
+
+static void gic_poke_irq(struct irq_data *d, u32 offset)
+{
+	u32 mask = 1 << (gic_irq(d) % 32);
+	void (*rwp_wait)(void);
+	void __iomem *base;
+
+	if (gic_irq_in_rdist(d)) {
+		base = gic_data_rdist_sgi_base();
+		rwp_wait = gic_redist_wait_for_rwp;
+	} else {
+		base = gic_data.dist_base;
+		rwp_wait = gic_dist_wait_for_rwp;
+	}
+
+	writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
+	rwp_wait();
+}
+
+static void gic_mask_irq(struct irq_data *d)
+{
+	gic_poke_irq(d, GICD_ICENABLER);
+}
+
+static void gic_eoimode1_mask_irq(struct irq_data *d)
+{
+	gic_mask_irq(d);
+	/*
+	 * When masking a forwarded interrupt, make sure it is
+	 * deactivated as well.
+	 *
+	 * This ensures that an interrupt that is getting
+	 * disabled/masked will not get "stuck", because there is
+	 * no one to deactivate it (the guest is being terminated).
+	 */
+	if (irqd_is_forwarded_to_vcpu(d))
+		gic_poke_irq(d, GICD_ICACTIVER);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+	gic_poke_irq(d, GICD_ISENABLER);
+}
+
+static int gic_irq_set_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which, bool val)
+{
+	u32 reg;
+
+	if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
+		return -EINVAL;
+
+	switch (which) {
+	case IRQCHIP_STATE_PENDING:
+		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
+		break;
+
+	case IRQCHIP_STATE_ACTIVE:
+		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
+		break;
+
+	case IRQCHIP_STATE_MASKED:
+		reg = val ? GICD_ICENABLER : GICD_ISENABLER;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	gic_poke_irq(d, reg);
+	return 0;
+}
+
+static int gic_irq_get_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which, bool *val)
+{
+	if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
+		return -EINVAL;
+
+	switch (which) {
+	case IRQCHIP_STATE_PENDING:
+		*val = gic_peek_irq(d, GICD_ISPENDR);
+		break;
+
+	case IRQCHIP_STATE_ACTIVE:
+		*val = gic_peek_irq(d, GICD_ISACTIVER);
+		break;
+
+	case IRQCHIP_STATE_MASKED:
+		*val = !gic_peek_irq(d, GICD_ISENABLER);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void gic_eoi_irq(struct irq_data *d)
+{
+	gic_write_eoir(gic_irq(d));
+}
+
+static void gic_eoimode1_eoi_irq(struct irq_data *d)
+{
+	/*
+	 * No need to deactivate an LPI, or an interrupt that
+	 * is getting forwarded to a vcpu.
+	 */
+	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
+		return;
+	gic_write_dir(gic_irq(d));
+}
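+
+/*
+ * Note on the split above: with EOImode==1, the ICC_EOIR1_EL1 write in
+ * gic_eoi_irq() only drops the running priority, and the ICC_DIR_EL1
+ * write here performs the actual deactivation. LPIs have no active
+ * state, hence the early return for INTIDs >= 8192.
+ */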
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+	unsigned int irq = gic_irq(d);
+	void (*rwp_wait)(void);
+	void __iomem *base;
+
+	/* Interrupt configuration for SGIs can't be changed */
+	if (irq < 16)
+		return -EINVAL;
+
+	/* SPIs have restrictions on the supported types */
+	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
+			 type != IRQ_TYPE_EDGE_RISING)
+		return -EINVAL;
+
+	if (gic_irq_in_rdist(d)) {
+		base = gic_data_rdist_sgi_base();
+		rwp_wait = gic_redist_wait_for_rwp;
+	} else {
+		base = gic_data.dist_base;
+		rwp_wait = gic_dist_wait_for_rwp;
+	}
+
+	return gic_configure_irq(irq, type, base, rwp_wait);
+}
+
+static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
+{
+	if (vcpu)
+		irqd_set_forwarded_to_vcpu(d);
+	else
+		irqd_clr_forwarded_to_vcpu(d);
+	return 0;
+}
+
+static u64 gic_mpidr_to_affinity(unsigned long mpidr)
+{
+	u64 aff;
+
+	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+	return aff;
+}
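+
+/*
+ * Worked example (illustrative values only): an MPIDR with Aff2=1,
+ * Aff1=2, Aff0=3 packs to 0x10203, matching the GICD_IROUTER layout
+ * of Aff3 in bits [39:32], Aff2 in [23:16], Aff1 in [15:8] and
+ * Aff0 in [7:0].
+ */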
+
+static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+	u32 irqnr;
+
+	do {
+		irqnr = gic_read_iar();
+
+		if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
+			int err;
+
+			if (static_branch_likely(&supports_deactivate_key))
+				gic_write_eoir(irqnr);
+			else
+				isb();
+
+			err = handle_domain_irq(gic_data.domain, irqnr, regs);
+			if (err) {
+				WARN_ONCE(true, "Unexpected interrupt received!\n");
+				if (static_branch_likely(&supports_deactivate_key)) {
+					if (irqnr < 8192)
+						gic_write_dir(irqnr);
+				} else {
+					gic_write_eoir(irqnr);
+				}
+			}
+			continue;
+		}
+		if (irqnr < 16) {
+			gic_write_eoir(irqnr);
+			if (static_branch_likely(&supports_deactivate_key))
+				gic_write_dir(irqnr);
+#ifdef CONFIG_SMP
+			/*
+			 * Unlike GICv2, we don't need an smp_rmb() here.
+			 * The control dependency from gic_read_iar to
+			 * the ISB in gic_write_eoir is enough to ensure
+			 * that any shared data read by handle_IPI will
+			 * be read after the ACK.
+			 */
+			handle_IPI(irqnr, regs);
+#else
+			WARN_ONCE(true, "Unexpected SGI received!\n");
+#endif
+			continue;
+		}
+	} while (irqnr != ICC_IAR1_EL1_SPURIOUS);
+}
+
+static void __init gic_dist_init(void)
+{
+	unsigned int i;
+	u64 affinity;
+	void __iomem *base = gic_data.dist_base;
+
+	/* Disable the distributor */
+	writel_relaxed(0, base + GICD_CTLR);
+	gic_dist_wait_for_rwp();
+
+	/*
+	 * Configure SPIs as non-secure Group-1. This will only matter
+	 * if the GIC only has a single security state. This will not
+	 * do the right thing if the kernel is running in secure mode,
+	 * but that's not the intended use case anyway.
+	 */
+	for (i = 32; i < gic_data.irq_nr; i += 32)
+		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
+
+	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
+
+	/* Enable distributor with ARE, Group1 */
+	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
+		       base + GICD_CTLR);
+
+	/*
+	 * Set all global interrupts to the boot CPU only. ARE must be
+	 * enabled.
+	 */
+	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
+	for (i = 32; i < gic_data.irq_nr; i++)
+		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
+}
+
+static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
+{
+	int ret = -ENODEV;
+	int i;
+
+	for (i = 0; i < gic_data.nr_redist_regions; i++) {
+		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
+		u64 typer;
+		u32 reg;
+
+		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
+		if (reg != GIC_PIDR2_ARCH_GICv3 &&
+		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
+			pr_warn("No redistributor present @%p\n", ptr);
+			break;
+		}
+
+		do {
+			typer = gic_read_typer(ptr + GICR_TYPER);
+			ret = fn(gic_data.redist_regions + i, ptr);
+			if (!ret)
+				return 0;
+
+			if (gic_data.redist_regions[i].single_redist)
+				break;
+
+			if (gic_data.redist_stride) {
+				ptr += gic_data.redist_stride;
+			} else {
+				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
+				if (typer & GICR_TYPER_VLPIS)
+					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
+			}
+		} while (!(typer & GICR_TYPER_LAST));
+	}
+
+	return ret ? -ENODEV : 0;
+}
+
+static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
+{
+	unsigned long mpidr = cpu_logical_map(smp_processor_id());
+	u64 typer;
+	u32 aff;
+
+	/*
+	 * Convert affinity to a 32bit value that can be matched to
+	 * GICR_TYPER bits [63:32].
+	 */
+	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+	typer = gic_read_typer(ptr + GICR_TYPER);
+	if ((typer >> 32) == aff) {
+		u64 offset = ptr - region->redist_base;
+		gic_data_rdist_rd_base() = ptr;
+		gic_data_rdist()->phys_base = region->phys_base + offset;
+
+		pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
+			smp_processor_id(), mpidr,
+			(int)(region - gic_data.redist_regions),
+			&gic_data_rdist()->phys_base);
+		return 0;
+	}
+
+	/* Try next one */
+	return 1;
+}
+
+static int gic_populate_rdist(void)
+{
+	if (gic_iterate_rdists(__gic_populate_rdist) == 0)
+		return 0;
+
+	/* We couldn't even deal with ourselves... */
+	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
+	     smp_processor_id(),
+	     (unsigned long)cpu_logical_map(smp_processor_id()));
+	return -ENODEV;
+}
+
+static int __gic_update_vlpi_properties(struct redist_region *region,
+					void __iomem *ptr)
+{
+	u64 typer = gic_read_typer(ptr + GICR_TYPER);
+	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
+	gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);
+
+	return 1;
+}
+
+static void gic_update_vlpi_properties(void)
+{
+	gic_iterate_rdists(__gic_update_vlpi_properties);
+	pr_info("%sVLPI support, %sdirect LPI support\n",
+		!gic_data.rdists.has_vlpis ? "no " : "",
+		!gic_data.rdists.has_direct_lpi ? "no " : "");
+}
+
+static void gic_cpu_sys_reg_init(void)
+{
+	int i, cpu = smp_processor_id();
+	u64 mpidr = cpu_logical_map(cpu);
+	u64 need_rss = MPIDR_RS(mpidr);
+	bool group0;
+	u32 val, pribits;
+
+	/*
+	 * Need to check that the SRE bit has actually been set. If
+	 * not, it means that SRE is disabled at EL2. We're going to
+	 * die painfully, and there is nothing we can do about it.
+	 *
+	 * Kindly inform the luser.
+	 */
+	if (!gic_enable_sre())
+		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
+
+	pribits = gic_read_ctlr();
+	pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
+	pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
+	pribits++;
+
+	/*
+	 * Let's find out if Group0 is under control of EL3 or not by
+	 * setting the highest possible, non-zero priority in PMR.
+	 *
+	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
+	 * order for the CPU interface to set bit 7, and keep the
+	 * actual priority in the non-secure range. In the process, it
+	 * loses the least significant bit and the actual priority
+	 * becomes 0x80. Reading it back returns 0, indicating that
+	 * we don't have access to Group0.
+	 */
+	write_gicreg(BIT(8 - pribits), ICC_PMR_EL1);
+	val = read_gicreg(ICC_PMR_EL1);
+	group0 = val != 0;
+
+	/* Set priority mask register */
+	write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
+
+	/*
+	 * Some firmware hands over to the kernel with the BPR changed from
+	 * its reset value (and with a value large enough to prevent
+	 * any pre-emptive interrupts from working at all). Writing a zero
+	 * to BPR restores its reset value.
+	 */
+	gic_write_bpr1(0);
+
+	if (static_branch_likely(&supports_deactivate_key)) {
+		/* EOI drops priority only (mode 1) */
+		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
+	} else {
+		/* EOI deactivates interrupt too (mode 0) */
+		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
+	}
+
+	/* Always whack Group0 before Group1 */
+	if (group0) {
+		switch (pribits) {
+		case 8:
+		case 7:
+			write_gicreg(0, ICC_AP0R3_EL1);
+			write_gicreg(0, ICC_AP0R2_EL1);
+		case 6:
+			write_gicreg(0, ICC_AP0R1_EL1);
+		case 5:
+		case 4:
+			write_gicreg(0, ICC_AP0R0_EL1);
+		}
+
+		isb();
+	}
+
+	switch (pribits) {
+	case 8:
+	case 7:
+		write_gicreg(0, ICC_AP1R3_EL1);
+		write_gicreg(0, ICC_AP1R2_EL1);
+	case 6:
+		write_gicreg(0, ICC_AP1R1_EL1);
+	case 5:
+	case 4:
+		write_gicreg(0, ICC_AP1R0_EL1);
+	}
+
+	isb();
+
+	/* ... and let's hit the road... */
+	gic_write_grpen1(1);
+
+	/* Keep the RSS capability status in per_cpu variable */
+	per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
+
+	/* Check that all the CPUs are capable of sending SGIs to the other CPUs */
+	for_each_online_cpu(i) {
+		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
+
+		need_rss |= MPIDR_RS(cpu_logical_map(i));
+		if (need_rss && (!have_rss))
+			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
+				cpu, (unsigned long)mpidr,
+				i, (unsigned long)cpu_logical_map(i));
+	}
+
+	/*
+	 * The GIC spec says that, when ICC_CTLR_EL1.RSS==1 and
+	 * GICD_TYPER.RSS==0, writing the ICC_ASGI1R_EL1 register with
+	 * RS != 0 is a CONSTRAINED UNPREDICTABLE choice of:
+	 *   - The write is ignored.
+	 *   - The RS field is treated as 0.
+	 */
+	if (need_rss && (!gic_data.has_rss))
+		pr_crit_once("RSS is required but GICD doesn't support it\n");
+}
+
+static bool gicv3_nolpi;
+
+static int __init gicv3_nolpi_cfg(char *buf)
+{
+	return strtobool(buf, &gicv3_nolpi);
+}
+early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
+
+static int gic_dist_supports_lpis(void)
+{
+	return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && !gicv3_nolpi;
+}
+
+static void gic_cpu_init(void)
+{
+	void __iomem *rbase;
+
+	/* Register ourselves with the rest of the world */
+	if (gic_populate_rdist())
+		return;
+
+	gic_enable_redist(true);
+
+	rbase = gic_data_rdist_sgi_base();
+
+	/* Configure SGIs/PPIs as non-secure Group-1 */
+	writel_relaxed(~0, rbase + GICR_IGROUPR0);
+
+	gic_cpu_config(rbase, gic_redist_wait_for_rwp);
+
+	/* Give LPIs a spin */
+	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
+		its_cpu_init();
+
+	/* initialise system registers */
+	gic_cpu_sys_reg_init();
+}
+
+#ifdef CONFIG_SMP
+
+#define MPIDR_TO_SGI_RS(mpidr)	(MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
+#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)	((mpidr) & ~0xFUL)
+
+static int gic_starting_cpu(unsigned int cpu)
+{
+	gic_cpu_init();
+	return 0;
+}
+
+static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
+				   unsigned long cluster_id)
+{
+	int next_cpu, cpu = *base_cpu;
+	unsigned long mpidr = cpu_logical_map(cpu);
+	u16 tlist = 0;
+
+	while (cpu < nr_cpu_ids) {
+		tlist |= 1 << (mpidr & 0xf);
+
+		next_cpu = cpumask_next(cpu, mask);
+		if (next_cpu >= nr_cpu_ids)
+			goto out;
+		cpu = next_cpu;
+
+		mpidr = cpu_logical_map(cpu);
+
+		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
+			cpu--;
+			goto out;
+		}
+	}
+out:
+	*base_cpu = cpu;
+	return tlist;
+}
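+
+/*
+ * Example (hypothetical topology): if the mask covers CPU0..CPU3, all
+ * in cluster 0 with Aff0 values 0..3, one call returns tlist = 0xf and
+ * leaves *base_cpu at CPU3. A CPU belonging to a different cluster
+ * terminates the walk early, so it is picked up by a later SGI write
+ * for its own cluster.
+ */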
+
+#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
+	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
+		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)
+
+static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
+{
+	u64 val;
+
+	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
+	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
+	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
+	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
+	       MPIDR_TO_SGI_RS(cluster_id)		|
+	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
+
+	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
+	gic_write_sgi1r(val);
+}
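+
+/*
+ * For reference, the ICC_SGI1R_EL1 layout assembled above: TargetList
+ * in bits [15:0], Aff1 in [23:16], the SGI INTID in [27:24], Aff2 in
+ * [39:32], RS in [47:44] and Aff3 in [55:48].
+ */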
+
+static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+{
+	int cpu;
+
+	if (WARN_ON(irq >= 16))
+		return;
+
+	/*
+	 * Ensure that stores to Normal memory are visible to the
+	 * other CPUs before issuing the IPI.
+	 */
+	wmb();
+
+	for_each_cpu(cpu, mask) {
+		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
+		u16 tlist;
+
+		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
+		gic_send_sgi(cluster_id, tlist, irq);
+	}
+
+	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
+	isb();
+}
+
+static void gic_smp_init(void)
+{
+	set_smp_cross_call(gic_raise_softirq);
+	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+				  "irqchip/arm/gicv3:starting",
+				  gic_starting_cpu, NULL);
+}
+
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+			    bool force)
+{
+	unsigned int cpu;
+	void __iomem *reg;
+	int enabled;
+	u64 val;
+
+	if (force)
+		cpu = cpumask_first(mask_val);
+	else
+		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	if (gic_irq_in_rdist(d))
+		return -EINVAL;
+
+	/* If interrupt was enabled, disable it first */
+	enabled = gic_peek_irq(d, GICD_ISENABLER);
+	if (enabled)
+		gic_mask_irq(d);
+
+	reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
+	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
+
+	gic_write_irouter(val, reg);
+
+	/*
+	 * If the interrupt was enabled, enable it again. Otherwise,
+	 * just wait for the distributor to have digested our changes.
+	 */
+	if (enabled)
+		gic_unmask_irq(d);
+	else
+		gic_dist_wait_for_rwp();
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK_DONE;
+}
+#else
+#define gic_set_affinity	NULL
+#define gic_smp_init()		do { } while(0)
+#endif
+
+#ifdef CONFIG_CPU_PM
+/* Check whether it's single security state view */
+static bool gic_dist_security_disabled(void)
+{
+	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
+}
+
+static int gic_cpu_pm_notifier(struct notifier_block *self,
+			       unsigned long cmd, void *v)
+{
+	if (cmd == CPU_PM_EXIT) {
+		if (gic_dist_security_disabled())
+			gic_enable_redist(true);
+		gic_cpu_sys_reg_init();
+	} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
+		gic_write_grpen1(0);
+		gic_enable_redist(false);
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block gic_cpu_pm_notifier_block = {
+	.notifier_call = gic_cpu_pm_notifier,
+};
+
+static void gic_cpu_pm_init(void)
+{
+	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
+}
+
+#else
+static inline void gic_cpu_pm_init(void) { }
+#endif /* CONFIG_CPU_PM */
+
+static struct irq_chip gic_chip = {
+	.name			= "GICv3",
+	.irq_mask		= gic_mask_irq,
+	.irq_unmask		= gic_unmask_irq,
+	.irq_eoi		= gic_eoi_irq,
+	.irq_set_type		= gic_set_type,
+	.irq_set_affinity	= gic_set_affinity,
+	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
+	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
+	.flags			= IRQCHIP_SET_TYPE_MASKED |
+				  IRQCHIP_SKIP_SET_WAKE |
+				  IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static struct irq_chip gic_eoimode1_chip = {
+	.name			= "GICv3",
+	.irq_mask		= gic_eoimode1_mask_irq,
+	.irq_unmask		= gic_unmask_irq,
+	.irq_eoi		= gic_eoimode1_eoi_irq,
+	.irq_set_type		= gic_set_type,
+	.irq_set_affinity	= gic_set_affinity,
+	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
+	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
+	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
+	.flags			= IRQCHIP_SET_TYPE_MASKED |
+				  IRQCHIP_SKIP_SET_WAKE |
+				  IRQCHIP_MASK_ON_SUSPEND,
+};
+
+#define GIC_ID_NR	(1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+			      irq_hw_number_t hw)
+{
+	struct irq_chip *chip = &gic_chip;
+
+	if (static_branch_likely(&supports_deactivate_key))
+		chip = &gic_eoimode1_chip;
+
+	/* SGIs are private to the core kernel */
+	if (hw < 16)
+		return -EPERM;
+	/* Nothing here */
+	if (hw >= gic_data.irq_nr && hw < 8192)
+		return -EPERM;
+	/* Off limits */
+	if (hw >= GIC_ID_NR)
+		return -EPERM;
+
+	/* PPIs */
+	if (hw < 32) {
+		irq_set_percpu_devid(irq);
+		irq_domain_set_info(d, irq, hw, chip, d->host_data,
+				    handle_percpu_devid_irq, NULL, NULL);
+		irq_set_status_flags(irq, IRQ_NOAUTOEN);
+	}
+	/* SPIs */
+	if (hw >= 32 && hw < gic_data.irq_nr) {
+		irq_domain_set_info(d, irq, hw, chip, d->host_data,
+				    handle_fasteoi_irq, NULL, NULL);
+		irq_set_probe(irq);
+		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
+	}
+	/* LPIs */
+	if (hw >= 8192 && hw < GIC_ID_NR) {
+		if (!gic_dist_supports_lpis())
+			return -EPERM;
+		irq_domain_set_info(d, irq, hw, chip, d->host_data,
+				    handle_fasteoi_irq, NULL, NULL);
+	}
+
+	return 0;
+}
+
+#define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)
+
+static int gic_irq_domain_translate(struct irq_domain *d,
+				    struct irq_fwspec *fwspec,
+				    unsigned long *hwirq,
+				    unsigned int *type)
+{
+	if (is_of_node(fwspec->fwnode)) {
+		if (fwspec->param_count < 3)
+			return -EINVAL;
+
+		switch (fwspec->param[0]) {
+		case 0:			/* SPI */
+			*hwirq = fwspec->param[1] + 32;
+			break;
+		case 1:			/* PPI */
+		case GIC_IRQ_TYPE_PARTITION:
+			*hwirq = fwspec->param[1] + 16;
+			break;
+		case GIC_IRQ_TYPE_LPI:	/* LPI */
+			*hwirq = fwspec->param[1];
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+
+		/*
+		 * Make it clear that broken DTs are... broken.
+		 * Partitioned PPIs are an unfortunate exception.
+		 */
+		WARN_ON(*type == IRQ_TYPE_NONE &&
+			fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
+		return 0;
+	}
+
+	if (is_fwnode_irqchip(fwspec->fwnode)) {
+		if (fwspec->param_count != 2)
+			return -EINVAL;
+
+		*hwirq = fwspec->param[0];
+		*type = fwspec->param[1];
+
+		WARN_ON(*type == IRQ_TYPE_NONE);
+		return 0;
+	}
+
+	return -EINVAL;
+}
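+
+/*
+ * Example (typical DT usage): "interrupts = <0 23 4>", i.e. SPI 23
+ * with IRQ_TYPE_LEVEL_HIGH, translates to hwirq 55 above, since SPIs
+ * start at INTID 32.
+ */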
+
+static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				unsigned int nr_irqs, void *arg)
+{
+	int i, ret;
+	irq_hw_number_t hwirq;
+	unsigned int type = IRQ_TYPE_NONE;
+	struct irq_fwspec *fwspec = arg;
+
+	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < nr_irqs; i++) {
+		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+				unsigned int nr_irqs)
+{
+	int i;
+
+	for (i = 0; i < nr_irqs; i++) {
+		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+		irq_set_handler(virq + i, NULL);
+		irq_domain_reset_irq_data(d);
+	}
+}
+
+static int gic_irq_domain_select(struct irq_domain *d,
+				 struct irq_fwspec *fwspec,
+				 enum irq_domain_bus_token bus_token)
+{
+	/* Not for us */
+	if (fwspec->fwnode != d->fwnode)
+		return 0;
+
+	/* If this is not DT, then we have a single domain */
+	if (!is_of_node(fwspec->fwnode))
+		return 1;
+
+	/*
+	 * If this is a PPI and we have a 4th (non-null) parameter,
+	 * then we need to match the partition domain.
+	 */
+	if (fwspec->param_count >= 4 &&
+	    fwspec->param[0] == 1 && fwspec->param[3] != 0)
+		return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
+
+	return d == gic_data.domain;
+}
+
+static const struct irq_domain_ops gic_irq_domain_ops = {
+	.translate = gic_irq_domain_translate,
+	.alloc = gic_irq_domain_alloc,
+	.free = gic_irq_domain_free,
+	.select = gic_irq_domain_select,
+};
+
+static int partition_domain_translate(struct irq_domain *d,
+				      struct irq_fwspec *fwspec,
+				      unsigned long *hwirq,
+				      unsigned int *type)
+{
+	struct device_node *np;
+	int ret;
+
+	np = of_find_node_by_phandle(fwspec->param[3]);
+	if (WARN_ON(!np))
+		return -EINVAL;
+
+	ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
+				     of_node_to_fwnode(np));
+	if (ret < 0)
+		return ret;
+
+	*hwirq = ret;
+	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+
+	return 0;
+}
+
+static const struct irq_domain_ops partition_domain_ops = {
+	.translate = partition_domain_translate,
+	.select = gic_irq_domain_select,
+};
+
+static int __init gic_init_bases(void __iomem *dist_base,
+				 struct redist_region *rdist_regs,
+				 u32 nr_redist_regions,
+				 u64 redist_stride,
+				 struct fwnode_handle *handle)
+{
+	u32 typer;
+	int gic_irqs;
+	int err;
+
+	if (!is_hyp_mode_available())
+		static_branch_disable(&supports_deactivate_key);
+
+	if (static_branch_likely(&supports_deactivate_key))
+		pr_info("GIC: Using split EOI/Deactivate mode\n");
+
+	gic_data.fwnode = handle;
+	gic_data.dist_base = dist_base;
+	gic_data.redist_regions = rdist_regs;
+	gic_data.nr_redist_regions = nr_redist_regions;
+	gic_data.redist_stride = redist_stride;
+
+	/*
+	 * Find out how many interrupts are supported.
+	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
+	 */
+	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
+	gic_data.rdists.gicd_typer = typer;
+	gic_irqs = GICD_TYPER_IRQS(typer);
+	if (gic_irqs > 1020)
+		gic_irqs = 1020;
+	gic_data.irq_nr = gic_irqs;
+
+	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
+						 &gic_data);
+	/* irq_domain_create_tree() can fail; only tag the bus token if it didn't */
+	if (gic_data.domain)
+		irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
+	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
+	gic_data.rdists.has_vlpis = true;
+	gic_data.rdists.has_direct_lpi = true;
+
+	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
+		err = -ENOMEM;
+		goto out_free;
+	}
+
+	gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
+	pr_info("Distributor has %sRange Selector support\n",
+		gic_data.has_rss ? "" : "no ");
+
+	if (typer & GICD_TYPER_MBIS) {
+		err = mbi_init(handle, gic_data.domain);
+		if (err)
+			pr_err("Failed to initialize MBIs\n");
+	}
+
+	set_handle_irq(gic_handle_irq);
+
+	gic_update_vlpi_properties();
+
+	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
+		its_init(handle, &gic_data.rdists, gic_data.domain);
+
+	gic_smp_init();
+	gic_dist_init();
+	gic_cpu_init();
+	gic_cpu_pm_init();
+
+	return 0;
+
+out_free:
+	if (gic_data.domain)
+		irq_domain_remove(gic_data.domain);
+	free_percpu(gic_data.rdists.rdist);
+	return err;
+}
+
+static int __init gic_validate_dist_version(void __iomem *dist_base)
+{
+	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+
+	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
+		return -ENODEV;
+
+	return 0;
+}
+
+/* Create all possible partitions at boot time */
+static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
+{
+	struct device_node *parts_node, *child_part;
+	int part_idx = 0, i;
+	int nr_parts;
+	struct partition_affinity *parts;
+
+	parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
+	if (!parts_node)
+		return;
+
+	nr_parts = of_get_child_count(parts_node);
+
+	if (!nr_parts)
+		goto out_put_node;
+
+	parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
+	if (WARN_ON(!parts))
+		goto out_put_node;
+
+	for_each_child_of_node(parts_node, child_part) {
+		struct partition_affinity *part;
+		int n;
+
+		part = &parts[part_idx];
+
+		part->partition_id = of_node_to_fwnode(child_part);
+
+		pr_info("GIC: PPI partition %s[%d] { ",
+			child_part->name, part_idx);
+
+		n = of_property_count_elems_of_size(child_part, "affinity",
+						    sizeof(u32));
+		WARN_ON(n <= 0);
+
+		for (i = 0; i < n; i++) {
+			int err, cpu;
+			u32 cpu_phandle;
+			struct device_node *cpu_node;
+
+			err = of_property_read_u32_index(child_part, "affinity",
+							 i, &cpu_phandle);
+			if (WARN_ON(err))
+				continue;
+
+			cpu_node = of_find_node_by_phandle(cpu_phandle);
+			if (WARN_ON(!cpu_node))
+				continue;
+
+			cpu = of_cpu_node_to_id(cpu_node);
+			if (WARN_ON(cpu < 0))
+				continue;
+
+			pr_cont("%pOF[%d] ", cpu_node, cpu);
+
+			cpumask_set_cpu(cpu, &part->mask);
+		}
+
+		pr_cont("}\n");
+		part_idx++;
+	}
+
+	for (i = 0; i < 16; i++) {
+		unsigned int irq;
+		struct partition_desc *desc;
+		struct irq_fwspec ppi_fwspec = {
+			.fwnode		= gic_data.fwnode,
+			.param_count	= 3,
+			.param		= {
+				[0]	= GIC_IRQ_TYPE_PARTITION,
+				[1]	= i,
+				[2]	= IRQ_TYPE_NONE,
+			},
+		};
+
+		irq = irq_create_fwspec_mapping(&ppi_fwspec);
+		if (WARN_ON(!irq))
+			continue;
+		desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
+					     irq, &partition_domain_ops);
+		if (WARN_ON(!desc))
+			continue;
+
+		gic_data.ppi_descs[i] = desc;
+	}
+
+out_put_node:
+	of_node_put(parts_node);
+}
+
+static void __init gic_of_setup_kvm_info(struct device_node *node)
+{
+	int ret;
+	struct resource r;
+	u32 gicv_idx;
+
+	gic_v3_kvm_info.type = GIC_V3;
+
+	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
+	if (!gic_v3_kvm_info.maint_irq)
+		return;
+
+	if (of_property_read_u32(node, "#redistributor-regions",
+				 &gicv_idx))
+		gicv_idx = 1;
+
+	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
+	ret = of_address_to_resource(node, gicv_idx, &r);
+	if (!ret)
+		gic_v3_kvm_info.vcpu = r;
+
+	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+	gic_set_kvm_info(&gic_v3_kvm_info);
+}
+
+static int __init gic_of_init(struct device_node *node, struct device_node *parent)
+{
+	void __iomem *dist_base;
+	struct redist_region *rdist_regs;
+	u64 redist_stride;
+	u32 nr_redist_regions;
+	int err, i;
+
+	dist_base = of_iomap(node, 0);
+	if (!dist_base) {
+		pr_err("%pOF: unable to map gic dist registers\n", node);
+		return -ENXIO;
+	}
+
+	err = gic_validate_dist_version(dist_base);
+	if (err) {
+		pr_err("%pOF: no distributor detected, giving up\n", node);
+		goto out_unmap_dist;
+	}
+
+	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
+		nr_redist_regions = 1;
+
+	rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
+			     GFP_KERNEL);
+	if (!rdist_regs) {
+		err = -ENOMEM;
+		goto out_unmap_dist;
+	}
+
+	for (i = 0; i < nr_redist_regions; i++) {
+		struct resource res;
+		int ret;
+
+		ret = of_address_to_resource(node, 1 + i, &res);
+		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
+		if (ret || !rdist_regs[i].redist_base) {
+			pr_err("%pOF: couldn't map region %d\n", node, i);
+			err = -ENODEV;
+			goto out_unmap_rdist;
+		}
+		rdist_regs[i].phys_base = res.start;
+	}
+
+	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
+		redist_stride = 0;
+
+	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
+			     redist_stride, &node->fwnode);
+	if (err)
+		goto out_unmap_rdist;
+
+	gic_populate_ppi_partitions(node);
+
+	if (static_branch_likely(&supports_deactivate_key))
+		gic_of_setup_kvm_info(node);
+	return 0;
+
+out_unmap_rdist:
+	for (i = 0; i < nr_redist_regions; i++)
+		if (rdist_regs[i].redist_base)
+			iounmap(rdist_regs[i].redist_base);
+	kfree(rdist_regs);
+out_unmap_dist:
+	iounmap(dist_base);
+	return err;
+}
+
+IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
+
+#ifdef CONFIG_ACPI
+static struct
+{
+	void __iomem *dist_base;
+	struct redist_region *redist_regs;
+	u32 nr_redist_regions;
+	bool single_redist;
+	u32 maint_irq;
+	int maint_irq_mode;
+	phys_addr_t vcpu_base;
+} acpi_data __initdata;
+
+static void __init
+gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
+{
+	static int count = 0;
+
+	acpi_data.redist_regs[count].phys_base = phys_base;
+	acpi_data.redist_regs[count].redist_base = redist_base;
+	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
+	count++;
+}
+
+static int __init
+gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
+			   const unsigned long end)
+{
+	struct acpi_madt_generic_redistributor *redist =
+			(struct acpi_madt_generic_redistributor *)header;
+	void __iomem *redist_base;
+
+	redist_base = ioremap(redist->base_address, redist->length);
+	if (!redist_base) {
+		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
+		return -ENOMEM;
+	}
+
+	gic_acpi_register_redist(redist->base_address, redist_base);
+	return 0;
+}
+
+static int __init
+gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
+			 const unsigned long end)
+{
+	struct acpi_madt_generic_interrupt *gicc =
+				(struct acpi_madt_generic_interrupt *)header;
+	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
+	void __iomem *redist_base;
+
+	/* GICC entries which are not marked ACPI_MADT_ENABLED are unusable, so skip them */
+	if (!(gicc->flags & ACPI_MADT_ENABLED))
+		return 0;
+
+	redist_base = ioremap(gicc->gicr_base_address, size);
+	if (!redist_base)
+		return -ENOMEM;
+
+	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
+	return 0;
+}
+
+static int __init gic_acpi_collect_gicr_base(void)
+{
+	acpi_tbl_entry_handler redist_parser;
+	enum acpi_madt_type type;
+
+	if (acpi_data.single_redist) {
+		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
+		redist_parser = gic_acpi_parse_madt_gicc;
+	} else {
+		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
+		redist_parser = gic_acpi_parse_madt_redist;
+	}
+
+	/* Collect redistributor base addresses in GICR entries */
+	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
+		return 0;
+
+	pr_info("No valid GICR entries exist\n");
+	return -ENODEV;
+}
+
+static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header,
+				  const unsigned long end)
+{
+	/* Subtable presence means that redist exists, that's it */
+	return 0;
+}
+
+static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
+				      const unsigned long end)
+{
+	struct acpi_madt_generic_interrupt *gicc =
+				(struct acpi_madt_generic_interrupt *)header;
+
+	/*
+	 * If the GICC entry is enabled and has a valid GICR base address,
+	 * then the GICR base is presented via GICC.
+	 */
+	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
+		return 0;
+
+	/*
+	 * It's perfectly valid for firmware to pass a disabled GICC entry;
+	 * the driver should not treat it as an error and should skip the
+	 * entry rather than fail the probe.
+	 */
+	if (!(gicc->flags & ACPI_MADT_ENABLED))
+		return 0;
+
+	return -ENODEV;
+}
+
+static int __init gic_acpi_count_gicr_regions(void)
+{
+	int count;
+
+	/*
+	 * Count how many redistributor regions we have. It is not allowed
+	 * to mix redistributor descriptions: GICR and GICC subtables have
+	 * to be mutually exclusive.
+	 */
+	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
+				      gic_acpi_match_gicr, 0);
+	if (count > 0) {
+		acpi_data.single_redist = false;
+		return count;
+	}
+
+	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+				      gic_acpi_match_gicc, 0);
+	if (count > 0)
+		acpi_data.single_redist = true;
+
+	return count;
+}
+
+static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
+					   struct acpi_probe_entry *ape)
+{
+	struct acpi_madt_generic_distributor *dist;
+	int count;
+
+	dist = (struct acpi_madt_generic_distributor *)header;
+	if (dist->version != ape->driver_data)
+		return false;
+
+	/* We need to do that exercise anyway, the sooner the better */
+	count = gic_acpi_count_gicr_regions();
+	if (count <= 0)
+		return false;
+
+	acpi_data.nr_redist_regions = count;
+	return true;
+}
+
+static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header,
+						const unsigned long end)
+{
+	struct acpi_madt_generic_interrupt *gicc =
+		(struct acpi_madt_generic_interrupt *)header;
+	int maint_irq_mode;
+	static bool first_madt = true;
+
+	/* Skip unusable CPUs */
+	if (!(gicc->flags & ACPI_MADT_ENABLED))
+		return 0;
+
+	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
+		ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
+
+	if (first_madt) {
+		first_madt = false;
+
+		acpi_data.maint_irq = gicc->vgic_interrupt;
+		acpi_data.maint_irq_mode = maint_irq_mode;
+		acpi_data.vcpu_base = gicc->gicv_base_address;
+
+		return 0;
+	}
+
+	/*
+	 * The maintenance interrupt and GICV should be the same for every CPU
+	 */
+	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
+	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
+	    (acpi_data.vcpu_base != gicc->gicv_base_address))
+		return -EINVAL;
+
+	return 0;
+}
+
+static bool __init gic_acpi_collect_virt_info(void)
+{
+	int count;
+
+	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+				      gic_acpi_parse_virt_madt_gicc, 0);
+
+	return (count > 0);
+}
+
+#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
+#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
+#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)
+
+static void __init gic_acpi_setup_kvm_info(void)
+{
+	int irq;
+
+	if (!gic_acpi_collect_virt_info()) {
+		pr_warn("Unable to get hardware information used for virtualization\n");
+		return;
+	}
+
+	gic_v3_kvm_info.type = GIC_V3;
+
+	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
+				acpi_data.maint_irq_mode,
+				ACPI_ACTIVE_HIGH);
+	if (irq <= 0)
+		return;
+
+	gic_v3_kvm_info.maint_irq = irq;
+
+	if (acpi_data.vcpu_base) {
+		struct resource *vcpu = &gic_v3_kvm_info.vcpu;
+
+		vcpu->flags = IORESOURCE_MEM;
+		vcpu->start = acpi_data.vcpu_base;
+		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
+	}
+
+	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+	gic_set_kvm_info(&gic_v3_kvm_info);
+}
+
+static int __init
+gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
+{
+	struct acpi_madt_generic_distributor *dist;
+	struct fwnode_handle *domain_handle;
+	size_t size;
+	int i, err;
+
+	/* Get distributor base address */
+	dist = (struct acpi_madt_generic_distributor *)header;
+	acpi_data.dist_base = ioremap(dist->base_address,
+				      ACPI_GICV3_DIST_MEM_SIZE);
+	if (!acpi_data.dist_base) {
+		pr_err("Unable to map GICD registers\n");
+		return -ENOMEM;
+	}
+
+	err = gic_validate_dist_version(acpi_data.dist_base);
+	if (err) {
+		pr_err("No distributor detected at @%p, giving up\n",
+		       acpi_data.dist_base);
+		goto out_dist_unmap;
+	}
+
+	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
+	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
+	if (!acpi_data.redist_regs) {
+		err = -ENOMEM;
+		goto out_dist_unmap;
+	}
+
+	err = gic_acpi_collect_gicr_base();
+	if (err)
+		goto out_redist_unmap;
+
+	domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base);
+	if (!domain_handle) {
+		err = -ENOMEM;
+		goto out_redist_unmap;
+	}
+
+	err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
+			     acpi_data.nr_redist_regions, 0, domain_handle);
+	if (err)
+		goto out_fwhandle_free;
+
+	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
+
+	if (static_branch_likely(&supports_deactivate_key))
+		gic_acpi_setup_kvm_info();
+
+	return 0;
+
+out_fwhandle_free:
+	irq_domain_free_fwnode(domain_handle);
+out_redist_unmap:
+	for (i = 0; i < acpi_data.nr_redist_regions; i++)
+		if (acpi_data.redist_regs[i].redist_base)
+			iounmap(acpi_data.redist_regs[i].redist_base);
+	kfree(acpi_data.redist_regs);
+out_dist_unmap:
+	iounmap(acpi_data.dist_base);
+	return err;
+}
+IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
+		     gic_acpi_init);
+IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
+		     gic_acpi_init);
+IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
+		     gic_acpi_init);
+#endif
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
new file mode 100644
index 0000000..dba9d67
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/sched.h>
+
+#include <linux/irqchip/arm-gic-v4.h>
+
+/*
+ * WARNING: The blurb below assumes that you understand the
+ * intricacies of GICv3, GICv4, and how a guest's view of a GICv3 gets
+ * translated into GICv4 commands. So it effectively targets at most
+ * two individuals. You know who you are.
+ *
+ * The core GICv4 code is designed to *avoid* exposing too much of the
+ * core GIC code (that would in turn leak into the hypervisor code),
+ * and instead provide a hypervisor agnostic interface to the HW (of
+ * course, the astute reader will quickly realize that hypervisor
+ * agnostic actually means KVM-specific - what were you thinking?).
+ *
+ * In order to achieve a modicum of isolation, we try to hide most of
+ * the GICv4 "stuff" behind normal irqchip operations:
+ *
+ * - Any guest-visible VLPI is backed by a Linux interrupt (and a
+ *   physical LPI which gets unmapped when the guest maps the
+ *   VLPI). This allows the same DevID/EventID pair to be either
+ *   mapped to the LPI (host) or the VLPI (guest). Note that this is
+ *   exclusive, and you cannot have both.
+ *
+ * - Enabling/disabling a VLPI is done by issuing mask/unmask calls.
+ *
+ * - Guest INT/CLEAR commands are implemented through
+ *   irq_set_irqchip_state().
+ *
+ * - The *bizarre* stuff (mapping/unmapping an interrupt to a VLPI, or
+ *   issuing an INV after changing a priority) gets shoved into the
+ *   irq_set_vcpu_affinity() method. While this is quite horrible
+ *   (let's face it, this is the irqchip version of an ioctl), it
+ *   confines the crap to a single location. And map/unmap really is
+ *   about setting the affinity of a VLPI to a vcpu, so only INV is
+ *   majorly out of place. So there.
+ *
+ * A number of commands are simply not provided by this interface, as
+ * they do not make direct sense. For example, MAPD is purely local to
+ * the virtual ITS (because it references a virtual device, and the
+ * physical ITS is still very much in charge of the physical
+ * device). Same goes for things like MAPC (the physical ITS deals
+ * with the actual vPE affinity, and not the braindead concept of
+ * collection). SYNC is not provided either, as each and every command
+ * is followed by a VSYNC. This could be relaxed in the future, should
+ * this be seen as a bottleneck (yes, this means *never*).
+ *
+ * But handling VLPIs is only one side of the job of the GICv4
+ * code. The other (darker) side is to take care of the doorbell
+ * interrupts which are delivered when a VLPI targeting a non-running
+ * vcpu is being made pending.
+ *
+ * The choice made here is that each vcpu (VPE in old northern GICv4
+ * dialect) gets a single doorbell LPI, no matter how many interrupts
+ * are targeting it. This has a nice property, which is that the
+ * interrupt becomes a handle for the VPE, and that the hypervisor
+ * code can manipulate it through the normal interrupt API:
+ *
+ * - VMs (or rather the VM abstraction that matters to the GIC)
+ *   contain an irq domain where each interrupt maps to a VPE. In
+ *   turn, this domain sits on top of the normal LPI allocator, and a
+ *   specially crafted irq_chip implementation.
+ *
+ * - mask/unmask do what is expected on the doorbell interrupt.
+ *
+ * - irq_set_affinity is used to move a VPE from one redistributor to
+ *   another.
+ *
+ * - irq_set_vcpu_affinity once again gets hijacked for the purpose of
+ *   creating a new sub-API, namely scheduling/descheduling a VPE
+ *   (which involves programming GICR_V{PROP,PEND}BASER) and
+ *   performing INVALL operations.
+ */
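+
+/*
+ * A minimal sketch of the expected hypervisor-side calling sequence
+ * (hypothetical pseudo-flow, error handling elided):
+ *
+ *	its_alloc_vcpu_irqs(vm);	// one doorbell LPI per VPE
+ *	its_map_vlpi(host_irq, &map);	// DevID/EventID now hits the VLPI
+ *	its_schedule_vpe(vpe, true);	// vcpu runs on this CPU
+ *	...
+ *	its_schedule_vpe(vpe, false);	// vcpu stops, doorbell is armed
+ *	its_unmap_vlpi(host_irq);	// back to a plain host LPI
+ *	its_free_vcpu_irqs(vm);
+ */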
+
+static struct irq_domain *gic_domain;
+static const struct irq_domain_ops *vpe_domain_ops;
+
+int its_alloc_vcpu_irqs(struct its_vm *vm)
+{
+	int vpe_base_irq, i;
+
+	vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe",
+						      task_pid_nr(current));
+	if (!vm->fwnode)
+		goto err;
+
+	vm->domain = irq_domain_create_hierarchy(gic_domain, 0, vm->nr_vpes,
+						 vm->fwnode, vpe_domain_ops,
+						 vm);
+	if (!vm->domain)
+		goto err;
+
+	for (i = 0; i < vm->nr_vpes; i++) {
+		vm->vpes[i]->its_vm = vm;
+		vm->vpes[i]->idai = true;
+	}
+
+	vpe_base_irq = __irq_domain_alloc_irqs(vm->domain, -1, vm->nr_vpes,
+					       NUMA_NO_NODE, vm,
+					       false, NULL);
+	if (vpe_base_irq <= 0)
+		goto err;
+
+	for (i = 0; i < vm->nr_vpes; i++)
+		vm->vpes[i]->irq = vpe_base_irq + i;
+
+	return 0;
+
+err:
+	if (vm->domain)
+		irq_domain_remove(vm->domain);
+	if (vm->fwnode)
+		irq_domain_free_fwnode(vm->fwnode);
+
+	return -ENOMEM;
+}
+
+void its_free_vcpu_irqs(struct its_vm *vm)
+{
+	irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
+	irq_domain_remove(vm->domain);
+	irq_domain_free_fwnode(vm->fwnode);
+}
+
+static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
+{
+	return irq_set_vcpu_affinity(vpe->irq, info);
+}
+
+int its_schedule_vpe(struct its_vpe *vpe, bool on)
+{
+	struct its_cmd_info info;
+
+	WARN_ON(preemptible());
+
+	info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
+
+	return its_send_vpe_cmd(vpe, &info);
+}
+
+int its_invall_vpe(struct its_vpe *vpe)
+{
+	struct its_cmd_info info = {
+		.cmd_type = INVALL_VPE,
+	};
+
+	return its_send_vpe_cmd(vpe, &info);
+}
+
+int its_map_vlpi(int irq, struct its_vlpi_map *map)
+{
+	struct its_cmd_info info = {
+		.cmd_type = MAP_VLPI,
+		{
+			.map      = map,
+		},
+	};
+	int ret;
+
+	/*
+	 * The host will never see that interrupt firing again, so it
+	 * is vital that we don't do any lazy masking.
+	 */
+	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
+	ret = irq_set_vcpu_affinity(irq, &info);
+	if (ret)
+		irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
+	return ret;
+}
+
+int its_get_vlpi(int irq, struct its_vlpi_map *map)
+{
+	struct its_cmd_info info = {
+		.cmd_type = GET_VLPI,
+		{
+			.map      = map,
+		},
+	};
+
+	return irq_set_vcpu_affinity(irq, &info);
+}
+
+int its_unmap_vlpi(int irq)
+{
+	irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
+	return irq_set_vcpu_affinity(irq, NULL);
+}
+
+int its_prop_update_vlpi(int irq, u8 config, bool inv)
+{
+	struct its_cmd_info info = {
+		.cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI,
+		{
+			.config   = config,
+		},
+	};
+
+	return irq_set_vcpu_affinity(irq, &info);
+}
+
+int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
+{
+	if (domain) {
+		pr_info("ITS: Enabling GICv4 support\n");
+		gic_domain = domain;
+		vpe_domain_ops = ops;
+		return 0;
+	}
+
+	pr_err("ITS: No GICv4 VPE domain allocated\n");
+	return -ENODEV;
+}
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
new file mode 100644
index 0000000..ced10c4
--- /dev/null
+++ b/drivers/irqchip/irq-gic.c
@@ -0,0 +1,1677 @@
+/*
+ *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Interrupt architecture for the GIC:
+ *
+ * o There is one Interrupt Distributor, which receives interrupts
+ *   from system devices and sends them to the Interrupt Controllers.
+ *
+ * o There is one CPU Interface per CPU, which forwards interrupts sent
+ *   by the Distributor, and interrupts generated locally, to the
+ *   associated CPU. The base address of the CPU interface is usually
+ *   aliased so that the same address points to different chips depending
+ *   on the CPU it is accessed from.
+ *
+ * Note that IRQs 0-31 are special - they are local to each CPU.
+ * As such, the enable set/clear, pending set/clear and active bit
+ * registers are banked per-cpu for these sources.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/acpi.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/cputype.h>
+#include <asm/irq.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+#include <asm/virt.h>
+
+#include "irq-gic-common.h"
+
+#ifdef CONFIG_ARM64
+#include <asm/cpufeature.h>
+
+static void gic_check_cpu_features(void)
+{
+	WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
+			TAINT_CPU_OUT_OF_SPEC,
+			"GICv3 system registers enabled, broken firmware!\n");
+}
+#else
+#define gic_check_cpu_features()	do { } while(0)
+#endif
+
+union gic_base {
+	void __iomem *common_base;
+	void __percpu * __iomem *percpu_base;
+};
+
+struct gic_chip_data {
+	struct irq_chip chip;
+	union gic_base dist_base;
+	union gic_base cpu_base;
+	void __iomem *raw_dist_base;
+	void __iomem *raw_cpu_base;
+	u32 percpu_offset;
+#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
+	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
+	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
+	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
+	u32 __percpu *saved_ppi_enable;
+	u32 __percpu *saved_ppi_active;
+	u32 __percpu *saved_ppi_conf;
+#endif
+	struct irq_domain *domain;
+	unsigned int gic_irqs;
+#ifdef CONFIG_GIC_NON_BANKED
+	void __iomem *(*get_base)(union gic_base *);
+#endif
+};
+
+#ifdef CONFIG_BL_SWITCHER
+
+static DEFINE_RAW_SPINLOCK(cpu_map_lock);
+
+#define gic_lock_irqsave(f)		\
+	raw_spin_lock_irqsave(&cpu_map_lock, (f))
+#define gic_unlock_irqrestore(f)	\
+	raw_spin_unlock_irqrestore(&cpu_map_lock, (f))
+
+#define gic_lock()			raw_spin_lock(&cpu_map_lock)
+#define gic_unlock()			raw_spin_unlock(&cpu_map_lock)
+
+#else
+
+#define gic_lock_irqsave(f)		do { (void)(f); } while(0)
+#define gic_unlock_irqrestore(f)	do { (void)(f); } while(0)
+
+#define gic_lock()			do { } while(0)
+#define gic_unlock()			do { } while(0)
+
+#endif
+
+/*
+ * The GIC mapping of CPU interfaces does not necessarily match
+ * the logical CPU numbering.  Let's use a mapping as returned
+ * by the GIC itself.
+ */
+#define NR_GIC_CPU_IF 8
+static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
+
+static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
+
+static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;
+
+static struct gic_kvm_info gic_v2_kvm_info;
+
+#ifdef CONFIG_GIC_NON_BANKED
+static void __iomem *gic_get_percpu_base(union gic_base *base)
+{
+	return raw_cpu_read(*base->percpu_base);
+}
+
+static void __iomem *gic_get_common_base(union gic_base *base)
+{
+	return base->common_base;
+}
+
+static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
+{
+	return data->get_base(&data->dist_base);
+}
+
+static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
+{
+	return data->get_base(&data->cpu_base);
+}
+
+static inline void gic_set_base_accessor(struct gic_chip_data *data,
+					 void __iomem *(*f)(union gic_base *))
+{
+	data->get_base = f;
+}
+#else
+#define gic_data_dist_base(d)	((d)->dist_base.common_base)
+#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
+#define gic_set_base_accessor(d, f)
+#endif
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+	return gic_data_dist_base(gic_data);
+}
+
+static inline void __iomem *gic_cpu_base(struct irq_data *d)
+{
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+	return gic_data_cpu_base(gic_data);
+}
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+	return d->hwirq;
+}
+
+static inline bool cascading_gic_irq(struct irq_data *d)
+{
+	void *data = irq_data_get_irq_handler_data(d);
+
+	/*
+	 * If handler_data is set, this is a cascading interrupt, and
+	 * it cannot possibly be forwarded.
+	 */
+	return data != NULL;
+}
+
+/*
+ * Routines to acknowledge, disable and enable interrupts
+ */
+static void gic_poke_irq(struct irq_data *d, u32 offset)
+{
+	u32 mask = 1 << (gic_irq(d) % 32);
+	writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
+}
+
+static int gic_peek_irq(struct irq_data *d, u32 offset)
+{
+	u32 mask = 1 << (gic_irq(d) % 32);
+	return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
+}
+
+static void gic_mask_irq(struct irq_data *d)
+{
+	gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
+}
+
+static void gic_eoimode1_mask_irq(struct irq_data *d)
+{
+	gic_mask_irq(d);
+	/*
+	 * When masking a forwarded interrupt, make sure it is
+	 * deactivated as well.
+	 *
+	 * This ensures that an interrupt that is getting
+	 * disabled/masked will not get "stuck", because there is
+	 * noone to deactivate it (guest is being terminated).
+	 */
+	if (irqd_is_forwarded_to_vcpu(d))
+		gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+	gic_poke_irq(d, GIC_DIST_ENABLE_SET);
+}
+
+static void gic_eoi_irq(struct irq_data *d)
+{
+	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+}
+
+static void gic_eoimode1_eoi_irq(struct irq_data *d)
+{
+	/* Do not deactivate an IRQ forwarded to a vcpu. */
+	if (irqd_is_forwarded_to_vcpu(d))
+		return;
+
+	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
+}
+
+static int gic_irq_set_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which, bool val)
+{
+	u32 reg;
+
+	switch (which) {
+	case IRQCHIP_STATE_PENDING:
+		reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
+		break;
+
+	case IRQCHIP_STATE_ACTIVE:
+		reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
+		break;
+
+	case IRQCHIP_STATE_MASKED:
+		reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	gic_poke_irq(d, reg);
+	return 0;
+}
+
+static int gic_irq_get_irqchip_state(struct irq_data *d,
+				      enum irqchip_irq_state which, bool *val)
+{
+	switch (which) {
+	case IRQCHIP_STATE_PENDING:
+		*val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
+		break;
+
+	case IRQCHIP_STATE_ACTIVE:
+		*val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
+		break;
+
+	case IRQCHIP_STATE_MASKED:
+		*val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+	void __iomem *base = gic_dist_base(d);
+	unsigned int gicirq = gic_irq(d);
+
+	/* Interrupt configuration for SGIs can't be changed */
+	if (gicirq < 16)
+		return -EINVAL;
+
+	/* SPIs have restrictions on the supported types */
+	if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
+			    type != IRQ_TYPE_EDGE_RISING)
+		return -EINVAL;
+
+	return gic_configure_irq(gicirq, type, base, NULL);
+}
+
+static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
+{
+	/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
+	if (cascading_gic_irq(d))
+		return -EINVAL;
+
+	if (vcpu)
+		irqd_set_forwarded_to_vcpu(d);
+	else
+		irqd_clr_forwarded_to_vcpu(d);
+	return 0;
+}
+
+#ifdef CONFIG_SMP
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+			    bool force)
+{
+	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
+	unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
+	u32 val, mask, bit;
+	unsigned long flags;
+
+	if (!force)
+		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	else
+		cpu = cpumask_first(mask_val);
+
+	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	gic_lock_irqsave(flags);
+	mask = 0xff << shift;
+	bit = gic_cpu_map[cpu] << shift;
+	val = readl_relaxed(reg) & ~mask;
+	writel_relaxed(val | bit, reg);
+	gic_unlock_irqrestore(flags);
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
+static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+	u32 irqstat, irqnr;
+	struct gic_chip_data *gic = &gic_data[0];
+	void __iomem *cpu_base = gic_data_cpu_base(gic);
+
+	do {
+		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+		irqnr = irqstat & GICC_IAR_INT_ID_MASK;
+
+		if (likely(irqnr > 15 && irqnr < 1020)) {
+			if (static_branch_likely(&supports_deactivate_key))
+				writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+			isb();
+			handle_domain_irq(gic->domain, irqnr, regs);
+			continue;
+		}
+		if (irqnr < 16) {
+			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+			if (static_branch_likely(&supports_deactivate_key))
+				writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
+#ifdef CONFIG_SMP
+			/*
+			 * Ensure any shared data written by the CPU sending
+			 * the IPI is read after we've read the ACK register
+			 * on the GIC.
+			 *
+			 * Pairs with the write barrier in gic_raise_softirq
+			 */
+			smp_rmb();
+			handle_IPI(irqnr, regs);
+#endif
+			continue;
+		}
+		break;
+	} while (1);
+}
+
+static void gic_handle_cascade_irq(struct irq_desc *desc)
+{
+	struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int cascade_irq, gic_irq;
+	unsigned long status;
+
+	chained_irq_enter(chip, desc);
+
+	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
+
+	gic_irq = (status & GICC_IAR_INT_ID_MASK);
+	if (gic_irq == GICC_INT_SPURIOUS)
+		goto out;
+
+	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
+	if (unlikely(gic_irq < 32 || gic_irq > 1020)) {
+		handle_bad_irq(desc);
+	} else {
+		isb();
+		generic_handle_irq(cascade_irq);
+	}
+
+ out:
+	chained_irq_exit(chip, desc);
+}
+
+static const struct irq_chip gic_chip = {
+	.irq_mask		= gic_mask_irq,
+	.irq_unmask		= gic_unmask_irq,
+	.irq_eoi		= gic_eoi_irq,
+	.irq_set_type		= gic_set_type,
+	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
+	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
+	.flags			= IRQCHIP_SET_TYPE_MASKED |
+				  IRQCHIP_SKIP_SET_WAKE |
+				  IRQCHIP_MASK_ON_SUSPEND,
+};
+
+void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
+{
+	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
+					 &gic_data[gic_nr]);
+}
+
+static u8 gic_get_cpumask(struct gic_chip_data *gic)
+{
+	void __iomem *base = gic_data_dist_base(gic);
+	u32 mask, i;
+
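+	/*
+	 * GICD_ITARGETSR0-7 are banked and read-only: each target byte
+	 * reads back the interface bit of the CPU doing the access, so
+	 * the first non-zero byte is this CPU's mask.
+	 */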
+	for (i = mask = 0; i < 32; i += 4) {
+		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
+		mask |= mask >> 16;
+		mask |= mask >> 8;
+		if (mask)
+			break;
+	}
+
+	if (!mask && num_possible_cpus() > 1)
+		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+	return mask;
+}
+
+static bool gic_check_gicv2(void __iomem *base)
+{
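+	/*
+	 * GICC_IIDR check: implementer 0x43B (ARM) with architecture
+	 * version 2, masking out the revision field.
+	 */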
+	u32 val = readl_relaxed(base + GIC_CPU_IDENT);
+	return (val & 0xff0fff) == 0x02043B;
+}
+
+static void gic_cpu_if_up(struct gic_chip_data *gic)
+{
+	void __iomem *cpu_base = gic_data_cpu_base(gic);
+	u32 bypass = 0;
+	u32 mode = 0;
+	int i;
+
+	if (gic == &gic_data[0] && static_branch_likely(&supports_deactivate_key))
+		mode = GIC_CPU_CTRL_EOImodeNS;
+
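+	/*
+	 * Clear the GICC_APRn active-priority registers so no stale
+	 * active-priority bits are left over; these registers only
+	 * exist on a real GICv2, hence the ident check.
+	 */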
+	if (gic_check_gicv2(cpu_base))
+		for (i = 0; i < 4; i++)
+			writel_relaxed(0, cpu_base + GIC_CPU_ACTIVEPRIO + i * 4);
+
+	/*
+	 * Preserve bypass disable bits to be written back later.
+	 */
+	bypass = readl(cpu_base + GIC_CPU_CTRL);
+	bypass &= GICC_DIS_BYPASS_MASK;
+
+	writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
+}
+
+static void gic_dist_init(struct gic_chip_data *gic)
+{
+	unsigned int i;
+	u32 cpumask;
+	unsigned int gic_irqs = gic->gic_irqs;
+	void __iomem *base = gic_data_dist_base(gic);
+
+	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);
+
+	/*
+	 * Set all global interrupts to this CPU only.
+	 */
+	cpumask = gic_get_cpumask(gic);
+	cpumask |= cpumask << 8;
+	cpumask |= cpumask << 16;
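+	/*
+	 * One target byte per interrupt: i * 4 / 4 keeps the register
+	 * offset equal to the interrupt number while stepping one full
+	 * word (four interrupts) per iteration.
+	 */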
+	for (i = 32; i < gic_irqs; i += 4)
+		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
+
+	gic_dist_config(base, gic_irqs, NULL);
+
+	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
+}
+
+static int gic_cpu_init(struct gic_chip_data *gic)
+{
+	void __iomem *dist_base = gic_data_dist_base(gic);
+	void __iomem *base = gic_data_cpu_base(gic);
+	unsigned int cpu_mask, cpu = smp_processor_id();
+	int i;
+
+	/*
+	 * Setting up the CPU map is only relevant for the primary GIC
+	 * because any nested/secondary GICs do not directly interface
+	 * with the CPU(s).
+	 */
+	if (gic == &gic_data[0]) {
+		/*
+		 * Get what the GIC says our CPU mask is.
+		 */
+		if (WARN_ON(cpu >= NR_GIC_CPU_IF))
+			return -EINVAL;
+
+		gic_check_cpu_features();
+		cpu_mask = gic_get_cpumask(gic);
+		gic_cpu_map[cpu] = cpu_mask;
+
+		/*
+		 * Clear our mask from the other map entries in case they're
+		 * still undefined.
+		 */
+		for (i = 0; i < NR_GIC_CPU_IF; i++)
+			if (i != cpu)
+				gic_cpu_map[i] &= ~cpu_mask;
+	}
+
+	gic_cpu_config(dist_base, NULL);
+
+	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
+	gic_cpu_if_up(gic);
+
+	return 0;
+}
+
+int gic_cpu_if_down(unsigned int gic_nr)
+{
+	void __iomem *cpu_base;
+	u32 val = 0;
+
+	if (gic_nr >= CONFIG_ARM_GIC_MAX_NR)
+		return -EINVAL;
+
+	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+	val = readl(cpu_base + GIC_CPU_CTRL);
+	val &= ~GICC_ENABLE;
+	writel_relaxed(val, cpu_base + GIC_CPU_CTRL);
+
+	return 0;
+}
+
+#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
+/*
+ * Saves the GIC distributor registers during suspend or idle.  Must be called
+ * with interrupts disabled but before powering down the GIC.  After calling
+ * this function, no interrupts will be delivered by the GIC, and another
+ * platform-specific wakeup source must be enabled.
+ */
+void gic_dist_save(struct gic_chip_data *gic)
+{
+	unsigned int gic_irqs;
+	void __iomem *dist_base;
+	int i;
+
+	if (WARN_ON(!gic))
+		return;
+
+	gic_irqs = gic->gic_irqs;
+	dist_base = gic_data_dist_base(gic);
+
+	if (!dist_base)
+		return;
+
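+	/*
+	 * Register densities differ, hence the DIV_ROUND_UP divisors:
+	 * GICD_ICFGR packs 2 config bits per interrupt (16 per word),
+	 * GICD_ITARGETSR one byte (4 per word), and the enable/active
+	 * set registers one bit (32 per word).
+	 */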
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+		gic->saved_spi_conf[i] =
+			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+		gic->saved_spi_target[i] =
+			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+		gic->saved_spi_enable[i] =
+			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+		gic->saved_spi_active[i] =
+			readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+}
+
+/*
+ * Restores the GIC distributor registers during resume or when coming out of
+ * idle.  Must be called before enabling interrupts.  If a level interrupt
+ * that occurred while the GIC was suspended is still present, it will be
+ * handled normally, but any edge interrupts that occurred will not be seen by
+ * the GIC and need to be handled by the platform-specific wakeup source.
+ */
+void gic_dist_restore(struct gic_chip_data *gic)
+{
+	unsigned int gic_irqs;
+	unsigned int i;
+	void __iomem *dist_base;
+
+	if (WARN_ON(!gic))
+		return;
+
+	gic_irqs = gic->gic_irqs;
+	dist_base = gic_data_dist_base(gic);
+
+	if (!dist_base)
+		return;
+
+	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+		writel_relaxed(gic->saved_spi_conf[i],
+			dist_base + GIC_DIST_CONFIG + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+		writel_relaxed(GICD_INT_DEF_PRI_X4,
+			dist_base + GIC_DIST_PRI + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+		writel_relaxed(gic->saved_spi_target[i],
+			dist_base + GIC_DIST_TARGET + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
+		writel_relaxed(gic->saved_spi_enable[i],
+			dist_base + GIC_DIST_ENABLE_SET + i * 4);
+	}
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
+		writel_relaxed(gic->saved_spi_active[i],
+			dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+	}
+
+	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
+}
+
+void gic_cpu_save(struct gic_chip_data *gic)
+{
+	int i;
+	u32 *ptr;
+	void __iomem *dist_base;
+	void __iomem *cpu_base;
+
+	if (WARN_ON(!gic))
+		return;
+
+	dist_base = gic_data_dist_base(gic);
+	cpu_base = gic_data_cpu_base(gic);
+
+	if (!dist_base || !cpu_base)
+		return;
+
+	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
+	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+	ptr = raw_cpu_ptr(gic->saved_ppi_active);
+	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+
+	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
+	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+}
+
+void gic_cpu_restore(struct gic_chip_data *gic)
+{
+	int i;
+	u32 *ptr;
+	void __iomem *dist_base;
+	void __iomem *cpu_base;
+
+	if (WARN_ON(!gic))
+		return;
+
+	dist_base = gic_data_dist_base(gic);
+	cpu_base = gic_data_cpu_base(gic);
+
+	if (!dist_base || !cpu_base)
+		return;
+
+	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
+	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			       dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
+		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+	}
+
+	ptr = raw_cpu_ptr(gic->saved_ppi_active);
+	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
+		writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+	}
+
+	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
+	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
+		writel_relaxed(GICD_INT_DEF_PRI_X4,
+					dist_base + GIC_DIST_PRI + i * 4);
+
+	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
+	gic_cpu_if_up(gic);
+}
+
+static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
+{
+	int i;
+
+	for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) {
+#ifdef CONFIG_GIC_NON_BANKED
+		/* Skip over unused GICs */
+		if (!gic_data[i].get_base)
+			continue;
+#endif
+		switch (cmd) {
+		case CPU_PM_ENTER:
+			gic_cpu_save(&gic_data[i]);
+			break;
+		case CPU_PM_ENTER_FAILED:
+		case CPU_PM_EXIT:
+			gic_cpu_restore(&gic_data[i]);
+			break;
+		case CPU_CLUSTER_PM_ENTER:
+			gic_dist_save(&gic_data[i]);
+			break;
+		case CPU_CLUSTER_PM_ENTER_FAILED:
+		case CPU_CLUSTER_PM_EXIT:
+			gic_dist_restore(&gic_data[i]);
+			break;
+		}
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block gic_notifier_block = {
+	.notifier_call = gic_notifier,
+};
+
+static int gic_pm_init(struct gic_chip_data *gic)
+{
+	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
+		sizeof(u32));
+	if (WARN_ON(!gic->saved_ppi_enable))
+		return -ENOMEM;
+
+	gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
+		sizeof(u32));
+	if (WARN_ON(!gic->saved_ppi_active))
+		goto free_ppi_enable;
+
+	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
+		sizeof(u32));
+	if (WARN_ON(!gic->saved_ppi_conf))
+		goto free_ppi_active;
+
+	if (gic == &gic_data[0])
+		cpu_pm_register_notifier(&gic_notifier_block);
+
+	return 0;
+
+free_ppi_active:
+	free_percpu(gic->saved_ppi_active);
+free_ppi_enable:
+	free_percpu(gic->saved_ppi_enable);
+
+	return -ENOMEM;
+}
+#else
+static int gic_pm_init(struct gic_chip_data *gic)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_SMP
+static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+{
+	int cpu;
+	unsigned long flags, map = 0;
+
+	if (unlikely(nr_cpu_ids == 1)) {
+		/* Only one CPU? Let's do a self-IPI... */
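+		/*
+		 * GICD_SGIR bits [25:24] select the target filter:
+		 * 0b10 routes the SGI back to the requesting CPU
+		 * only, so no CPU target list is needed.
+		 */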
+		writel_relaxed(2 << 24 | irq,
+			       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+		return;
+	}
+
+	gic_lock_irqsave(flags);
+
+	/* Convert our logical CPU mask into a physical one. */
+	for_each_cpu(cpu, mask)
+		map |= gic_cpu_map[cpu];
+
+	/*
+	 * Ensure that stores to Normal memory are visible to the
+	 * other CPUs before they observe us issuing the IPI.
+	 */
+	dmb(ishst);
+
+	/* this always happens on GIC0 */
+	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+
+	gic_unlock_irqrestore(flags);
+}
+#endif
+
+#ifdef CONFIG_BL_SWITCHER
+/*
+ * gic_send_sgi - send an SGI directly to a given CPU interface number
+ *
+ * cpu_id: the ID for the destination CPU interface
+ * irq: the IPI number to send an SGI for
+ */
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
+{
+	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
+	cpu_id = 1 << cpu_id;
+	/* this always happens on GIC0 */
+	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+}
+
+/*
+ * gic_get_cpu_id - get the CPU interface ID for the specified CPU
+ *
+ * @cpu: the logical CPU number to get the GIC ID for.
+ *
+ * Return the CPU interface ID for the given logical CPU number,
+ * or -1 if the CPU number is too large or the interface ID is
+ * unknown (more than one bit set).
+ */
+int gic_get_cpu_id(unsigned int cpu)
+{
+	unsigned int cpu_bit;
+
+	if (cpu >= NR_GIC_CPU_IF)
+		return -1;
+	cpu_bit = gic_cpu_map[cpu];
+	if (cpu_bit & (cpu_bit - 1))
+		return -1;
+	return __ffs(cpu_bit);
+}
+
+/*
+ * gic_migrate_target - migrate IRQs to another CPU interface
+ *
+ * @new_cpu_id: the CPU target ID to migrate IRQs to
+ *
+ * Migrate all peripheral interrupts with a target matching the current CPU
+ * to the interface corresponding to @new_cpu_id.  The CPU interface mapping
+ * is also updated.  Targets to other CPU interfaces are unchanged.
+ * This must be called with IRQs locally disabled.
+ */
+void gic_migrate_target(unsigned int new_cpu_id)
+{
+	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
+	void __iomem *dist_base;
+	int i, ror_val, cpu = smp_processor_id();
+	u32 val, cur_target_mask, active_mask;
+
+	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	if (!dist_base)
+		return;
+	gic_irqs = gic_data[gic_nr].gic_irqs;
+
+	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
+	cur_target_mask = 0x01010101 << cur_cpu_id;
+	ror_val = (cur_cpu_id - new_cpu_id) & 31;
+
+	gic_lock();
+
+	/* Update the target interface for this logical CPU */
+	gic_cpu_map[cpu] = 1 << new_cpu_id;
+
+	/*
+	 * Find all the peripheral interrupts targeting the current
+	 * CPU interface and migrate them to the new CPU interface.
+	 * We skip DIST_TARGET 0 to 7 as they are read-only.
+	 */
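+	/*
+	 * Each target byte is a bitmap of CPU interfaces, so rotating a
+	 * word right by (cur_cpu_id - new_cpu_id) bits moves every
+	 * byte's target bit from the old interface to the new one.
+	 */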
+	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
+		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+		active_mask = val & cur_target_mask;
+		if (active_mask) {
+			val &= ~active_mask;
+			val |= ror32(active_mask, ror_val);
+			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
+		}
+	}
+
+	gic_unlock();
+
+	/*
+	 * Now let's migrate and clear any potential SGIs that might be
+	 * pending for us (cur_cpu_id).  Since GIC_DIST_SGI_PENDING_SET
+	 * is a banked register, we can only forward the SGI using
+	 * GIC_DIST_SOFTINT.  The original SGI source is lost but Linux
+	 * doesn't use that information anyway.
+	 *
+	 * For the same reason we do not adjust SGI source information
+	 * for previously sent SGIs by us to other CPUs either.
+	 */
+	for (i = 0; i < 16; i += 4) {
+		int j;
+		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
+		if (!val)
+			continue;
+		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
+		for (j = i; j < i + 4; j++) {
+			if (val & 0xff)
+				writel_relaxed((1 << (new_cpu_id + 16)) | j,
+						dist_base + GIC_DIST_SOFTINT);
+			val >>= 8;
+		}
+	}
+}
+
+/*
+ * gic_get_sgir_physaddr - get the physical address for the SGI register
+ *
+ * Return the physical address of the SGI register to be used
+ * by some early assembly code when the kernel is not yet available.
+ */
+static unsigned long gic_dist_physaddr;
+
+unsigned long gic_get_sgir_physaddr(void)
+{
+	if (!gic_dist_physaddr)
+		return 0;
+	return gic_dist_physaddr + GIC_DIST_SOFTINT;
+}
+
+static void __init gic_init_physaddr(struct device_node *node)
+{
+	struct resource res;
+	if (of_address_to_resource(node, 0, &res) == 0) {
+		gic_dist_physaddr = res.start;
+		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
+	}
+}
+
+#else
+#define gic_init_physaddr(node)  do { } while (0)
+#endif
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+				irq_hw_number_t hw)
+{
+	struct gic_chip_data *gic = d->host_data;
+
+	if (hw < 32) {
+		irq_set_percpu_devid(irq);
+		irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
+				    handle_percpu_devid_irq, NULL, NULL);
+		irq_set_status_flags(irq, IRQ_NOAUTOEN);
+	} else {
+		irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
+				    handle_fasteoi_irq, NULL, NULL);
+		irq_set_probe(irq);
+		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
+	}
+	return 0;
+}
+
+static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
+{
+}
+
+static int gic_irq_domain_translate(struct irq_domain *d,
+				    struct irq_fwspec *fwspec,
+				    unsigned long *hwirq,
+				    unsigned int *type)
+{
+	if (is_of_node(fwspec->fwnode)) {
+		if (fwspec->param_count < 3)
+			return -EINVAL;
+
+		/* Get the interrupt number and add 16 to skip over SGIs */
+		*hwirq = fwspec->param[1] + 16;
+
+		/*
+		 * For SPIs, we need to add 16 more to get the GIC irq
+		 * ID number
+		 */
+		if (!fwspec->param[0])
+			*hwirq += 16;
+
+		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+
+		/* Make it clear that broken DTs are... broken */
+		WARN_ON(*type == IRQ_TYPE_NONE);
+		return 0;
+	}
+
+	if (is_fwnode_irqchip(fwspec->fwnode)) {
+		if (fwspec->param_count != 2)
+			return -EINVAL;
+
+		*hwirq = fwspec->param[0];
+		*type = fwspec->param[1];
+
+		WARN_ON(*type == IRQ_TYPE_NONE);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int gic_starting_cpu(unsigned int cpu)
+{
+	gic_cpu_init(&gic_data[0]);
+	return 0;
+}
+
+static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				unsigned int nr_irqs, void *arg)
+{
+	int i, ret;
+	irq_hw_number_t hwirq;
+	unsigned int type = IRQ_TYPE_NONE;
+	struct irq_fwspec *fwspec = arg;
+
+	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < nr_irqs; i++) {
+		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
+	.translate = gic_irq_domain_translate,
+	.alloc = gic_irq_domain_alloc,
+	.free = irq_domain_free_irqs_top,
+};
+
+static const struct irq_domain_ops gic_irq_domain_ops = {
+	.map = gic_irq_domain_map,
+	.unmap = gic_irq_domain_unmap,
+};
+
+static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
+			  const char *name, bool use_eoimode1)
+{
+	/* Initialize irq_chip */
+	gic->chip = gic_chip;
+	gic->chip.name = name;
+	gic->chip.parent_device = dev;
+
+	if (use_eoimode1) {
+		gic->chip.irq_mask = gic_eoimode1_mask_irq;
+		gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
+		gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
+	}
+
+#ifdef CONFIG_SMP
+	if (gic == &gic_data[0])
+		gic->chip.irq_set_affinity = gic_set_affinity;
+#endif
+}
+
+static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
+			  struct fwnode_handle *handle)
+{
+	irq_hw_number_t hwirq_base;
+	int gic_irqs, irq_base, ret;
+
+	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
+		/* Franken-GIC without banked registers... */
+		unsigned int cpu;
+
+		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
+		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
+		if (WARN_ON(!gic->dist_base.percpu_base ||
+			    !gic->cpu_base.percpu_base)) {
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		for_each_possible_cpu(cpu) {
+			u32 mpidr = cpu_logical_map(cpu);
+			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+			unsigned long offset = gic->percpu_offset * core_id;
+			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
+				gic->raw_dist_base + offset;
+			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
+				gic->raw_cpu_base + offset;
+		}
+
+		gic_set_base_accessor(gic, gic_get_percpu_base);
+	} else {
+		/* Normal, sane GIC... */
+		WARN(gic->percpu_offset,
+		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
+		     gic->percpu_offset);
+		gic->dist_base.common_base = gic->raw_dist_base;
+		gic->cpu_base.common_base = gic->raw_cpu_base;
+		gic_set_base_accessor(gic, gic_get_common_base);
+	}
+
+	/*
+	 * Find out how many interrupts are supported.
+	 * The GIC only supports up to 1020 interrupt sources.
+	 */
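+	/* GICD_TYPER.ITLinesNumber (bits [4:0]) encodes lines/32 - 1. */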
+	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
+	gic_irqs = (gic_irqs + 1) * 32;
+	if (gic_irqs > 1020)
+		gic_irqs = 1020;
+	gic->gic_irqs = gic_irqs;
+
+	if (handle) {		/* DT/ACPI */
+		gic->domain = irq_domain_create_linear(handle, gic_irqs,
+						       &gic_irq_domain_hierarchy_ops,
+						       gic);
+	} else {		/* Legacy support */
+		/*
+		 * For primary GICs, skip over SGIs.
+		 * For secondary GICs, skip over PPIs, too.
+		 */
+		if (gic == &gic_data[0] && (irq_start & 31) > 0) {
+			hwirq_base = 16;
+			if (irq_start != -1)
+				irq_start = (irq_start & ~31) + 16;
+		} else {
+			hwirq_base = 32;
+		}
+
+		gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+
+		irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
+					   numa_node_id());
+		if (irq_base < 0) {
+			WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
+			     irq_start);
+			irq_base = irq_start;
+		}
+
+		gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
+					hwirq_base, &gic_irq_domain_ops, gic);
+	}
+
+	if (WARN_ON(!gic->domain)) {
+		ret = -ENODEV;
+		goto error;
+	}
+
+	gic_dist_init(gic);
+	ret = gic_cpu_init(gic);
+	if (ret)
+		goto error;
+
+	ret = gic_pm_init(gic);
+	if (ret)
+		goto error;
+
+	return 0;
+
+error:
+	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
+		free_percpu(gic->dist_base.percpu_base);
+		free_percpu(gic->cpu_base.percpu_base);
+	}
+
+	return ret;
+}
+
+static int __init __gic_init_bases(struct gic_chip_data *gic,
+				   int irq_start,
+				   struct fwnode_handle *handle)
+{
+	char *name;
+	int i, ret;
+
+	if (WARN_ON(!gic || gic->domain))
+		return -EINVAL;
+
+	if (gic == &gic_data[0]) {
+		/*
+		 * Initialize the CPU interface map to all CPUs.
+		 * It will be refined as each CPU probes its ID.
+		 * This is only necessary for the primary GIC.
+		 */
+		for (i = 0; i < NR_GIC_CPU_IF; i++)
+			gic_cpu_map[i] = 0xff;
+#ifdef CONFIG_SMP
+		set_smp_cross_call(gic_raise_softirq);
+#endif
+		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+					  "irqchip/arm/gic:starting",
+					  gic_starting_cpu, NULL);
+		set_handle_irq(gic_handle_irq);
+		if (static_branch_likely(&supports_deactivate_key))
+			pr_info("GIC: Using split EOI/Deactivate mode\n");
+	}
+
+	if (static_branch_likely(&supports_deactivate_key) && gic == &gic_data[0]) {
+		name = kasprintf(GFP_KERNEL, "GICv2");
+		gic_init_chip(gic, NULL, name, true);
+	} else {
+		name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic-&gic_data[0]));
+		gic_init_chip(gic, NULL, name, false);
+	}
+
+	ret = gic_init_bases(gic, irq_start, handle);
+	if (ret)
+		kfree(name);
+
+	return ret;
+}
+
+void __init gic_init(unsigned int gic_nr, int irq_start,
+		     void __iomem *dist_base, void __iomem *cpu_base)
+{
+	struct gic_chip_data *gic;
+
+	if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR))
+		return;
+
+	/*
+	 * Non-DT/ACPI systems won't run a hypervisor, so let's not
+	 * bother with these...
+	 */
+	static_branch_disable(&supports_deactivate_key);
+
+	gic = &gic_data[gic_nr];
+	gic->raw_dist_base = dist_base;
+	gic->raw_cpu_base = cpu_base;
+
+	__gic_init_bases(gic, irq_start, NULL);
+}
+
+static void gic_teardown(struct gic_chip_data *gic)
+{
+	if (WARN_ON(!gic))
+		return;
+
+	if (gic->raw_dist_base)
+		iounmap(gic->raw_dist_base);
+	if (gic->raw_cpu_base)
+		iounmap(gic->raw_cpu_base);
+}
+
+#ifdef CONFIG_OF
+static int gic_cnt __initdata;
+static bool gicv2_force_probe;
+
+static int __init gicv2_force_probe_cfg(char *buf)
+{
+	return strtobool(buf, &gicv2_force_probe);
+}
+early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg);
+
+static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
+{
+	struct resource cpuif_res;
+
+	of_address_to_resource(node, 1, &cpuif_res);
+
+	if (!is_hyp_mode_available())
+		return false;
+	if (resource_size(&cpuif_res) < SZ_8K) {
+		void __iomem *alt;
+		/*
+		 * Check for a stupid firmware that only exposes the
+		 * first page of a GICv2.
+		 */
+		if (!gic_check_gicv2(*base))
+			return false;
+
+		if (!gicv2_force_probe) {
+			pr_warn("GIC: GICv2 detected, but range too small and irqchip.gicv2_force_probe not set\n");
+			return false;
+		}
+
+		alt = ioremap(cpuif_res.start, SZ_8K);
+		if (!alt)
+			return false;
+		if (!gic_check_gicv2(alt + SZ_4K)) {
+			/*
+			 * The first page was that of a GICv2, and
+			 * the second was *something*. Let's trust it
+			 * to be a GICv2, and update the mapping.
+			 */
+			pr_warn("GIC: GICv2 at %pa, but range is too small (broken DT?), assuming 8kB\n",
+				&cpuif_res.start);
+			iounmap(*base);
+			*base = alt;
+			return true;
+		}
+
+		/*
+		 * We detected *two* initial GICv2 pages in a
+		 * row. Could be a GICv2 aliased over two 64kB
+		 * pages. Update the resource, map the iospace, and
+		 * pray.
+		 */
+		iounmap(alt);
+		alt = ioremap(cpuif_res.start, SZ_128K);
+		if (!alt)
+			return false;
+		pr_warn("GIC: Aliased GICv2 at %pa, trying to find the canonical range over 128kB\n",
+			&cpuif_res.start);
+		cpuif_res.end = cpuif_res.start + SZ_128K - 1;
+		iounmap(*base);
+		*base = alt;
+	}
+	if (resource_size(&cpuif_res) == SZ_128K) {
+		/*
+		 * Verify that we have the first 4kB of a GICv2
+		 * aliased over the first 64kB by checking the
+		 * GICC_IIDR register on both ends.
+		 */
+		if (!gic_check_gicv2(*base) ||
+		    !gic_check_gicv2(*base + 0xf000))
+			return false;
+
+		/*
+		 * Move the base up by 60kB, so that we have an 8kB
+		 * contiguous region, which allows us to use GICC_DIR
+		 * at its normal offset. Please pass me that bucket.
+		 */
+		*base += 0xf000;
+		cpuif_res.start += 0xf000;
+		pr_warn("GIC: Adjusting CPU interface base to %pa\n",
+			&cpuif_res.start);
+	}
+
+	return true;
+}
+
+static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
+{
+	if (!gic || !node)
+		return -EINVAL;
+
+	gic->raw_dist_base = of_iomap(node, 0);
+	if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
+		goto error;
+
+	gic->raw_cpu_base = of_iomap(node, 1);
+	if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
+		goto error;
+
+	if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
+		gic->percpu_offset = 0;
+
+	return 0;
+
+error:
+	gic_teardown(gic);
+
+	return -ENOMEM;
+}
+
+int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
+{
+	int ret;
+
+	if (!dev || !dev->of_node || !gic || !irq)
+		return -EINVAL;
+
+	*gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL);
+	if (!*gic)
+		return -ENOMEM;
+
+	gic_init_chip(*gic, dev, dev->of_node->name, false);
+
+	ret = gic_of_setup(*gic, dev->of_node);
+	if (ret)
+		return ret;
+
+	ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode);
+	if (ret) {
+		gic_teardown(*gic);
+		return ret;
+	}
+
+	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);
+
+	return 0;
+}
+
+static void __init gic_of_setup_kvm_info(struct device_node *node)
+{
+	int ret;
+	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
+	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;
+
+	gic_v2_kvm_info.type = GIC_V2;
+
+	gic_v2_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
+	if (!gic_v2_kvm_info.maint_irq)
+		return;
+
+	ret = of_address_to_resource(node, 2, vctrl_res);
+	if (ret)
+		return;
+
+	ret = of_address_to_resource(node, 3, vcpu_res);
+	if (ret)
+		return;
+
+	if (static_branch_likely(&supports_deactivate_key))
+		gic_set_kvm_info(&gic_v2_kvm_info);
+}
+
+int __init
+gic_of_init(struct device_node *node, struct device_node *parent)
+{
+	struct gic_chip_data *gic;
+	int irq, ret;
+
+	if (WARN_ON(!node))
+		return -ENODEV;
+
+	if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
+		return -EINVAL;
+
+	gic = &gic_data[gic_cnt];
+
+	ret = gic_of_setup(gic, node);
+	if (ret)
+		return ret;
+
+	/*
+	 * Disable split EOI/Deactivate if either HYP is not available
+	 * or the CPU interface is too small.
+	 */
+	if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
+		static_branch_disable(&supports_deactivate_key);
+
+	ret = __gic_init_bases(gic, -1, &node->fwnode);
+	if (ret) {
+		gic_teardown(gic);
+		return ret;
+	}
+
+	if (!gic_cnt) {
+		gic_init_physaddr(node);
+		gic_of_setup_kvm_info(node);
+	}
+
+	if (parent) {
+		irq = irq_of_parse_and_map(node, 0);
+		gic_cascade_irq(gic_cnt, irq);
+	}
+
+	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
+		gicv2m_init(&node->fwnode, gic_data[gic_cnt].domain);
+
+	gic_cnt++;
+	return 0;
+}
+IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
+IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
+IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
+IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
+IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
+IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
+#else
+int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
+{
+	return -ENOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static struct
+{
+	phys_addr_t cpu_phys_base;
+	u32 maint_irq;
+	int maint_irq_mode;
+	phys_addr_t vctrl_base;
+	phys_addr_t vcpu_base;
+} acpi_data __initdata;
+
+static int __init
+gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
+			const unsigned long end)
+{
+	struct acpi_madt_generic_interrupt *processor;
+	phys_addr_t gic_cpu_base;
+	static int cpu_base_assigned;
+
+	processor = (struct acpi_madt_generic_interrupt *)header;
+
+	if (BAD_MADT_GICC_ENTRY(processor, end))
+		return -EINVAL;
+
+	/*
+	 * There is no support for non-banked GICv1/2 registers in the
+	 * ACPI spec; all CPU interface addresses have to be the same.
+	 */
+	gic_cpu_base = processor->base_address;
+	if (cpu_base_assigned && gic_cpu_base != acpi_data.cpu_phys_base)
+		return -EINVAL;
+
+	acpi_data.cpu_phys_base = gic_cpu_base;
+	acpi_data.maint_irq = processor->vgic_interrupt;
+	acpi_data.maint_irq_mode = (processor->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
+				    ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
+	acpi_data.vctrl_base = processor->gich_base_address;
+	acpi_data.vcpu_base = processor->gicv_base_address;
+
+	cpu_base_assigned = 1;
+	return 0;
+}
+
+/* The things you have to do to just *count* something... */
+static int __init acpi_dummy_func(struct acpi_subtable_header *header,
+				  const unsigned long end)
+{
+	return 0;
+}
+
+static bool __init acpi_gic_redist_is_present(void)
+{
+	return acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
+				     acpi_dummy_func, 0) > 0;
+}
+
+static bool __init gic_validate_dist(struct acpi_subtable_header *header,
+				     struct acpi_probe_entry *ape)
+{
+	struct acpi_madt_generic_distributor *dist;
+	dist = (struct acpi_madt_generic_distributor *)header;
+
+	return (dist->version == ape->driver_data &&
+		(dist->version != ACPI_MADT_GIC_VERSION_NONE ||
+		 !acpi_gic_redist_is_present()));
+}
+
+#define ACPI_GICV2_DIST_MEM_SIZE	(SZ_4K)
+#define ACPI_GIC_CPU_IF_MEM_SIZE	(SZ_8K)
+#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
+#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)
+
+static void __init gic_acpi_setup_kvm_info(void)
+{
+	int irq;
+	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
+	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;
+
+	gic_v2_kvm_info.type = GIC_V2;
+
+	if (!acpi_data.vctrl_base)
+		return;
+
+	vctrl_res->flags = IORESOURCE_MEM;
+	vctrl_res->start = acpi_data.vctrl_base;
+	vctrl_res->end = vctrl_res->start + ACPI_GICV2_VCTRL_MEM_SIZE - 1;
+
+	if (!acpi_data.vcpu_base)
+		return;
+
+	vcpu_res->flags = IORESOURCE_MEM;
+	vcpu_res->start = acpi_data.vcpu_base;
+	vcpu_res->end = vcpu_res->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
+
+	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
+				acpi_data.maint_irq_mode,
+				ACPI_ACTIVE_HIGH);
+	if (irq <= 0)
+		return;
+
+	gic_v2_kvm_info.maint_irq = irq;
+
+	gic_set_kvm_info(&gic_v2_kvm_info);
+}
+
+static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
+				   const unsigned long end)
+{
+	struct acpi_madt_generic_distributor *dist;
+	struct fwnode_handle *domain_handle;
+	struct gic_chip_data *gic = &gic_data[0];
+	int count, ret;
+
+	/* Collect CPU base addresses */
+	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+				      gic_acpi_parse_madt_cpu, 0);
+	if (count <= 0) {
+		pr_err("No valid GICC entries exist\n");
+		return -EINVAL;
+	}
+
+	gic->raw_cpu_base = ioremap(acpi_data.cpu_phys_base, ACPI_GIC_CPU_IF_MEM_SIZE);
+	if (!gic->raw_cpu_base) {
+		pr_err("Unable to map GICC registers\n");
+		return -ENOMEM;
+	}
+
+	dist = (struct acpi_madt_generic_distributor *)header;
+	gic->raw_dist_base = ioremap(dist->base_address,
+				     ACPI_GICV2_DIST_MEM_SIZE);
+	if (!gic->raw_dist_base) {
+		pr_err("Unable to map GICD registers\n");
+		gic_teardown(gic);
+		return -ENOMEM;
+	}
+
+	/*
+	 * Disable split EOI/Deactivate if HYP is not available. ACPI
+	 * guarantees that we'll always have a GICv2, so the CPU
+	 * interface will always be the right size.
+	 */
+	if (!is_hyp_mode_available())
+		static_branch_disable(&supports_deactivate_key);
+
+	/*
+	 * Initialize GIC instance zero (no multi-GIC support).
+	 */
+	domain_handle = irq_domain_alloc_fwnode(gic->raw_dist_base);
+	if (!domain_handle) {
+		pr_err("Unable to allocate domain handle\n");
+		gic_teardown(gic);
+		return -ENOMEM;
+	}
+
+	ret = __gic_init_bases(gic, -1, domain_handle);
+	if (ret) {
+		pr_err("Failed to initialise GIC\n");
+		irq_domain_free_fwnode(domain_handle);
+		gic_teardown(gic);
+		return ret;
+	}
+
+	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
+
+	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
+		gicv2m_init(NULL, gic_data[0].domain);
+
+	if (static_branch_likely(&supports_deactivate_key))
+		gic_acpi_setup_kvm_info();
+
+	return 0;
+}
+IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+		     gic_validate_dist, ACPI_MADT_GIC_VERSION_V2,
+		     gic_v2_acpi_init);
+IRQCHIP_ACPI_DECLARE(gic_v2_maybe, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+		     gic_validate_dist, ACPI_MADT_GIC_VERSION_NONE,
+		     gic_v2_acpi_init);
+#endif
diff --git a/drivers/irqchip/irq-goldfish-pic.c b/drivers/irqchip/irq-goldfish-pic.c
new file mode 100644
index 0000000..2a92f03
--- /dev/null
+++ b/drivers/irqchip/irq-goldfish-pic.c
@@ -0,0 +1,139 @@
+/*
+ * Driver for MIPS Goldfish Programmable Interrupt Controller.
+ *
+ * Author: Miodrag Dinic <miodrag.dinic@mips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define GFPIC_NR_IRQS			32
+
+/* 8..39 Cascaded Goldfish PIC interrupts */
+#define GFPIC_IRQ_BASE			8
+
+#define GFPIC_REG_IRQ_PENDING		0x04
+#define GFPIC_REG_IRQ_DISABLE_ALL	0x08
+#define GFPIC_REG_IRQ_DISABLE		0x0c
+#define GFPIC_REG_IRQ_ENABLE		0x10
+
+struct goldfish_pic_data {
+	void __iomem *base;
+	struct irq_domain *irq_domain;
+};
+
+static void goldfish_pic_cascade(struct irq_desc *desc)
+{
+	struct goldfish_pic_data *gfpic = irq_desc_get_handler_data(desc);
+	struct irq_chip *host_chip = irq_desc_get_chip(desc);
+	u32 pending, hwirq, virq;
+
+	chained_irq_enter(host_chip, desc);
+
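+	/* Drain the pending register, dispatching the highest set bit first. */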
+	pending = readl(gfpic->base + GFPIC_REG_IRQ_PENDING);
+	while (pending) {
+		hwirq = __fls(pending);
+		virq = irq_linear_revmap(gfpic->irq_domain, hwirq);
+		generic_handle_irq(virq);
+		pending &= ~(1 << hwirq);
+	}
+
+	chained_irq_exit(host_chip, desc);
+}
+
+static const struct irq_domain_ops goldfish_irq_domain_ops = {
+	.xlate = irq_domain_xlate_onecell,
+};
+
+static int __init goldfish_pic_of_init(struct device_node *of_node,
+				       struct device_node *parent)
+{
+	struct goldfish_pic_data *gfpic;
+	struct irq_chip_generic *gc;
+	struct irq_chip_type *ct;
+	unsigned int parent_irq;
+	int ret = 0;
+
+	gfpic = kzalloc(sizeof(*gfpic), GFP_KERNEL);
+	if (!gfpic) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	parent_irq = irq_of_parse_and_map(of_node, 0);
+	if (!parent_irq) {
+		pr_err("Failed to map parent IRQ!\n");
+		ret = -EINVAL;
+		goto out_free;
+	}
+
+	gfpic->base = of_iomap(of_node, 0);
+	if (!gfpic->base) {
+		pr_err("Failed to map base address!\n");
+		ret = -ENOMEM;
+		goto out_unmap_irq;
+	}
+
+	/* Mask interrupts. */
+	writel(1, gfpic->base + GFPIC_REG_IRQ_DISABLE_ALL);
+
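+	/*
+	 * A single generic chip with one irq_chip_type covers all 32
+	 * sources; Linux IRQ numbers start at GFPIC_IRQ_BASE so they
+	 * sit above the low numbers used by the MIPS CPU interrupts.
+	 */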
+	gc = irq_alloc_generic_chip("GFPIC", 1, GFPIC_IRQ_BASE, gfpic->base,
+				    handle_level_irq);
+	if (!gc) {
+		pr_err("Failed to allocate chip structures!\n");
+		ret = -ENOMEM;
+		goto out_iounmap;
+	}
+
+	ct = gc->chip_types;
+	ct->regs.enable = GFPIC_REG_IRQ_ENABLE;
+	ct->regs.disable = GFPIC_REG_IRQ_DISABLE;
+	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+	ct->chip.irq_mask = irq_gc_mask_disable_reg;
+
+	irq_setup_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS), 0,
+			       IRQ_NOPROBE | IRQ_LEVEL, 0);
+
+	gfpic->irq_domain = irq_domain_add_legacy(of_node, GFPIC_NR_IRQS,
+						  GFPIC_IRQ_BASE, 0,
+						  &goldfish_irq_domain_ops,
+						  NULL);
+	if (!gfpic->irq_domain) {
+		pr_err("Failed to add irqdomain!\n");
+		ret = -ENOMEM;
+		goto out_destroy_generic_chip;
+	}
+
+	irq_set_chained_handler_and_data(parent_irq,
+					 goldfish_pic_cascade, gfpic);
+
+	pr_info("Successfully registered.\n");
+	return 0;
+
+out_destroy_generic_chip:
+	irq_destroy_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS),
+				 IRQ_NOPROBE | IRQ_LEVEL, 0);
+out_iounmap:
+	iounmap(gfpic->base);
+out_unmap_irq:
+	irq_dispose_mapping(parent_irq);
+out_free:
+	kfree(gfpic);
+out_err:
+	pr_err("Failed to initialize! (errno = %d)\n", ret);
+	return ret;
+}
+
+IRQCHIP_DECLARE(google_gf_pic, "google,goldfish-pic", goldfish_pic_of_init);
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
new file mode 100644
index 0000000..5b4fd2f
--- /dev/null
+++ b/drivers/irqchip/irq-hip04.c
@@ -0,0 +1,417 @@
+/*
+ * Hisilicon HiP04 INTC
+ *
+ * Copyright (C) 2002-2014 ARM Limited.
+ * Copyright (c) 2013-2014 Hisilicon Ltd.
+ * Copyright (c) 2013-2014 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Interrupt architecture for the HIP04 INTC:
+ *
+ * o There is one Interrupt Distributor, which receives interrupts
+ *   from system devices and sends them to the Interrupt Controllers.
+ *
+ * o There is one CPU Interface per CPU, which forwards interrupts sent
+ *   by the Distributor, and interrupts generated locally, to the
+ *   associated CPU. The base address of the CPU interface is usually
+ *   aliased so that the same address points to different chips depending
+ *   on the CPU it is accessed from.
+ *
+ * Note that IRQs 0-31 are special - they are local to each CPU.
+ * As such, the enable set/clear, pending set/clear and active bit
+ * registers are banked per-cpu for these sources.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/irq.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+
+#include "irq-gic-common.h"
+
+#define HIP04_MAX_IRQS		510
+
+struct hip04_irq_data {
+	void __iomem *dist_base;
+	void __iomem *cpu_base;
+	struct irq_domain *domain;
+	unsigned int nr_irqs;
+};
+
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
+/*
+ * The GIC mapping of CPU interfaces does not necessarily match
+ * the logical CPU numbering.  Let's use a mapping as returned
+ * by the GIC itself.
+ */
+#define NR_HIP04_CPU_IF 16
+static u16 hip04_cpu_map[NR_HIP04_CPU_IF] __read_mostly;
+
+static struct hip04_irq_data hip04_data __read_mostly;
+
+static inline void __iomem *hip04_dist_base(struct irq_data *d)
+{
+	struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d);
+	return hip04_data->dist_base;
+}
+
+static inline void __iomem *hip04_cpu_base(struct irq_data *d)
+{
+	struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d);
+	return hip04_data->cpu_base;
+}
+
+static inline unsigned int hip04_irq(struct irq_data *d)
+{
+	return d->hwirq;
+}
+
+/*
+ * Routines to acknowledge, disable and enable interrupts
+ */
+static void hip04_mask_irq(struct irq_data *d)
+{
+	u32 mask = 1 << (hip04_irq(d) % 32);
+
+	raw_spin_lock(&irq_controller_lock);
+	writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_CLEAR +
+		       (hip04_irq(d) / 32) * 4);
+	raw_spin_unlock(&irq_controller_lock);
+}
+
+static void hip04_unmask_irq(struct irq_data *d)
+{
+	u32 mask = 1 << (hip04_irq(d) % 32);
+
+	raw_spin_lock(&irq_controller_lock);
+	writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_SET +
+		       (hip04_irq(d) / 32) * 4);
+	raw_spin_unlock(&irq_controller_lock);
+}
+
+static void hip04_eoi_irq(struct irq_data *d)
+{
+	writel_relaxed(hip04_irq(d), hip04_cpu_base(d) + GIC_CPU_EOI);
+}
+
+static int hip04_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	void __iomem *base = hip04_dist_base(d);
+	unsigned int irq = hip04_irq(d);
+	int ret;
+
+	/* Interrupt configuration for SGIs can't be changed */
+	if (irq < 16)
+		return -EINVAL;
+
+	/* SPIs have restrictions on the supported types */
+	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
+			 type != IRQ_TYPE_EDGE_RISING)
+		return -EINVAL;
+
+	raw_spin_lock(&irq_controller_lock);
+
+	ret = gic_configure_irq(irq, type, base, NULL);
+
+	raw_spin_unlock(&irq_controller_lock);
+
+	return ret;
+}
+
+#ifdef CONFIG_SMP
+static int hip04_irq_set_affinity(struct irq_data *d,
+				  const struct cpumask *mask_val,
+				  bool force)
+{
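+	/*
+	 * HIP04 widens each target field to 16 bits (one bit per CPU
+	 * interface), so only two interrupts fit in each 32-bit
+	 * GIC_DIST_TARGET register.
+	 */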
+	void __iomem *reg;
+	unsigned int cpu, shift = (hip04_irq(d) % 2) * 16;
+	u32 val, mask, bit;
+
+	if (!force)
+		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	else
+		cpu = cpumask_first(mask_val);
+
+	if (cpu >= NR_HIP04_CPU_IF || cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	raw_spin_lock(&irq_controller_lock);
+	reg = hip04_dist_base(d) + GIC_DIST_TARGET + ((hip04_irq(d) * 2) & ~3);
+	mask = 0xffff << shift;
+	bit = hip04_cpu_map[cpu] << shift;
+	val = readl_relaxed(reg) & ~mask;
+	writel_relaxed(val | bit, reg);
+	raw_spin_unlock(&irq_controller_lock);
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK;
+}
+#endif
+
+static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
+{
+	u32 irqstat, irqnr;
+	void __iomem *cpu_base = hip04_data.cpu_base;
+
+	do {
+		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+		irqnr = irqstat & GICC_IAR_INT_ID_MASK;
+
+		if (likely(irqnr > 15 && irqnr <= HIP04_MAX_IRQS)) {
+			handle_domain_irq(hip04_data.domain, irqnr, regs);
+			continue;
+		}
+		if (irqnr < 16) {
+			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+#ifdef CONFIG_SMP
+			handle_IPI(irqnr, regs);
+#endif
+			continue;
+		}
+		break;
+	} while (1);
+}
+
+static struct irq_chip hip04_irq_chip = {
+	.name			= "HIP04 INTC",
+	.irq_mask		= hip04_mask_irq,
+	.irq_unmask		= hip04_unmask_irq,
+	.irq_eoi		= hip04_eoi_irq,
+	.irq_set_type		= hip04_irq_set_type,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	= hip04_irq_set_affinity,
+#endif
+	.flags			= IRQCHIP_SET_TYPE_MASKED |
+				  IRQCHIP_SKIP_SET_WAKE |
+				  IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static u16 hip04_get_cpumask(struct hip04_irq_data *intc)
+{
+	void __iomem *base = intc->dist_base;
+	u32 mask, i;
+
+	for (i = mask = 0; i < 32; i += 2) {
+		mask = readl_relaxed(base + GIC_DIST_TARGET + i * 2);
+		mask |= mask >> 16;
+		if (mask)
+			break;
+	}
+
+	if (!mask)
+		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+	return mask;
+}
+
+static void __init hip04_irq_dist_init(struct hip04_irq_data *intc)
+{
+	unsigned int i;
+	u32 cpumask;
+	unsigned int nr_irqs = intc->nr_irqs;
+	void __iomem *base = intc->dist_base;
+
+	writel_relaxed(0, base + GIC_DIST_CTRL);
+
+	/*
+	 * Set all global interrupts to this CPU only.
+	 */
+	cpumask = hip04_get_cpumask(intc);
+	cpumask |= cpumask << 16;
+	for (i = 32; i < nr_irqs; i += 2)
+		writel_relaxed(cpumask, base + GIC_DIST_TARGET + ((i * 2) & ~3));
+
+	gic_dist_config(base, nr_irqs, NULL);
+
+	writel_relaxed(1, base + GIC_DIST_CTRL);
+}
+
+static void hip04_irq_cpu_init(struct hip04_irq_data *intc)
+{
+	void __iomem *dist_base = intc->dist_base;
+	void __iomem *base = intc->cpu_base;
+	unsigned int cpu_mask, cpu = smp_processor_id();
+	int i;
+
+	/*
+	 * Get what the GIC says our CPU mask is.
+	 */
+	BUG_ON(cpu >= NR_HIP04_CPU_IF);
+	cpu_mask = hip04_get_cpumask(intc);
+	hip04_cpu_map[cpu] = cpu_mask;
+
+	/*
+	 * Clear our mask from the other map entries in case they're
+	 * still undefined.
+	 */
+	for (i = 0; i < NR_HIP04_CPU_IF; i++)
+		if (i != cpu)
+			hip04_cpu_map[i] &= ~cpu_mask;
+
+	gic_cpu_config(dist_base, NULL);
+
+	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
+	writel_relaxed(1, base + GIC_CPU_CTRL);
+}
+
+#ifdef CONFIG_SMP
+static void hip04_raise_softirq(const struct cpumask *mask, unsigned int irq)
+{
+	int cpu;
+	unsigned long flags, map = 0;
+
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+
+	/* Convert our logical CPU mask into a physical one. */
+	for_each_cpu(cpu, mask)
+		map |= hip04_cpu_map[cpu];
+
+	/*
+	 * Ensure that stores to Normal memory are visible to the
+	 * other CPUs before they observe us issuing the IPI.
+	 */
+	dmb(ishst);
+
+	/* this always happens on GIC0 */
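+	/* HIP04 moves the 16-bit SGI CPU target list down to bits [23:8]. */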
+	writel_relaxed(map << 8 | irq, hip04_data.dist_base + GIC_DIST_SOFTINT);
+
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+#endif
+
+static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
+				irq_hw_number_t hw)
+{
+	if (hw < 32) {
+		irq_set_percpu_devid(irq);
+		irq_set_chip_and_handler(irq, &hip04_irq_chip,
+					 handle_percpu_devid_irq);
+		irq_set_status_flags(irq, IRQ_NOAUTOEN);
+	} else {
+		irq_set_chip_and_handler(irq, &hip04_irq_chip,
+					 handle_fasteoi_irq);
+		irq_set_probe(irq);
+		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
+	}
+	irq_set_chip_data(irq, d->host_data);
+	return 0;
+}
+
+static int hip04_irq_domain_xlate(struct irq_domain *d,
+				  struct device_node *controller,
+				  const u32 *intspec, unsigned int intsize,
+				  unsigned long *out_hwirq,
+				  unsigned int *out_type)
+{
+	unsigned long ret = 0;
+
+	if (irq_domain_get_of_node(d) != controller)
+		return -EINVAL;
+	if (intsize < 3)
+		return -EINVAL;
+
+	/* Get the interrupt number and add 16 to skip over SGIs */
+	*out_hwirq = intspec[1] + 16;
+
+	/* For SPIs, we need to add 16 more to get the irq ID number */
+	if (!intspec[0])
+		*out_hwirq += 16;
+
+	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+
+	return ret;
+}
+
+static int hip04_irq_starting_cpu(unsigned int cpu)
+{
+	hip04_irq_cpu_init(&hip04_data);
+	return 0;
+}
+
+static const struct irq_domain_ops hip04_irq_domain_ops = {
+	.map	= hip04_irq_domain_map,
+	.xlate	= hip04_irq_domain_xlate,
+};
+
+static int __init
+hip04_of_init(struct device_node *node, struct device_node *parent)
+{
+	irq_hw_number_t hwirq_base = 16;
+	int nr_irqs, irq_base, i;
+
+	if (WARN_ON(!node))
+		return -ENODEV;
+
+	hip04_data.dist_base = of_iomap(node, 0);
+	WARN(!hip04_data.dist_base, "unable to map hip04 intc dist registers\n");
+
+	hip04_data.cpu_base = of_iomap(node, 1);
+	WARN(!hip04_data.cpu_base, "unable to map hip04 intc cpu registers\n");
+
+	/*
+	 * Initialize the CPU interface map to all CPUs.
+	 * It will be refined as each CPU probes its ID.
+	 */
+	for (i = 0; i < NR_HIP04_CPU_IF; i++)
+		hip04_cpu_map[i] = 0xffff;
+
+	/*
+	 * Find out how many interrupts are supported.
+	 * The HIP04 INTC only supports up to 510 interrupt sources.
+	 */
+	nr_irqs = readl_relaxed(hip04_data.dist_base + GIC_DIST_CTR) & 0x1f;
+	nr_irqs = (nr_irqs + 1) * 32;
+	if (nr_irqs > HIP04_MAX_IRQS)
+		nr_irqs = HIP04_MAX_IRQS;
+	hip04_data.nr_irqs = nr_irqs;
+
+	nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+
+	irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id());
+	if (irq_base < 0) {
+		pr_err("failed to allocate IRQ numbers\n");
+		return -EINVAL;
+	}
+
+	hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base,
+						  hwirq_base,
+						  &hip04_irq_domain_ops,
+						  &hip04_data);
+
+	if (WARN_ON(!hip04_data.domain))
+		return -EINVAL;
+
+#ifdef CONFIG_SMP
+	set_smp_cross_call(hip04_raise_softirq);
+#endif
+	set_handle_irq(hip04_handle_irq);
+
+	hip04_irq_dist_init(&hip04_data);
+	cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "irqchip/hip04:starting",
+			  hip04_irq_starting_cpu, NULL);
+	return 0;
+}
+IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init);
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
new file mode 100644
index 0000000..b0d4aab
--- /dev/null
+++ b/drivers/irqchip/irq-i8259.c
@@ -0,0 +1,374 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Code to handle x86 style IRQs plus some generic interrupt stuff.
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+#include <linux/irq.h>
+
+#include <asm/i8259.h>
+#include <asm/io.h>
+
+/*
+ * This is the 'legacy' 8259A Programmable Interrupt Controller,
+ * present in the majority of PC/AT boxes.
+ * plus some generic x86-specific things, if generic specifics make
+ * any sense at all.
+ * This file should become arch/i386/kernel/irq.c when the old irq.c
+ * moves to arch-independent land.
+ */
+
+static int i8259A_auto_eoi = -1;
+DEFINE_RAW_SPINLOCK(i8259A_lock);
+static void disable_8259A_irq(struct irq_data *d);
+static void enable_8259A_irq(struct irq_data *d);
+static void mask_and_ack_8259A(struct irq_data *d);
+static void init_8259A(int auto_eoi);
+static int (*i8259_poll)(void) = i8259_irq;
+
+static struct irq_chip i8259A_chip = {
+	.name			= "XT-PIC",
+	.irq_mask		= disable_8259A_irq,
+	.irq_disable		= disable_8259A_irq,
+	.irq_unmask		= enable_8259A_irq,
+	.irq_mask_ack		= mask_and_ack_8259A,
+};
+
+/*
+ * 8259A PIC functions to handle ISA devices:
+ */
+
+void i8259_set_poll(int (*poll)(void))
+{
+	i8259_poll = poll;
+}
+
+/*
+ * This contains the IRQ mask for both 8259A interrupt controllers.
+ */
+static unsigned int cached_irq_mask = 0xffff;
+
+#define cached_master_mask	(cached_irq_mask)
+#define cached_slave_mask	(cached_irq_mask >> 8)
+
+static void disable_8259A_irq(struct irq_data *d)
+{
+	unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
+	unsigned long flags;
+
+	mask = 1 << irq;
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
+	cached_irq_mask |= mask;
+	if (irq & 8)
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
+	else
+		outb(cached_master_mask, PIC_MASTER_IMR);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+static void enable_8259A_irq(struct irq_data *d)
+{
+	unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
+	unsigned long flags;
+
+	mask = ~(1 << irq);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
+	cached_irq_mask &= mask;
+	if (irq & 8)
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
+	else
+		outb(cached_master_mask, PIC_MASTER_IMR);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+void make_8259A_irq(unsigned int irq)
+{
+	disable_irq_nosync(irq);
+	irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+	enable_irq(irq);
+}
+
+/*
+ * This function is assumed to be called rarely. Switching between
+ * 8259A registers is slow.
+ * This has to be protected by the irq controller spinlock
+ * before being called.
+ */
+static inline int i8259A_irq_real(unsigned int irq)
+{
+	int value;
+	int irqmask = 1 << irq;
+
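+	/*
+	 * OCW3 0x0b selects the In-Service Register for the next read of
+	 * the command port; 0x0a switches back to the Interrupt Request
+	 * Register.
+	 */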
+	if (irq < 8) {
+		outb(0x0B, PIC_MASTER_CMD);	/* ISR register */
+		value = inb(PIC_MASTER_CMD) & irqmask;
+		outb(0x0A, PIC_MASTER_CMD);	/* back to the IRR register */
+		return value;
+	}
+	outb(0x0B, PIC_SLAVE_CMD);	/* ISR register */
+	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
+	outb(0x0A, PIC_SLAVE_CMD);	/* back to the IRR register */
+	return value;
+}
+
+/*
+ * Careful! The 8259A is a fragile beast: it pretty
+ * much _has_ to be done exactly like this (mask it
+ * first, _then_ send the EOI), and the order of EOI
+ * to the two 8259s is important!
+ */
+static void mask_and_ack_8259A(struct irq_data *d)
+{
+	unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
+	unsigned long flags;
+
+	irqmask = 1 << irq;
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
+	/*
+	 * Lightweight spurious IRQ detection. We do not want
+	 * to overdo spurious IRQ handling - it's usually a sign
+	 * of hardware problems, so we only do the checks we can
+	 * do without slowing down good hardware unnecessarily.
+	 *
+	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
+	 * usually resulting from the 8259A-1|2 PICs) occur
+	 * even if the IRQ is masked in the 8259A. Thus we
+	 * can check spurious 8259A IRQs without doing the
+	 * quite slow i8259A_irq_real() call for every IRQ.
+	 * This does not cover 100% of spurious interrupts,
+	 * but should be enough to warn the user that there
+	 * is something bad going on ...
+	 */
+	if (cached_irq_mask & irqmask)
+		goto spurious_8259A_irq;
+	cached_irq_mask |= irqmask;
+
+handle_real_irq:
+	if (irq & 8) {
+		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
+		outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+		outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
+	} else {
+		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
+		outb(cached_master_mask, PIC_MASTER_IMR);
+		outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI' to master */
+	}
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+	return;
+
+spurious_8259A_irq:
+	/*
+	 * this is the slow path - should happen rarely.
+	 */
+	if (i8259A_irq_real(irq))
+		/*
+		 * oops, the IRQ _is_ in service according to the
+		 * 8259A - not spurious, go handle it.
+		 */
+		goto handle_real_irq;
+
+	{
+		static int spurious_irq_mask;
+		/*
+		 * At this point we can be sure the IRQ is spurious,
+		 * let's ACK and report it. [once per IRQ]
+		 */
+		if (!(spurious_irq_mask & irqmask)) {
+			printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
+			spurious_irq_mask |= irqmask;
+		}
+		atomic_inc(&irq_err_count);
+		/*
+		 * Theoretically we do not have to handle this IRQ,
+		 * but in Linux this does not cause problems and is
+		 * simpler for us.
+		 */
+		goto handle_real_irq;
+	}
+}
+
+static void i8259A_resume(void)
+{
+	if (i8259A_auto_eoi >= 0)
+		init_8259A(i8259A_auto_eoi);
+}
+
+static void i8259A_shutdown(void)
+{
+	/* Put the i8259A into a quiescent state from
+	 * which the kernel initialization code can
+	 * recover it.
+	 */
+	if (i8259A_auto_eoi >= 0) {
+		outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
+		outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
+	}
+}
+
+static struct syscore_ops i8259_syscore_ops = {
+	.resume = i8259A_resume,
+	.shutdown = i8259A_shutdown,
+};
+
+static int __init i8259A_init_sysfs(void)
+{
+	register_syscore_ops(&i8259_syscore_ops);
+	return 0;
+}
+
+device_initcall(i8259A_init_sysfs);
+
+static void init_8259A(int auto_eoi)
+{
+	unsigned long flags;
+
+	i8259A_auto_eoi = auto_eoi;
+
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
+
+	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
+	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
+
+	/*
+	 * outb_p - this has to work on a wide range of PC hardware.
+	 */
+	outb_p(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
+	outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
+	outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
+	if (auto_eoi)	/* master does Auto EOI */
+		outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
+	else		/* master expects normal EOI */
+		outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
+
+	outb_p(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
+	outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
+	outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
+	outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
+	if (auto_eoi)
+		/*
+		 * In AEOI mode we just have to mask the interrupt
+		 * when acking.
+		 */
+		i8259A_chip.irq_mask_ack = disable_8259A_irq;
+	else
+		i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
+
+	udelay(100);		/* wait for 8259A to initialize */
+
+	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
+	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */
+
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+static struct irqaction irq2 = {
+	.handler = no_action,
+	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
+};
+
+static struct resource pic1_io_resource = {
+	.name = "pic1",
+	.start = PIC_MASTER_CMD,
+	.end = PIC_MASTER_IMR,
+	.flags = IORESOURCE_IO | IORESOURCE_BUSY
+};
+
+static struct resource pic2_io_resource = {
+	.name = "pic2",
+	.start = PIC_SLAVE_CMD,
+	.end = PIC_SLAVE_IMR,
+	.flags = IORESOURCE_IO | IORESOURCE_BUSY
+};
+
+static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq,
+				 irq_hw_number_t hw)
+{
+	irq_set_chip_and_handler(virq, &i8259A_chip, handle_level_irq);
+	irq_set_probe(virq);
+	return 0;
+}
+
+static const struct irq_domain_ops i8259A_ops = {
+	.map = i8259A_irq_domain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+/*
+ * On systems with i8259-style interrupt controllers we assume for
+ * driver compatibility reasons interrupts 0 - 15 to be the i8259
+ * interrupts even if the hardware uses a different interrupt numbering.
+ */
+struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
+{
+	struct irq_domain *domain;
+
+	insert_resource(&ioport_resource, &pic1_io_resource);
+	insert_resource(&ioport_resource, &pic2_io_resource);
+
+	init_8259A(0);
+
+	domain = irq_domain_add_legacy(node, 16, I8259A_IRQ_BASE, 0,
+				       &i8259A_ops, NULL);
+	if (!domain)
+		panic("Failed to add i8259 IRQ domain");
+
+	setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
+	return domain;
+}
+
+void __init init_i8259_irqs(void)
+{
+	__init_i8259_irqs(NULL);
+}
+
+static void i8259_irq_dispatch(struct irq_desc *desc)
+{
+	struct irq_domain *domain = irq_desc_get_handler_data(desc);
+	int hwirq = i8259_poll();
+	unsigned int irq;
+
+	if (hwirq < 0)
+		return;
+
+	irq = irq_linear_revmap(domain, hwirq);
+	generic_handle_irq(irq);
+}
+
+int __init i8259_of_init(struct device_node *node, struct device_node *parent)
+{
+	struct irq_domain *domain;
+	unsigned int parent_irq;
+
+	domain = __init_i8259_irqs(node);
+
+	parent_irq = irq_of_parse_and_map(node, 0);
+	if (!parent_irq) {
+		pr_err("Failed to map i8259 parent IRQ\n");
+		irq_domain_remove(domain);
+		return -ENODEV;
+	}
+
+	irq_set_chained_handler_and_data(parent_irq, i8259_irq_dispatch,
+					 domain);
+	return 0;
+}
+IRQCHIP_DECLARE(i8259, "intel,i8259", i8259_of_init);
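
A board-level sketch of the legacy entry point above, assuming a non-DT
platform; example_init_IRQ(), timer_interrupt() and the irqaction are
illustrative, not part of this driver:

	static struct irqaction timer_irqaction = {
		.handler = timer_interrupt,	/* assumed timer ISR */
		.name	 = "timer",
	};

	void __init example_init_IRQ(void)
	{
		init_i8259_irqs();
		/* hwirq n is now reachable as virq I8259A_IRQ_BASE + n */
		setup_irq(I8259A_IRQ_BASE + 0, &timer_irqaction);
	}
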
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
new file mode 100644
index 0000000..d00489a
--- /dev/null
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG PowerDown Controller (PDC)
+ *
+ * Copyright 2010-2013 Imagination Technologies Ltd.
+ *
+ * Exposes the syswake and PDC peripheral wake interrupts to the system.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+/* PDC interrupt register numbers */
+
+#define PDC_IRQ_STATUS			0x310
+#define PDC_IRQ_ENABLE			0x314
+#define PDC_IRQ_CLEAR			0x318
+#define PDC_IRQ_ROUTE			0x31c
+#define PDC_SYS_WAKE_BASE		0x330
+#define PDC_SYS_WAKE_STRIDE		0x8
+#define PDC_SYS_WAKE_CONFIG_BASE	0x334
+#define PDC_SYS_WAKE_CONFIG_STRIDE	0x8
+
+/* PDC interrupt register field masks */
+
+#define PDC_IRQ_SYS3			0x08
+#define PDC_IRQ_SYS2			0x04
+#define PDC_IRQ_SYS1			0x02
+#define PDC_IRQ_SYS0			0x01
+#define PDC_IRQ_ROUTE_WU_EN_SYS3	0x08000000
+#define PDC_IRQ_ROUTE_WU_EN_SYS2	0x04000000
+#define PDC_IRQ_ROUTE_WU_EN_SYS1	0x02000000
+#define PDC_IRQ_ROUTE_WU_EN_SYS0	0x01000000
+#define PDC_IRQ_ROUTE_WU_EN_WD		0x00040000
+#define PDC_IRQ_ROUTE_WU_EN_IR		0x00020000
+#define PDC_IRQ_ROUTE_WU_EN_RTC		0x00010000
+#define PDC_IRQ_ROUTE_EXT_EN_SYS3	0x00000800
+#define PDC_IRQ_ROUTE_EXT_EN_SYS2	0x00000400
+#define PDC_IRQ_ROUTE_EXT_EN_SYS1	0x00000200
+#define PDC_IRQ_ROUTE_EXT_EN_SYS0	0x00000100
+#define PDC_IRQ_ROUTE_EXT_EN_WD		0x00000004
+#define PDC_IRQ_ROUTE_EXT_EN_IR		0x00000002
+#define PDC_IRQ_ROUTE_EXT_EN_RTC	0x00000001
+#define PDC_SYS_WAKE_RESET		0x00000010
+#define PDC_SYS_WAKE_INT_MODE		0x0000000e
+#define PDC_SYS_WAKE_INT_MODE_SHIFT	1
+#define PDC_SYS_WAKE_PIN_VAL		0x00000001
+
+/* PDC interrupt constants */
+
+#define PDC_SYS_WAKE_INT_LOW		0x0
+#define PDC_SYS_WAKE_INT_HIGH		0x1
+#define PDC_SYS_WAKE_INT_DOWN		0x2
+#define PDC_SYS_WAKE_INT_UP		0x3
+#define PDC_SYS_WAKE_INT_CHANGE		0x6
+#define PDC_SYS_WAKE_INT_NONE		0x4
+
+/**
+ * struct pdc_intc_priv - private pdc interrupt data.
+ * @nr_perips:		Number of peripheral interrupt signals.
+ * @nr_syswakes:	Number of syswake signals.
+ * @perip_irqs:		List of peripheral IRQ numbers handled.
+ * @syswake_irq:	Shared PDC syswake IRQ number.
+ * @domain:		IRQ domain for PDC peripheral and syswake IRQs.
+ * @pdc_base:		Base of PDC registers.
+ * @irq_route:		Cached version of PDC_IRQ_ROUTE register.
+ * @lock:		Lock to protect the PDC syswake registers and the cached
+ *			values of those registers in this struct.
+ */
+struct pdc_intc_priv {
+	unsigned int		nr_perips;
+	unsigned int		nr_syswakes;
+	unsigned int		*perip_irqs;
+	unsigned int		syswake_irq;
+	struct irq_domain	*domain;
+	void __iomem		*pdc_base;
+
+	u32			irq_route;
+	raw_spinlock_t		lock;
+};
+
+static void pdc_write(struct pdc_intc_priv *priv, unsigned int reg_offs,
+		      unsigned int data)
+{
+	iowrite32(data, priv->pdc_base + reg_offs);
+}
+
+static unsigned int pdc_read(struct pdc_intc_priv *priv,
+			     unsigned int reg_offs)
+{
+	return ioread32(priv->pdc_base + reg_offs);
+}
+
+/* Generic IRQ callbacks */
+
+#define SYS0_HWIRQ	8
+
+static unsigned int hwirq_is_syswake(irq_hw_number_t hw)
+{
+	return hw >= SYS0_HWIRQ;
+}
+
+static unsigned int hwirq_to_syswake(irq_hw_number_t hw)
+{
+	return hw - SYS0_HWIRQ;
+}
+
+static irq_hw_number_t syswake_to_hwirq(unsigned int syswake)
+{
+	return SYS0_HWIRQ + syswake;
+}
+
+static struct pdc_intc_priv *irqd_to_priv(struct irq_data *data)
+{
+	return (struct pdc_intc_priv *)data->domain->host_data;
+}
+
+/*
+ * perip_irq_mask() and perip_irq_unmask() use IRQ_ROUTE which also contains
+ * wake bits, therefore we cannot use the generic irqchip mask callbacks as they
+ * cache the mask.
+ */
+
+static void perip_irq_mask(struct irq_data *data)
+{
+	struct pdc_intc_priv *priv = irqd_to_priv(data);
+
+	raw_spin_lock(&priv->lock);
+	priv->irq_route &= ~data->mask;
+	pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+	raw_spin_unlock(&priv->lock);
+}
+
+static void perip_irq_unmask(struct irq_data *data)
+{
+	struct pdc_intc_priv *priv = irqd_to_priv(data);
+
+	raw_spin_lock(&priv->lock);
+	priv->irq_route |= data->mask;
+	pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+	raw_spin_unlock(&priv->lock);
+}
+
+static int syswake_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+	struct pdc_intc_priv *priv = irqd_to_priv(data);
+	unsigned int syswake = hwirq_to_syswake(data->hwirq);
+	unsigned int irq_mode;
+	unsigned int soc_sys_wake_regoff, soc_sys_wake;
+
+	/* translate to syswake IRQ mode */
+	switch (flow_type) {
+	case IRQ_TYPE_EDGE_BOTH:
+		irq_mode = PDC_SYS_WAKE_INT_CHANGE;
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		irq_mode = PDC_SYS_WAKE_INT_UP;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		irq_mode = PDC_SYS_WAKE_INT_DOWN;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		irq_mode = PDC_SYS_WAKE_INT_HIGH;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		irq_mode = PDC_SYS_WAKE_INT_LOW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	raw_spin_lock(&priv->lock);
+
+	/* set the IRQ mode */
+	soc_sys_wake_regoff = PDC_SYS_WAKE_BASE + syswake*PDC_SYS_WAKE_STRIDE;
+	soc_sys_wake = pdc_read(priv, soc_sys_wake_regoff);
+	soc_sys_wake &= ~PDC_SYS_WAKE_INT_MODE;
+	soc_sys_wake |= irq_mode << PDC_SYS_WAKE_INT_MODE_SHIFT;
+	pdc_write(priv, soc_sys_wake_regoff, soc_sys_wake);
+
+	/* and update the handler */
+	irq_setup_alt_chip(data, flow_type);
+
+	raw_spin_unlock(&priv->lock);
+
+	return 0;
+}
+
+/* applies to both peripheral and syswake interrupts */
+static int pdc_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+	struct pdc_intc_priv *priv = irqd_to_priv(data);
+	irq_hw_number_t hw = data->hwirq;
+	unsigned int mask = (1 << 16) << hw;
+	unsigned int dst_irq;
+
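+	/*
+	 * Wake-up enable bits in IRQ_ROUTE sit 16 bits above the
+	 * corresponding EXT_EN_* routing bits (see the WU_EN_* masks
+	 * above), hence the (1 << 16) << hw mask.
+	 */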
+	raw_spin_lock(&priv->lock);
+	if (on)
+		priv->irq_route |= mask;
+	else
+		priv->irq_route &= ~mask;
+	pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+	raw_spin_unlock(&priv->lock);
+
+	/* control the destination IRQ wakeup too for standby mode */
+	if (hwirq_is_syswake(hw))
+		dst_irq = priv->syswake_irq;
+	else
+		dst_irq = priv->perip_irqs[hw];
+	irq_set_irq_wake(dst_irq, on);
+
+	return 0;
+}
+
+static void pdc_intc_perip_isr(struct irq_desc *desc)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct pdc_intc_priv *priv;
+	unsigned int i, irq_no;
+
+	priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);
+
+	/* find the peripheral number */
+	for (i = 0; i < priv->nr_perips; ++i)
+		if (irq == priv->perip_irqs[i])
+			goto found;
+
+	/* should never get here */
+	return;
+found:
+
+	/* pass on the interrupt */
+	irq_no = irq_linear_revmap(priv->domain, i);
+	generic_handle_irq(irq_no);
+}
+
+static void pdc_intc_syswake_isr(struct irq_desc *desc)
+{
+	struct pdc_intc_priv *priv;
+	unsigned int syswake, irq_no;
+	unsigned int status;
+
+	priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);
+
+	status = pdc_read(priv, PDC_IRQ_STATUS) &
+		 pdc_read(priv, PDC_IRQ_ENABLE);
+	status &= (1 << priv->nr_syswakes) - 1;
+
+	for (syswake = 0; status; status >>= 1, ++syswake) {
+		/* Has this sys_wake triggered? */
+		if (!(status & 1))
+			continue;
+
+		irq_no = irq_linear_revmap(priv->domain,
+					   syswake_to_hwirq(syswake));
+		generic_handle_irq(irq_no);
+	}
+}
+
+static void pdc_intc_setup(struct pdc_intc_priv *priv)
+{
+	int i;
+	unsigned int soc_sys_wake_regoff;
+	unsigned int soc_sys_wake;
+
+	/*
+	 * Mask all syswake interrupts before routing, or we could receive an
+	 * interrupt before we're ready to handle it.
+	 */
+	pdc_write(priv, PDC_IRQ_ENABLE, 0);
+
+	/*
+	 * Enable routing of all syswakes
+	 * Disable all wake sources
+	 */
+	priv->irq_route = ((PDC_IRQ_ROUTE_EXT_EN_SYS0 << priv->nr_syswakes) -
+				PDC_IRQ_ROUTE_EXT_EN_SYS0);
+	pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+
+	/* Initialise syswake IRQ */
+	for (i = 0; i < priv->nr_syswakes; ++i) {
+		/* set the IRQ mode to none */
+		soc_sys_wake_regoff = PDC_SYS_WAKE_BASE + i*PDC_SYS_WAKE_STRIDE;
+		soc_sys_wake = PDC_SYS_WAKE_INT_NONE
+				<< PDC_SYS_WAKE_INT_MODE_SHIFT;
+		pdc_write(priv, soc_sys_wake_regoff, soc_sys_wake);
+	}
+}
+
+static int pdc_intc_probe(struct platform_device *pdev)
+{
+	struct pdc_intc_priv *priv;
+	struct device_node *node = pdev->dev.of_node;
+	struct resource *res_regs;
+	struct irq_chip_generic *gc;
+	unsigned int i;
+	int irq, ret;
+	u32 val;
+
+	if (!node)
+		return -ENOENT;
+
+	/* Get registers */
+	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res_regs == NULL) {
+		dev_err(&pdev->dev, "cannot find registers resource\n");
+		return -ENOENT;
+	}
+
+	/* Allocate driver data */
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&pdev->dev, "cannot allocate device data\n");
+		return -ENOMEM;
+	}
+	raw_spin_lock_init(&priv->lock);
+	platform_set_drvdata(pdev, priv);
+
+	/* Ioremap the registers */
+	priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start,
+				      resource_size(res_regs));
+	if (!priv->pdc_base)
+		return -EIO;
+
+	/* Get number of peripherals */
+	ret = of_property_read_u32(node, "num-perips", &val);
+	if (ret) {
+		dev_err(&pdev->dev, "No num-perips node property found\n");
+		return -EINVAL;
+	}
+	if (val > SYS0_HWIRQ) {
+		dev_err(&pdev->dev, "num-perips (%u) out of range\n", val);
+		return -EINVAL;
+	}
+	priv->nr_perips = val;
+
+	/* Get number of syswakes */
+	ret = of_property_read_u32(node, "num-syswakes", &val);
+	if (ret) {
+		dev_err(&pdev->dev, "No num-syswakes node property found\n");
+		return -EINVAL;
+	}
+	if (val > SYS0_HWIRQ) {
+		dev_err(&pdev->dev, "num-syswakes (%u) out of range\n", val);
+		return -EINVAL;
+	}
+	priv->nr_syswakes = val;
+
+	/* Get peripheral IRQ numbers */
+	priv->perip_irqs = devm_kcalloc(&pdev->dev, priv->nr_perips,
+					sizeof(*priv->perip_irqs), GFP_KERNEL);
+	if (!priv->perip_irqs) {
+		dev_err(&pdev->dev, "cannot allocate perip IRQ list\n");
+		return -ENOMEM;
+	}
+	for (i = 0; i < priv->nr_perips; ++i) {
+		irq = platform_get_irq(pdev, 1 + i);
+		if (irq < 0) {
+			dev_err(&pdev->dev, "cannot find perip IRQ #%u\n", i);
+			return irq;
+		}
+		priv->perip_irqs[i] = irq;
+	}
+	/* check if too many were provided */
+	if (platform_get_irq(pdev, 1 + i) >= 0) {
+		dev_err(&pdev->dev, "surplus perip IRQs detected\n");
+		return -EINVAL;
+	}
+
+	/* Get syswake IRQ number */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "cannot find syswake IRQ\n");
+		return irq;
+	}
+	priv->syswake_irq = irq;
+
+	/* Set up an IRQ domain */
+	priv->domain = irq_domain_add_linear(node, 16, &irq_generic_chip_ops,
+					     priv);
+	if (unlikely(!priv->domain)) {
+		dev_err(&pdev->dev, "cannot add IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Set up 2 generic irq chips with 2 chip types.
+	 * The first one for peripheral irqs (only 1 chip type used)
+	 * The second one for syswake irqs (edge and level chip types)
+	 */
+	ret = irq_alloc_domain_generic_chips(priv->domain, 8, 2, "pdc",
+					     handle_level_irq, 0, 0,
+					     IRQ_GC_INIT_NESTED_LOCK);
+	if (ret)
+		goto err_generic;
+
+	/* peripheral interrupt chip */
+
+	gc = irq_get_domain_generic_chip(priv->domain, 0);
+	gc->unused	= ~(BIT(priv->nr_perips) - 1);
+	gc->reg_base	= priv->pdc_base;
+	/*
+	 * IRQ_ROUTE contains wake bits, so we can't use the generic versions as
+	 * they cache the mask
+	 */
+	gc->chip_types[0].regs.mask		= PDC_IRQ_ROUTE;
+	gc->chip_types[0].chip.irq_mask		= perip_irq_mask;
+	gc->chip_types[0].chip.irq_unmask	= perip_irq_unmask;
+	gc->chip_types[0].chip.irq_set_wake	= pdc_irq_set_wake;
+
+	/* syswake interrupt chip */
+
+	gc = irq_get_domain_generic_chip(priv->domain, 8);
+	gc->unused	= ~(BIT(priv->nr_syswakes) - 1);
+	gc->reg_base	= priv->pdc_base;
+
+	/* edge interrupts */
+	gc->chip_types[0].type			= IRQ_TYPE_EDGE_BOTH;
+	gc->chip_types[0].handler		= handle_edge_irq;
+	gc->chip_types[0].regs.ack		= PDC_IRQ_CLEAR;
+	gc->chip_types[0].regs.mask		= PDC_IRQ_ENABLE;
+	gc->chip_types[0].chip.irq_ack		= irq_gc_ack_set_bit;
+	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
+	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
+	gc->chip_types[0].chip.irq_set_type	= syswake_irq_set_type;
+	gc->chip_types[0].chip.irq_set_wake	= pdc_irq_set_wake;
+	/* for standby we pass on to the shared syswake IRQ */
+	gc->chip_types[0].chip.flags		= IRQCHIP_MASK_ON_SUSPEND;
+
+	/* level interrupts */
+	gc->chip_types[1].type			= IRQ_TYPE_LEVEL_MASK;
+	gc->chip_types[1].handler		= handle_level_irq;
+	gc->chip_types[1].regs.ack		= PDC_IRQ_CLEAR;
+	gc->chip_types[1].regs.mask		= PDC_IRQ_ENABLE;
+	gc->chip_types[1].chip.irq_ack		= irq_gc_ack_set_bit;
+	gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
+	gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
+	gc->chip_types[1].chip.irq_set_type	= syswake_irq_set_type;
+	gc->chip_types[1].chip.irq_set_wake	= pdc_irq_set_wake;
+	/* for standby we pass on to the shared syswake IRQ */
+	gc->chip_types[1].chip.flags		= IRQCHIP_MASK_ON_SUSPEND;
+
+	/* Set up the hardware to enable interrupt routing */
+	pdc_intc_setup(priv);
+
+	/* Setup chained handlers for the peripheral IRQs */
+	for (i = 0; i < priv->nr_perips; ++i) {
+		irq = priv->perip_irqs[i];
+		irq_set_chained_handler_and_data(irq, pdc_intc_perip_isr,
+						 priv);
+	}
+
+	/* Setup chained handler for the syswake IRQ */
+	irq_set_chained_handler_and_data(priv->syswake_irq,
+					 pdc_intc_syswake_isr, priv);
+
+	dev_info(&pdev->dev,
+		 "PDC IRQ controller initialised (%u perip IRQs, %u syswake IRQs)\n",
+		 priv->nr_perips,
+		 priv->nr_syswakes);
+
+	return 0;
+err_generic:
+	irq_domain_remove(priv->domain);
+	return ret;
+}
+
+static int pdc_intc_remove(struct platform_device *pdev)
+{
+	struct pdc_intc_priv *priv = platform_get_drvdata(pdev);
+
+	irq_domain_remove(priv->domain);
+	return 0;
+}
+
+static const struct of_device_id pdc_intc_match[] = {
+	{ .compatible = "img,pdc-intc" },
+	{}
+};
+
+static struct platform_driver pdc_intc_driver = {
+	.driver = {
+		.name		= "pdc-intc",
+		.of_match_table	= pdc_intc_match,
+	},
+	.probe = pdc_intc_probe,
+	.remove = pdc_intc_remove,
+};
+
+static int __init pdc_intc_init(void)
+{
+	return platform_driver_register(&pdc_intc_driver);
+}
+core_initcall(pdc_intc_init);
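
A consumer-side sketch, assuming a platform device whose first interrupt
resolves into the PDC domain above (for instance syswake 0, hwirq 8); the
example_* names are illustrative:

	static irqreturn_t example_wake_isr(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int example_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);
		int ret;

		if (irq < 0)
			return irq;
		ret = devm_request_irq(&pdev->dev, irq, example_wake_isr,
				       IRQF_TRIGGER_RISING, "example-wake",
				       NULL);
		if (ret)
			return ret;
		/* ends up in pdc_irq_set_wake() above */
		return enable_irq_wake(irq);
	}
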
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
new file mode 100644
index 0000000..4760307
--- /dev/null
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/irqchip.h>
+#include <linux/syscore_ops.h>
+
+#define IMR_NUM			4
+#define GPC_MAX_IRQS            (IMR_NUM * 32)
+
+#define GPC_IMR1_CORE0		0x30
+#define GPC_IMR1_CORE1		0x40
+
+struct gpcv2_irqchip_data {
+	struct raw_spinlock	rlock;
+	void __iomem		*gpc_base;
+	u32			wakeup_sources[IMR_NUM];
+	u32			saved_irq_mask[IMR_NUM];
+	u32			cpu2wakeup;
+};
+
+static struct gpcv2_irqchip_data *imx_gpcv2_instance;
+
+static int gpcv2_wakeup_source_save(void)
+{
+	struct gpcv2_irqchip_data *cd;
+	void __iomem *reg;
+	int i;
+
+	cd = imx_gpcv2_instance;
+	if (!cd)
+		return 0;
+
+	for (i = 0; i < IMR_NUM; i++) {
+		reg = cd->gpc_base + cd->cpu2wakeup + i * 4;
+		cd->saved_irq_mask[i] = readl_relaxed(reg);
+		writel_relaxed(cd->wakeup_sources[i], reg);
+	}
+
+	return 0;
+}
+
+static void gpcv2_wakeup_source_restore(void)
+{
+	struct gpcv2_irqchip_data *cd;
+	void __iomem *reg;
+	int i;
+
+	cd = imx_gpcv2_instance;
+	if (!cd)
+		return;
+
+	for (i = 0; i < IMR_NUM; i++) {
+		reg = cd->gpc_base + cd->cpu2wakeup + i * 4;
+		writel_relaxed(cd->saved_irq_mask[i], reg);
+	}
+}
+
+static struct syscore_ops imx_gpcv2_syscore_ops = {
+	.suspend	= gpcv2_wakeup_source_save,
+	.resume		= gpcv2_wakeup_source_restore,
+};
+
+static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+	struct gpcv2_irqchip_data *cd = d->chip_data;
+	unsigned int idx = d->hwirq / 32;
+	unsigned long flags;
+	void __iomem *reg;
+	u32 mask, val;
+
+	raw_spin_lock_irqsave(&cd->rlock, flags);
+	reg = cd->gpc_base + cd->cpu2wakeup + idx * 4;
+	mask = 1 << (d->hwirq % 32);
+	val = cd->wakeup_sources[idx];
+
+	cd->wakeup_sources[idx] = on ? (val & ~mask) : (val | mask);
+	raw_spin_unlock_irqrestore(&cd->rlock, flags);
+
+	/*
+	 * Do *not* call into the parent, as the GIC doesn't have any
+	 * wake-up facility...
+	 */
+
+	return 0;
+}
+
+static void imx_gpcv2_irq_unmask(struct irq_data *d)
+{
+	struct gpcv2_irqchip_data *cd = d->chip_data;
+	void __iomem *reg;
+	u32 val;
+
+	raw_spin_lock(&cd->rlock);
+	reg = cd->gpc_base + cd->cpu2wakeup + d->hwirq / 32 * 4;
+	val = readl_relaxed(reg);
+	val &= ~(1 << (d->hwirq % 32));
+	writel_relaxed(val, reg);
+	raw_spin_unlock(&cd->rlock);
+
+	irq_chip_unmask_parent(d);
+}
+
+static void imx_gpcv2_irq_mask(struct irq_data *d)
+{
+	struct gpcv2_irqchip_data *cd = d->chip_data;
+	void __iomem *reg;
+	u32 val;
+
+	raw_spin_lock(&cd->rlock);
+	reg = cd->gpc_base + cd->cpu2wakeup + d->hwirq / 32 * 4;
+	val = readl_relaxed(reg);
+	val |= 1 << (d->hwirq % 32);
+	writel_relaxed(val, reg);
+	raw_spin_unlock(&cd->rlock);
+
+	irq_chip_mask_parent(d);
+}
+
+static struct irq_chip gpcv2_irqchip_data_chip = {
+	.name			= "GPCv2",
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_mask		= imx_gpcv2_irq_mask,
+	.irq_unmask		= imx_gpcv2_irq_unmask,
+	.irq_set_wake		= imx_gpcv2_irq_set_wake,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+#endif
+};
+
+static int imx_gpcv2_domain_translate(struct irq_domain *d,
+				      struct irq_fwspec *fwspec,
+				      unsigned long *hwirq,
+				      unsigned int *type)
+{
+	if (is_of_node(fwspec->fwnode)) {
+		if (fwspec->param_count != 3)
+			return -EINVAL;
+
+		/* No PPI should point to this domain */
+		if (fwspec->param[0] != 0)
+			return -EINVAL;
+
+		*hwirq = fwspec->param[1];
+		*type = fwspec->param[2];
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int imx_gpcv2_domain_alloc(struct irq_domain *domain,
+				  unsigned int irq, unsigned int nr_irqs,
+				  void *data)
+{
+	struct irq_fwspec *fwspec = data;
+	struct irq_fwspec parent_fwspec;
+	irq_hw_number_t hwirq;
+	unsigned int type;
+	int err;
+	int i;
+
+	err = imx_gpcv2_domain_translate(domain, fwspec, &hwirq, &type);
+	if (err)
+		return err;
+
+	if (hwirq >= GPC_MAX_IRQS)
+		return -EINVAL;
+
+	for (i = 0; i < nr_irqs; i++) {
+		irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
+				&gpcv2_irqchip_data_chip, domain->host_data);
+	}
+
+	parent_fwspec = *fwspec;
+	parent_fwspec.fwnode = domain->parent->fwnode;
+	return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs,
+					    &parent_fwspec);
+}
+
+static const struct irq_domain_ops gpcv2_irqchip_data_domain_ops = {
+	.translate	= imx_gpcv2_domain_translate,
+	.alloc		= imx_gpcv2_domain_alloc,
+	.free		= irq_domain_free_irqs_common,
+};
+
+static int __init imx_gpcv2_irqchip_init(struct device_node *node,
+			       struct device_node *parent)
+{
+	struct irq_domain *parent_domain, *domain;
+	struct gpcv2_irqchip_data *cd;
+	int i;
+
+	if (!parent) {
+		pr_err("%pOF: no parent, giving up\n", node);
+		return -ENODEV;
+	}
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain) {
+		pr_err("%pOF: unable to get parent domain\n", node);
+		return -ENXIO;
+	}
+
+	cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL);
+	if (!cd) {
+		pr_err("kzalloc failed!\n");
+		return -ENOMEM;
+	}
+
+	raw_spin_lock_init(&cd->rlock);
+
+	cd->gpc_base = of_iomap(node, 0);
+	if (!cd->gpc_base) {
+		pr_err("fsl-gpcv2: unable to map gpc registers\n");
+		kfree(cd);
+		return -ENOMEM;
+	}
+
+	domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
+				node, &gpcv2_irqchip_data_domain_ops, cd);
+	if (!domain) {
+		iounmap(cd->gpc_base);
+		kfree(cd);
+		return -ENOMEM;
+	}
+	irq_set_default_host(domain);
+
+	/* Initially mask all interrupts */
+	for (i = 0; i < IMR_NUM; i++) {
+		writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE0 + i * 4);
+		writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE1 + i * 4);
+		cd->wakeup_sources[i] = ~0;
+	}
+
+	/* Use CORE0 as the default CPU for the GPC to wake up */
+	cd->cpu2wakeup = GPC_IMR1_CORE0;
+
+	/*
+	 * Due to a hardware design flaw, we need to make sure the GPR
+	 * interrupt (#32) is unmasked during RUN mode to avoid entering
+	 * DSM by mistake.
+	 */
+	writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup);
+
+	imx_gpcv2_instance = cd;
+	register_syscore_ops(&imx_gpcv2_syscore_ops);
+
+	/*
+	 * Clear the OF_POPULATED flag set in of_irq_init so that
+	 * later the GPC power domain driver will not be skipped.
+	 */
+	of_node_clear_flag(node, OF_POPULATED);
+	return 0;
+}
+
+IRQCHIP_DECLARE(imx_gpcv2, "fsl,imx7d-gpc", imx_gpcv2_irqchip_init);
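
For reference, a sketch of the three-cell, GIC-format specifier that
imx_gpcv2_domain_translate() accepts; the SPI number 74 is illustrative and
gpc_domain stands for the hierarchy domain created above:

	struct irq_fwspec fwspec = {
		.fwnode      = gpc_domain->fwnode,
		.param_count = 3,
		/* { 0 = SPI (PPIs are rejected), hwirq, trigger type } */
		.param       = { 0, 74, IRQ_TYPE_LEVEL_HIGH },
	};
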
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
new file mode 100644
index 0000000..2ff0898
--- /dev/null
+++ b/drivers/irqchip/irq-ingenic.c
@@ -0,0 +1,177 @@
+/*
+ *  Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ *  JZ4740 platform IRQ support
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/ingenic.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/timex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/mach-jz4740/irq.h>
+
+struct ingenic_intc_data {
+	void __iomem *base;
+	unsigned num_chips;
+};
+
+#define JZ_REG_INTC_STATUS	0x00
+#define JZ_REG_INTC_MASK	0x04
+#define JZ_REG_INTC_SET_MASK	0x08
+#define JZ_REG_INTC_CLEAR_MASK	0x0c
+#define JZ_REG_INTC_PENDING	0x10
+#define CHIP_SIZE		0x20
+
+static irqreturn_t intc_cascade(int irq, void *data)
+{
+	struct ingenic_intc_data *intc = irq_get_handler_data(irq);
+	uint32_t irq_reg;
+	unsigned i;
+
+	for (i = 0; i < intc->num_chips; i++) {
+		irq_reg = readl(intc->base + (i * CHIP_SIZE) +
+				JZ_REG_INTC_PENDING);
+		if (!irq_reg)
+			continue;
+
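+		/*
+		 * Only the highest pending source is dispatched per pass;
+		 * the cascade line stays asserted and this handler re-runs
+		 * for any remaining sources.
+		 */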
+		generic_handle_irq(__fls(irq_reg) + (i * 32) + JZ4740_IRQ_BASE);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void intc_irq_set_mask(struct irq_chip_generic *gc, uint32_t mask)
+{
+	struct irq_chip_regs *regs = &gc->chip_types->regs;
+
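+	/*
+	 * regs->enable/disable point at the CLEAR_MASK/SET_MASK registers,
+	 * so this unmasks exactly the bits set in @mask and masks the rest.
+	 */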
+	writel(mask, gc->reg_base + regs->enable);
+	writel(~mask, gc->reg_base + regs->disable);
+}
+
+void ingenic_intc_irq_suspend(struct irq_data *data)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+	intc_irq_set_mask(gc, gc->wake_active);
+}
+
+void ingenic_intc_irq_resume(struct irq_data *data)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+	intc_irq_set_mask(gc, gc->mask_cache);
+}
+
+static struct irqaction intc_cascade_action = {
+	.handler = intc_cascade,
+	.name = "SoC intc cascade interrupt",
+};
+
+static int __init ingenic_intc_of_init(struct device_node *node,
+				       unsigned num_chips)
+{
+	struct ingenic_intc_data *intc;
+	struct irq_chip_generic *gc;
+	struct irq_chip_type *ct;
+	struct irq_domain *domain;
+	int parent_irq, err = 0;
+	unsigned i;
+
+	intc = kzalloc(sizeof(*intc), GFP_KERNEL);
+	if (!intc) {
+		err = -ENOMEM;
+		goto out_err;
+	}
+
+	parent_irq = irq_of_parse_and_map(node, 0);
+	if (!parent_irq) {
+		err = -EINVAL;
+		goto out_free;
+	}
+
+	err = irq_set_handler_data(parent_irq, intc);
+	if (err)
+		goto out_unmap_irq;
+
+	intc->num_chips = num_chips;
+	intc->base = of_iomap(node, 0);
+	if (!intc->base) {
+		err = -ENODEV;
+		goto out_unmap_irq;
+	}
+
+	for (i = 0; i < num_chips; i++) {
+		/* Mask all irqs */
+		writel(0xffffffff, intc->base + (i * CHIP_SIZE) +
+		       JZ_REG_INTC_SET_MASK);
+
+		gc = irq_alloc_generic_chip("INTC", 1,
+					    JZ4740_IRQ_BASE + (i * 32),
+					    intc->base + (i * CHIP_SIZE),
+					    handle_level_irq);
+
+		gc->wake_enabled = IRQ_MSK(32);
+
+		ct = gc->chip_types;
+		ct->regs.enable = JZ_REG_INTC_CLEAR_MASK;
+		ct->regs.disable = JZ_REG_INTC_SET_MASK;
+		ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+		ct->chip.irq_mask = irq_gc_mask_disable_reg;
+		ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
+		ct->chip.irq_set_wake = irq_gc_set_wake;
+		ct->chip.irq_suspend = ingenic_intc_irq_suspend;
+		ct->chip.irq_resume = ingenic_intc_irq_resume;
+
+		irq_setup_generic_chip(gc, IRQ_MSK(32), 0, 0,
+				       IRQ_NOPROBE | IRQ_LEVEL);
+	}
+
+	domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0,
+				       &irq_domain_simple_ops, NULL);
+	if (!domain)
+		pr_warn("unable to register IRQ domain\n");
+
+	setup_irq(parent_irq, &intc_cascade_action);
+	return 0;
+
+out_unmap_irq:
+	irq_dispose_mapping(parent_irq);
+out_free:
+	kfree(intc);
+out_err:
+	return err;
+}
+
+static int __init intc_1chip_of_init(struct device_node *node,
+				     struct device_node *parent)
+{
+	return ingenic_intc_of_init(node, 1);
+}
+IRQCHIP_DECLARE(jz4740_intc, "ingenic,jz4740-intc", intc_1chip_of_init);
+IRQCHIP_DECLARE(jz4725b_intc, "ingenic,jz4725b-intc", intc_1chip_of_init);
+
+static int __init intc_2chip_of_init(struct device_node *node,
+	struct device_node *parent)
+{
+	return ingenic_intc_of_init(node, 2);
+}
+IRQCHIP_DECLARE(jz4770_intc, "ingenic,jz4770-intc", intc_2chip_of_init);
+IRQCHIP_DECLARE(jz4775_intc, "ingenic,jz4775-intc", intc_2chip_of_init);
+IRQCHIP_DECLARE(jz4780_intc, "ingenic,jz4780-intc", intc_2chip_of_init);
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
new file mode 100644
index 0000000..033bccb
--- /dev/null
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -0,0 +1,113 @@
+/*
+ * J-Core SoC AIC driver
+ *
+ * Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define JCORE_AIC_MAX_HWIRQ	127
+#define JCORE_AIC1_MIN_HWIRQ	16
+#define JCORE_AIC2_MIN_HWIRQ	64
+
+#define JCORE_AIC1_INTPRI_REG	8
+
+static struct irq_chip jcore_aic;
+
+/*
+ * The J-Core AIC1 and AIC2 are cpu-local interrupt controllers and do
+ * not distinguish or use distinct irq number ranges for per-cpu event
+ * interrupts (timer, IPI). Since information to determine whether a
+ * particular irq number should be treated as per-cpu is not available
+ * at mapping time, we use a wrapper handler function which chooses
+ * the right handler at runtime based on whether IRQF_PERCPU was used
+ * when requesting the irq.
+ */
+
+static void handle_jcore_irq(struct irq_desc *desc)
+{
+	if (irqd_is_per_cpu(irq_desc_get_irq_data(desc)))
+		handle_percpu_irq(desc);
+	else
+		handle_simple_irq(desc);
+}
+
+static int jcore_aic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+				   irq_hw_number_t hwirq)
+{
+	struct irq_chip *aic = d->host_data;
+
+	irq_set_chip_and_handler(irq, aic, handle_jcore_irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops jcore_aic_irqdomain_ops = {
+	.map = jcore_aic_irqdomain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+static void noop(struct irq_data *data)
+{
+}
+
+static int __init aic_irq_of_init(struct device_node *node,
+				  struct device_node *parent)
+{
+	unsigned min_irq = JCORE_AIC2_MIN_HWIRQ;
+	unsigned dom_sz = JCORE_AIC_MAX_HWIRQ+1;
+	struct irq_domain *domain;
+
+	pr_info("Initializing J-Core AIC\n");
+
+	/* AIC1 needs priority initialization to receive interrupts. */
+	if (of_device_is_compatible(node, "jcore,aic1")) {
+		unsigned cpu;
+
+		for_each_present_cpu(cpu) {
+			void __iomem *base = of_iomap(node, cpu);
+
+			if (!base) {
+				pr_err("Unable to map AIC for cpu %u\n", cpu);
+				return -ENOMEM;
+			}
+			__raw_writel(0xffffffff, base + JCORE_AIC1_INTPRI_REG);
+			iounmap(base);
+		}
+		min_irq = JCORE_AIC1_MIN_HWIRQ;
+	}
+
+	/*
+	 * The irq chip framework requires either mask/unmask or enable/disable
+	 * function pointers to be provided, but the hardware does not have any
+	 * such mechanism; the only interrupt masking is at the cpu level and
+	 * it affects all interrupts. We provide dummy mask/unmask. The hardware
+	 * handles all interrupt control and clears pending status when the cpu
+	 * accepts the interrupt.
+	 */
+	jcore_aic.irq_mask = noop;
+	jcore_aic.irq_unmask = noop;
+	jcore_aic.name = "AIC";
+
+	domain = irq_domain_add_linear(node, dom_sz, &jcore_aic_irqdomain_ops,
+				       &jcore_aic);
+	if (!domain)
+		return -ENOMEM;
+	irq_create_strict_mappings(domain, min_irq, min_irq, dom_sz - min_irq);
+
+	return 0;
+}
+
+IRQCHIP_DECLARE(jcore_aic2, "jcore,aic2", aic_irq_of_init);
+IRQCHIP_DECLARE(jcore_aic1, "jcore,aic1", aic_irq_of_init);
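
A consumer-side sketch of the distinction handle_jcore_irq() keys off; the
irq numbers, ISRs and example_setup() are illustrative:

	static int example_setup(void)
	{
		int err;

		/* Per-cpu event (e.g. the SoC timer): IRQF_PERCPU makes
		 * the wrapper pick handle_percpu_irq() for this irq. */
		err = request_irq(TIMER_IRQ, timer_isr,
				  IRQF_TIMER | IRQF_PERCPU, "jcore_pit", NULL);
		if (err)
			return err;

		/* Ordinary device irq: dispatched via handle_simple_irq(). */
		return request_irq(UART_IRQ, uart_isr, 0, "uartlite", NULL);
	}
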
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
new file mode 100644
index 0000000..efbcf84
--- /dev/null
+++ b/drivers/irqchip/irq-keystone.c
@@ -0,0 +1,240 @@
+/*
+ * Texas Instruments Keystone IRQ controller IP driver
+ *
+ * Copyright (C) 2014 Texas Instruments, Inc.
+ * Author: Sajesh Kumar Saran <sajesh@ti.com>
+ *	   Grygorii Strashko <grygorii.strashko@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irq.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+/* The source ID bits occupy bits 4 to 31 (28 bits in total) */
+#define BIT_OFS			4
+#define KEYSTONE_N_IRQ		(32 - BIT_OFS)
+
+struct keystone_irq_device {
+	struct device		*dev;
+	struct irq_chip		 chip;
+	u32			 mask;
+	int			 irq;
+	struct irq_domain	*irqd;
+	struct regmap		*devctrl_regs;
+	u32			devctrl_offset;
+	raw_spinlock_t		wa_lock;
+};
+
+static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq)
+{
+	int ret;
+	u32 val = 0;
+
+	ret = regmap_read(kirq->devctrl_regs, kirq->devctrl_offset, &val);
+	if (ret < 0)
+		dev_dbg(kirq->dev, "irq read failed ret(%d)\n", ret);
+	return val;
+}
+
+static inline void
+keystone_irq_writel(struct keystone_irq_device *kirq, u32 value)
+{
+	int ret;
+
+	ret = regmap_write(kirq->devctrl_regs, kirq->devctrl_offset, value);
+	if (ret < 0)
+		dev_dbg(kirq->dev, "irq write failed ret(%d)\n", ret);
+}
+
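+/*
+ * Masking is done purely in software: the callbacks below only update the
+ * cached mask, which keystone_irq_handler() uses to filter pending sources.
+ */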
+static void keystone_irq_setmask(struct irq_data *d)
+{
+	struct keystone_irq_device *kirq = irq_data_get_irq_chip_data(d);
+
+	kirq->mask |= BIT(d->hwirq);
+	dev_dbg(kirq->dev, "mask %lu [%x]\n", d->hwirq, kirq->mask);
+}
+
+static void keystone_irq_unmask(struct irq_data *d)
+{
+	struct keystone_irq_device *kirq = irq_data_get_irq_chip_data(d);
+
+	kirq->mask &= ~BIT(d->hwirq);
+	dev_dbg(kirq->dev, "unmask %lu [%x]\n", d->hwirq, kirq->mask);
+}
+
+static void keystone_irq_ack(struct irq_data *d)
+{
+	/* nothing to do here */
+}
+
+static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq)
+{
+	struct keystone_irq_device *kirq = keystone_irq;
+	unsigned long wa_lock_flags;
+	unsigned long pending;
+	int src, virq;
+
+	dev_dbg(kirq->dev, "start irq %d\n", irq);
+
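+	/* Write-1-to-clear: writing the word back acks the sources read. */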
+	pending = keystone_irq_readl(kirq);
+	keystone_irq_writel(kirq, pending);
+
+	dev_dbg(kirq->dev, "pending 0x%lx, mask 0x%x\n", pending, kirq->mask);
+
+	pending = (pending >> BIT_OFS) & ~kirq->mask;
+
+	dev_dbg(kirq->dev, "pending after mask 0x%lx\n", pending);
+
+	for (src = 0; src < KEYSTONE_N_IRQ; src++) {
+		if (BIT(src) & pending) {
+			virq = irq_find_mapping(kirq->irqd, src);
+			dev_dbg(kirq->dev, "dispatch bit %d, virq %d\n",
+				src, virq);
+			if (!virq)
+				dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n",
+					 src, virq);
+			raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags);
+			generic_handle_irq(virq);
+			raw_spin_unlock_irqrestore(&kirq->wa_lock,
+						   wa_lock_flags);
+		}
+	}
+
+	dev_dbg(kirq->dev, "end irq %d\n", irq);
+	return IRQ_HANDLED;
+}
+
+static int keystone_irq_map(struct irq_domain *h, unsigned int virq,
+				irq_hw_number_t hw)
+{
+	struct keystone_irq_device *kirq = h->host_data;
+
+	irq_set_chip_data(virq, kirq);
+	irq_set_chip_and_handler(virq, &kirq->chip, handle_level_irq);
+	irq_set_probe(virq);
+	return 0;
+}
+
+static const struct irq_domain_ops keystone_irq_ops = {
+	.map	= keystone_irq_map,
+	.xlate	= irq_domain_xlate_onecell,
+};
+
+static int keystone_irq_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct keystone_irq_device *kirq;
+	int ret;
+
+	if (np == NULL)
+		return -EINVAL;
+
+	kirq = devm_kzalloc(dev, sizeof(*kirq), GFP_KERNEL);
+	if (!kirq)
+		return -ENOMEM;
+
+	kirq->devctrl_regs =
+		syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
+	if (IS_ERR(kirq->devctrl_regs))
+		return PTR_ERR(kirq->devctrl_regs);
+
+	ret = of_property_read_u32_index(np, "ti,syscon-dev", 1,
+					 &kirq->devctrl_offset);
+	if (ret) {
+		dev_err(dev, "couldn't read the devctrl_offset offset!\n");
+		return ret;
+	}
+
+	kirq->irq = platform_get_irq(pdev, 0);
+	if (kirq->irq < 0) {
+		dev_err(dev, "no irq resource %d\n", kirq->irq);
+		return kirq->irq;
+	}
+
+	kirq->dev = dev;
+	kirq->mask = ~0x0;
+	kirq->chip.name		= "keystone-irq";
+	kirq->chip.irq_ack	= keystone_irq_ack;
+	kirq->chip.irq_mask	= keystone_irq_setmask;
+	kirq->chip.irq_unmask	= keystone_irq_unmask;
+
+	kirq->irqd = irq_domain_add_linear(np, KEYSTONE_N_IRQ,
+					   &keystone_irq_ops, kirq);
+	if (!kirq->irqd) {
+		dev_err(dev, "IRQ domain registration failed\n");
+		return -ENODEV;
+	}
+
+	raw_spin_lock_init(&kirq->wa_lock);
+
+	platform_set_drvdata(pdev, kirq);
+
+	ret = request_irq(kirq->irq, keystone_irq_handler,
+			  0, dev_name(dev), kirq);
+	if (ret) {
+		irq_domain_remove(kirq->irqd);
+		return ret;
+	}
+
+	/* clear all source bits */
+	keystone_irq_writel(kirq, ~0x0);
+
+	dev_info(dev, "irqchip registered, nr_irqs %u\n", KEYSTONE_N_IRQ);
+
+	return 0;
+}
+
+static int keystone_irq_remove(struct platform_device *pdev)
+{
+	struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
+	int hwirq;
+
+	free_irq(kirq->irq, kirq);
+
+	for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++)
+		irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));
+
+	irq_domain_remove(kirq->irqd);
+	return 0;
+}
+
+static const struct of_device_id keystone_irq_dt_ids[] = {
+	{ .compatible = "ti,keystone-irq", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, keystone_irq_dt_ids);
+
+static struct platform_driver keystone_irq_device_driver = {
+	.probe		= keystone_irq_probe,
+	.remove		= keystone_irq_remove,
+	.driver		= {
+		.name	= "keystone_irq",
+		.of_match_table	= of_match_ptr(keystone_irq_dt_ids),
+	}
+};
+
+module_platform_driver(keystone_irq_device_driver);
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_AUTHOR("Sajesh Kumar Saran");
+MODULE_AUTHOR("Grygorii Strashko");
+MODULE_DESCRIPTION("Keystone IRQ chip");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
new file mode 100644
index 0000000..a48357d
--- /dev/null
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <asm/exception.h>
+
+#define LPC32XX_INTC_MASK		0x00
+#define LPC32XX_INTC_RAW		0x04
+#define LPC32XX_INTC_STAT		0x08
+#define LPC32XX_INTC_POL		0x0C
+#define LPC32XX_INTC_TYPE		0x10
+#define LPC32XX_INTC_FIQ		0x14
+
+#define NR_LPC32XX_IC_IRQS		32
+
+struct lpc32xx_irq_chip {
+	void __iomem *base;
+	struct irq_domain *domain;
+	struct irq_chip chip;
+};
+
+static struct lpc32xx_irq_chip *lpc32xx_mic_irqc;
+
+static inline u32 lpc32xx_ic_read(struct lpc32xx_irq_chip *ic, u32 reg)
+{
+	return readl_relaxed(ic->base + reg);
+}
+
+static inline void lpc32xx_ic_write(struct lpc32xx_irq_chip *ic,
+				    u32 reg, u32 val)
+{
+	writel_relaxed(val, ic->base + reg);
+}
+
+static void lpc32xx_irq_mask(struct irq_data *d)
+{
+	struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+	u32 val, mask = BIT(d->hwirq);
+
+	val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) & ~mask;
+	lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
+}
+
+static void lpc32xx_irq_unmask(struct irq_data *d)
+{
+	struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+	u32 val, mask = BIT(d->hwirq);
+
+	val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) | mask;
+	lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
+}
+
+static void lpc32xx_irq_ack(struct irq_data *d)
+{
+	struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+	u32 mask = BIT(d->hwirq);
+
+	lpc32xx_ic_write(ic, LPC32XX_INTC_RAW, mask);
+}
+
+static int lpc32xx_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+	u32 val, mask = BIT(d->hwirq);
+	bool high, edge;
+
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		edge = true;
+		high = true;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		edge = true;
+		high = false;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		edge = false;
+		high = true;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		edge = false;
+		high = false;
+		break;
+	default:
+		pr_info("unsupported irq type %d\n", type);
+		return -EINVAL;
+	}
+
+	irqd_set_trigger_type(d, type);
+
+	val = lpc32xx_ic_read(ic, LPC32XX_INTC_POL);
+	if (high)
+		val |= mask;
+	else
+		val &= ~mask;
+	lpc32xx_ic_write(ic, LPC32XX_INTC_POL, val);
+
+	val = lpc32xx_ic_read(ic, LPC32XX_INTC_TYPE);
+	if (edge) {
+		val |= mask;
+		irq_set_handler_locked(d, handle_edge_irq);
+	} else {
+		val &= ~mask;
+		irq_set_handler_locked(d, handle_level_irq);
+	}
+	lpc32xx_ic_write(ic, LPC32XX_INTC_TYPE, val);
+
+	return 0;
+}
+
+static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs)
+{
+	struct lpc32xx_irq_chip *ic = lpc32xx_mic_irqc;
+	u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
+
+	while (hwirq) {
+		irq = __ffs(hwirq);
+		hwirq &= ~BIT(irq);
+		handle_domain_irq(ic->domain, irq, regs);
+	}
+}
+
+static void lpc32xx_sic_handler(struct irq_desc *desc)
+{
+	struct lpc32xx_irq_chip *ic = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
+
+	chained_irq_enter(chip, desc);
+
+	while (hwirq) {
+		irq = __ffs(hwirq);
+		hwirq &= ~BIT(irq);
+		generic_handle_irq(irq_find_mapping(ic->domain, irq));
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static int lpc32xx_irq_domain_map(struct irq_domain *id, unsigned int virq,
+				  irq_hw_number_t hw)
+{
+	struct lpc32xx_irq_chip *ic = id->host_data;
+
+	irq_set_chip_data(virq, ic);
+	irq_set_chip_and_handler(virq, &ic->chip, handle_level_irq);
+	irq_set_status_flags(virq, IRQ_LEVEL);
+	irq_set_noprobe(virq);
+
+	return 0;
+}
+
+static void lpc32xx_irq_domain_unmap(struct irq_domain *id, unsigned int virq)
+{
+	irq_set_chip_and_handler(virq, NULL, NULL);
+}
+
+static const struct irq_domain_ops lpc32xx_irq_domain_ops = {
+	.map    = lpc32xx_irq_domain_map,
+	.unmap	= lpc32xx_irq_domain_unmap,
+	.xlate  = irq_domain_xlate_twocell,
+};
+
+static int __init lpc32xx_of_ic_init(struct device_node *node,
+				     struct device_node *parent)
+{
+	struct lpc32xx_irq_chip *irqc;
+	bool is_mic = of_device_is_compatible(node, "nxp,lpc3220-mic");
+	const __be32 *reg = of_get_property(node, "reg", NULL);
+	u32 parent_irq, i, addr = reg ? be32_to_cpu(*reg) : 0;
+
+	irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+	if (!irqc)
+		return -ENOMEM;
+
+	irqc->base = of_iomap(node, 0);
+	if (!irqc->base) {
+		pr_err("%pOF: unable to map registers\n", node);
+		kfree(irqc);
+		return -EINVAL;
+	}
+
+	irqc->chip.irq_ack = lpc32xx_irq_ack;
+	irqc->chip.irq_mask = lpc32xx_irq_mask;
+	irqc->chip.irq_unmask = lpc32xx_irq_unmask;
+	irqc->chip.irq_set_type = lpc32xx_irq_set_type;
+	if (is_mic)
+		irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.mic", addr);
+	else
+		irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.sic", addr);
+
+	irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS,
+					     &lpc32xx_irq_domain_ops, irqc);
+	if (!irqc->domain) {
+		pr_err("unable to add irq domain\n");
+		iounmap(irqc->base);
+		kfree(irqc->chip.name);
+		kfree(irqc);
+		return -ENODEV;
+	}
+
+	if (is_mic) {
+		lpc32xx_mic_irqc = irqc;
+		set_handle_irq(lpc32xx_handle_irq);
+	} else {
+		for (i = 0; i < of_irq_count(node); i++) {
+			parent_irq = irq_of_parse_and_map(node, i);
+			if (parent_irq)
+				irq_set_chained_handler_and_data(parent_irq,
+						 lpc32xx_sic_handler, irqc);
+		}
+	}
+
+	lpc32xx_ic_write(irqc, LPC32XX_INTC_MASK, 0x00);
+	lpc32xx_ic_write(irqc, LPC32XX_INTC_POL,  0x00);
+	lpc32xx_ic_write(irqc, LPC32XX_INTC_TYPE, 0x00);
+
+	return 0;
+}
+
+IRQCHIP_DECLARE(nxp_lpc32xx_mic, "nxp,lpc3220-mic", lpc32xx_of_ic_init);
+IRQCHIP_DECLARE(nxp_lpc32xx_sic, "nxp,lpc3220-sic", lpc32xx_of_ic_init);
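
A one-call consumer sketch: changing the trigger of a mapped interrupt goes
through lpc32xx_irq_set_type() above, which reprograms POL/TYPE and swaps
the flow handler; eint_virq is an illustrative virq:

	/* Switch an external interrupt pin to falling-edge triggering. */
	irq_set_irq_type(eint_virq, IRQ_TYPE_EDGE_FALLING);
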
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
new file mode 100644
index 0000000..c671b32
--- /dev/null
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -0,0 +1,436 @@
+/*
+ * Freescale SCFG MSI(-X) support
+ *
+ * Copyright (C) 2016 Freescale Semiconductor.
+ *
+ * Author: Minghuan Lian <Minghuan.Lian@nxp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+#include <linux/dma-iommu.h>
+
+#define MSI_IRQS_PER_MSIR	32
+#define MSI_MSIR_OFFSET		4
+
+#define MSI_LS1043V1_1_IRQS_PER_MSIR	8
+#define MSI_LS1043V1_1_MSIR_OFFSET	0x10
+
+struct ls_scfg_msi_cfg {
+	u32 ibs_shift; /* Shift of interrupt bit select */
+	u32 msir_irqs; /* The irq number per MSIR */
+	u32 msir_base; /* The base address of MSIR */
+};
+
+struct ls_scfg_msir {
+	struct ls_scfg_msi *msi_data;
+	unsigned int index;
+	unsigned int gic_irq;
+	unsigned int bit_start;
+	unsigned int bit_end;
+	unsigned int srs; /* Shared interrupt register select */
+	void __iomem *reg;
+};
+
+struct ls_scfg_msi {
+	spinlock_t		lock;
+	struct platform_device	*pdev;
+	struct irq_domain	*parent;
+	struct irq_domain	*msi_domain;
+	void __iomem		*regs;
+	phys_addr_t		msiir_addr;
+	struct ls_scfg_msi_cfg	*cfg;
+	u32			msir_num;
+	struct ls_scfg_msir	*msir;
+	u32			irqs_num;
+	unsigned long		*used;
+};
+
+static struct irq_chip ls_scfg_msi_irq_chip = {
+	.name = "MSI",
+	.irq_mask	= pci_msi_mask_irq,
+	.irq_unmask	= pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info ls_scfg_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS |
+		   MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_PCI_MSIX),
+	.chip	= &ls_scfg_msi_irq_chip,
+};
+
+static int msi_affinity_flag = 1;
+
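+/* Booting with "lsmsi=no-affinity" disables per-CPU steering of MSIs. */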
+static int __init early_parse_ls_scfg_msi(char *p)
+{
+	if (p && strncmp(p, "no-affinity", 11) == 0)
+		msi_affinity_flag = 0;
+	else
+		msi_affinity_flag = 1;
+
+	return 0;
+}
+early_param("lsmsi", early_parse_ls_scfg_msi);
+
+static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
+
+	msg->address_hi = upper_32_bits(msi_data->msiir_addr);
+	msg->address_lo = lower_32_bits(msi_data->msiir_addr);
+	msg->data = data->hwirq;
+
+	if (msi_affinity_flag) {
+		const struct cpumask *mask;
+
+		mask = irq_data_get_effective_affinity_mask(data);
+		msg->data |= cpumask_first(mask);
+	}
+
+	iommu_dma_map_msi_msg(data->irq, msg);
+}
+
+static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
+				    const struct cpumask *mask, bool force)
+{
+	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
+	u32 cpu;
+
+	if (!msi_affinity_flag)
+		return -EINVAL;
+
+	if (!force)
+		cpu = cpumask_any_and(mask, cpu_online_mask);
+	else
+		cpu = cpumask_first(mask);
+
+	if (cpu >= msi_data->msir_num)
+		return -EINVAL;
+
+	if (msi_data->msir[cpu].gic_irq <= 0) {
+		pr_warn("cannot bind the irq to cpu%d\n", cpu);
+		return -EINVAL;
+	}
+
+	irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip ls_scfg_msi_parent_chip = {
+	.name			= "SCFG",
+	.irq_compose_msi_msg	= ls_scfg_msi_compose_msg,
+	.irq_set_affinity	= ls_scfg_msi_set_affinity,
+};
+
+static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
+					unsigned int virq,
+					unsigned int nr_irqs,
+					void *args)
+{
+	struct ls_scfg_msi *msi_data = domain->host_data;
+	int pos, err = 0;
+
+	WARN_ON(nr_irqs != 1);
+
+	spin_lock(&msi_data->lock);
+	pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
+	if (pos < msi_data->irqs_num)
+		__set_bit(pos, msi_data->used);
+	else
+		err = -ENOSPC;
+	spin_unlock(&msi_data->lock);
+
+	if (err)
+		return err;
+
+	irq_domain_set_info(domain, virq, pos,
+			    &ls_scfg_msi_parent_chip, msi_data,
+			    handle_simple_irq, NULL, NULL);
+
+	return 0;
+}
+
+static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
+				   unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d);
+	int pos;
+
+	pos = d->hwirq;
+	if (pos < 0 || pos >= msi_data->irqs_num) {
+		pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
+		return;
+	}
+
+	spin_lock(&msi_data->lock);
+	__clear_bit(pos, msi_data->used);
+	spin_unlock(&msi_data->lock);
+}
+
+static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
+	.alloc	= ls_scfg_msi_domain_irq_alloc,
+	.free	= ls_scfg_msi_domain_irq_free,
+};
+
+static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
+{
+	struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
+	struct ls_scfg_msi *msi_data = msir->msi_data;
+	unsigned long val;
+	int pos, size, virq, hwirq;
+
+	chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+	val = ioread32be(msir->reg);
+
+	pos = msir->bit_start;
+	size = msir->bit_end + 1;
+
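+	/*
+	 * MSIR status bits are numbered from the MSB end, so
+	 * (bit_end - pos) recovers the interrupt bit select; combined
+	 * with the register select (srs) this rebuilds the hwirq that
+	 * ls_scfg_msi_compose_msg() put in the MSI message data.
+	 */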
+	for_each_set_bit_from(pos, &val, size) {
+		hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
+			msir->srs;
+		virq = irq_find_mapping(msi_data->parent, hwirq);
+		if (virq)
+			generic_handle_irq(virq);
+	}
+
+	chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
+static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
+{
+	/* Initialize MSI domain parent */
+	msi_data->parent = irq_domain_add_linear(NULL,
+						 msi_data->irqs_num,
+						 &ls_scfg_msi_domain_ops,
+						 msi_data);
+	if (!msi_data->parent) {
+		dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	msi_data->msi_domain = pci_msi_create_irq_domain(
+				of_node_to_fwnode(msi_data->pdev->dev.of_node),
+				&ls_scfg_msi_domain_info,
+				msi_data->parent);
+	if (!msi_data->msi_domain) {
+		dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n");
+		irq_domain_remove(msi_data->parent);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
+{
+	struct ls_scfg_msir *msir;
+	int virq, i, hwirq;
+
+	virq = platform_get_irq(msi_data->pdev, index);
+	if (virq <= 0)
+		return -ENODEV;
+
+	msir = &msi_data->msir[index];
+	msir->index = index;
+	msir->msi_data = msi_data;
+	msir->gic_irq = virq;
+	msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;
+
+	if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
+		msir->bit_start = 32 - ((msir->index + 1) *
+				  MSI_LS1043V1_1_IRQS_PER_MSIR);
+		msir->bit_end = msir->bit_start +
+				MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
+	} else {
+		msir->bit_start = 0;
+		msir->bit_end = msi_data->cfg->msir_irqs - 1;
+	}
+
+	irq_set_chained_handler_and_data(msir->gic_irq,
+					 ls_scfg_msi_irq_handler,
+					 msir);
+
+	if (msi_affinity_flag) {
+		/* Associate MSIR interrupt to the cpu */
+		irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
+		msir->srs = 0; /* This value is determined by the CPU */
+	} else {
+		msir->srs = index;
+	}
+
+	/* Release the hwirqs corresponding to this MSIR */
+	if (!msi_affinity_flag || msir->index == 0) {
+		for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
+			hwirq = i << msi_data->cfg->ibs_shift | msir->index;
+			bitmap_clear(msi_data->used, hwirq, 1);
+		}
+	}
+
+	return 0;
+}
+
+static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
+{
+	struct ls_scfg_msi *msi_data = msir->msi_data;
+	int i, hwirq;
+
+	if (msir->gic_irq > 0)
+		irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);
+
+	for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
+		hwirq = i << msi_data->cfg->ibs_shift | msir->index;
+		bitmap_set(msi_data->used, hwirq, 1);
+	}
+
+	return 0;
+}
+
+static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
+	.ibs_shift = 3,
+	.msir_irqs = MSI_IRQS_PER_MSIR,
+	.msir_base = MSI_MSIR_OFFSET,
+};
+
+static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
+	.ibs_shift = 2,
+	.msir_irqs = MSI_IRQS_PER_MSIR,
+	.msir_base = MSI_MSIR_OFFSET,
+};
+
+static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
+	.ibs_shift = 2,
+	.msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
+	.msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
+};
+
+static const struct of_device_id ls_scfg_msi_id[] = {
+	/* The following two misspelled compatibles are obsolete */
+	{ .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
+	{ .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},
+
+	{ .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg },
+	{ .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
+	{ .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
+	{ .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
+	{ .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);
+
+static int ls_scfg_msi_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	struct ls_scfg_msi *msi_data;
+	struct resource *res;
+	int i, ret;
+
+	match = of_match_device(ls_scfg_msi_id, &pdev->dev);
+	if (!match)
+		return -ENODEV;
+
+	msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
+	if (!msi_data)
+		return -ENOMEM;
+
+	msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(msi_data->regs)) {
+		dev_err(&pdev->dev, "failed to initialize 'regs'\n");
+		return PTR_ERR(msi_data->regs);
+	}
+	msi_data->msiir_addr = res->start;
+
+	msi_data->pdev = pdev;
+	spin_lock_init(&msi_data->lock);
+
+	msi_data->irqs_num = MSI_IRQS_PER_MSIR *
+			     (1 << msi_data->cfg->ibs_shift);
+	msi_data->used = devm_kcalloc(&pdev->dev,
+				    BITS_TO_LONGS(msi_data->irqs_num),
+				    sizeof(*msi_data->used),
+				    GFP_KERNEL);
+	if (!msi_data->used)
+		return -ENOMEM;
+	/*
+	 * Reserve all the hwirqs.
+	 * The available hwirqs will be released in ls_scfg_msi_setup_hwirq().
+	 */
+	bitmap_set(msi_data->used, 0, msi_data->irqs_num);
+
+	msi_data->msir_num = of_irq_count(pdev->dev.of_node);
+
+	if (msi_affinity_flag) {
+		u32 cpu_num;
+
+		cpu_num = num_possible_cpus();
+		if (msi_data->msir_num >= cpu_num)
+			msi_data->msir_num = cpu_num;
+		else
+			msi_affinity_flag = 0;
+	}
+
+	msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
+				      sizeof(*msi_data->msir),
+				      GFP_KERNEL);
+	if (!msi_data->msir)
+		return -ENOMEM;
+
+	for (i = 0; i < msi_data->msir_num; i++)
+		ls_scfg_msi_setup_hwirq(msi_data, i);
+
+	ret = ls_scfg_msi_domains_init(msi_data);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, msi_data);
+
+	return 0;
+}
+
+static int ls_scfg_msi_remove(struct platform_device *pdev)
+{
+	struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < msi_data->msir_num; i++)
+		ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);
+
+	irq_domain_remove(msi_data->msi_domain);
+	irq_domain_remove(msi_data->parent);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver ls_scfg_msi_driver = {
+	.driver = {
+		.name = "ls-scfg-msi",
+		.of_match_table = ls_scfg_msi_id,
+	},
+	.probe = ls_scfg_msi_probe,
+	.remove = ls_scfg_msi_remove,
+};
+
+module_platform_driver(ls_scfg_msi_driver);
+
+MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>");
+MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
new file mode 100644
index 0000000..567b29c
--- /dev/null
+++ b/drivers/irqchip/irq-mbigen.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright (C) 2015 Hisilicon Limited, All Rights Reserved.
+ * Author: Jun Ma <majun258@huawei.com>
+ * Author: Yun Wu <wuyun.wu@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/acpi.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Number of interrupts supported per mbigen node */
+#define IRQS_PER_MBIGEN_NODE		128
+
+/* 64 irqs (Pin0-pin63) are reserved for each mbigen chip */
+#define RESERVED_IRQ_PER_MBIGEN_CHIP	64
+
+/* The maximum IRQ pin number of an mbigen chip (starting from 0) */
+#define MAXIMUM_IRQ_PIN_NUM		1407
+
+/*
+ * Layout of the mbigen vector register:
+ * bit[21:12]:	event id value
+ * bit[11:0]:	device id
+ */
+#define IRQ_EVENT_ID_SHIFT		12
+#define IRQ_EVENT_ID_MASK		0x3ff
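+
+/*
+ * For example, a vector register value of 0x00012345 encodes event id
+ * 0x12 (bits 21:12) and device id 0x345 (bits 11:0).
+ */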
+
+/* register range of each mbigen node */
+#define MBIGEN_NODE_OFFSET		0x1000
+
+/* offset of vector register in mbigen node */
+#define REG_MBIGEN_VEC_OFFSET		0x200
+
+/*
+ * Offset of the clear register in an mbigen node.
+ * This register is used to clear the status of an interrupt.
+ */
+#define REG_MBIGEN_CLEAR_OFFSET		0xa000
+
+/*
+ * Offset of the interrupt type register.
+ * This register is used to configure the interrupt trigger type.
+ */
+#define REG_MBIGEN_TYPE_OFFSET		0x0
+
+/**
+ * struct mbigen_device - holds the information of mbigen device.
+ *
+ * @pdev:		pointer to the platform device structure of mbigen chip.
+ * @base:		mapped address of this mbigen chip.
+ */
+struct mbigen_device {
+	struct platform_device	*pdev;
+	void __iomem		*base;
+};
+
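+/*
+ * A worked example of the offset computation below: hwirq 64 (the first
+ * pin after the 64 reserved per-chip irqs) maps to node 1, pin 0, i.e.
+ * vector register offset 0 * 4 + 1 * 0x1000 + 0x200 = 0x1200.
+ */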
+static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
+{
+	unsigned int nid, pin;
+
+	hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP;
+	nid = hwirq / IRQS_PER_MBIGEN_NODE + 1;
+	pin = hwirq % IRQS_PER_MBIGEN_NODE;
+
+	return pin * 4 + nid * MBIGEN_NODE_OFFSET
+			+ REG_MBIGEN_VEC_OFFSET;
+}
+
+static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
+					u32 *mask, u32 *addr)
+{
+	unsigned int nid, irq_ofst, ofst;
+
+	hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP;
+	nid = hwirq / IRQS_PER_MBIGEN_NODE + 1;
+	irq_ofst = hwirq % IRQS_PER_MBIGEN_NODE;
+
+	*mask = 1 << (irq_ofst % 32);
+	ofst = irq_ofst / 32 * 4;
+
+	*addr = ofst + nid * MBIGEN_NODE_OFFSET
+		+ REG_MBIGEN_TYPE_OFFSET;
+}
+
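+/*
+ * For example, hwirq 65 is cleared via bit 65 % 32 = 1 of the register
+ * at offset (65 / 32) * 4 + 0xa000 = 0xa008.
+ */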
+static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,
+					u32 *mask, u32 *addr)
+{
+	unsigned int ofst = (hwirq / 32) * 4;
+
+	*mask = 1 << (hwirq % 32);
+	*addr = ofst + REG_MBIGEN_CLEAR_OFFSET;
+}
+
+static void mbigen_eoi_irq(struct irq_data *data)
+{
+	void __iomem *base = data->chip_data;
+	u32 mask, addr;
+
+	get_mbigen_clear_reg(data->hwirq, &mask, &addr);
+
+	writel_relaxed(mask, base + addr);
+
+	irq_chip_eoi_parent(data);
+}
+
+static int mbigen_set_type(struct irq_data *data, unsigned int type)
+{
+	void __iomem *base = data->chip_data;
+	u32 mask, addr, val;
+
+	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+		return -EINVAL;
+
+	get_mbigen_type_reg(data->hwirq, &mask, &addr);
+
+	val = readl_relaxed(base + addr);
+
+	if (type == IRQ_TYPE_LEVEL_HIGH)
+		val |= mask;
+	else
+		val &= ~mask;
+
+	writel_relaxed(val, base + addr);
+
+	return 0;
+}
+
+static struct irq_chip mbigen_irq_chip = {
+	.name =			"mbigen-v2",
+	.irq_mask =		irq_chip_mask_parent,
+	.irq_unmask =		irq_chip_unmask_parent,
+	.irq_eoi =		mbigen_eoi_irq,
+	.irq_set_type =		mbigen_set_type,
+	.irq_set_affinity =	irq_chip_set_affinity_parent,
+};
+
+static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+	struct irq_data *d = irq_get_irq_data(desc->irq);
+	void __iomem *base = d->chip_data;
+	u32 val;
+
+	base += get_mbigen_vec_reg(d->hwirq);
+	val = readl_relaxed(base);
+
+	val &= ~(IRQ_EVENT_ID_MASK << IRQ_EVENT_ID_SHIFT);
+	val |= (msg->data << IRQ_EVENT_ID_SHIFT);
+
+	/*
+	 * The doorbell address is encoded in the mbigen register by
+	 * default, so we don't need to program it here.
+	 */
+	writel_relaxed(val, base);
+}
+
+static int mbigen_domain_translate(struct irq_domain *d,
+				    struct irq_fwspec *fwspec,
+				    unsigned long *hwirq,
+				    unsigned int *type)
+{
+	if (is_of_node(fwspec->fwnode) || is_acpi_device_node(fwspec->fwnode)) {
+		if (fwspec->param_count != 2)
+			return -EINVAL;
+
+		if ((fwspec->param[0] > MAXIMUM_IRQ_PIN_NUM) ||
+			(fwspec->param[0] < RESERVED_IRQ_PER_MBIGEN_CHIP))
+			return -EINVAL;
+		else
+			*hwirq = fwspec->param[0];
+
+		/* Only rising-edge and high-level trigger types are supported */
+		if ((fwspec->param[1] == IRQ_TYPE_EDGE_RISING) ||
+			(fwspec->param[1] == IRQ_TYPE_LEVEL_HIGH))
+			*type = fwspec->param[1];
+		else
+			return -EINVAL;
+
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int mbigen_irq_domain_alloc(struct irq_domain *domain,
+					unsigned int virq,
+					unsigned int nr_irqs,
+					void *args)
+{
+	struct irq_fwspec *fwspec = args;
+	irq_hw_number_t hwirq;
+	unsigned int type;
+	struct mbigen_device *mgn_chip;
+	int i, err;
+
+	err = mbigen_domain_translate(domain, fwspec, &hwirq, &type);
+	if (err)
+		return err;
+
+	err = platform_msi_domain_alloc(domain, virq, nr_irqs);
+	if (err)
+		return err;
+
+	mgn_chip = platform_msi_get_host_data(domain);
+
+	for (i = 0; i < nr_irqs; i++)
+		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+				      &mbigen_irq_chip, mgn_chip->base);
+
+	return 0;
+}
+
+static const struct irq_domain_ops mbigen_domain_ops = {
+	.translate	= mbigen_domain_translate,
+	.alloc		= mbigen_irq_domain_alloc,
+	.free		= irq_domain_free_irqs_common,
+};
+
+static int mbigen_of_create_domain(struct platform_device *pdev,
+				   struct mbigen_device *mgn_chip)
+{
+	struct device *parent;
+	struct platform_device *child;
+	struct irq_domain *domain;
+	struct device_node *np;
+	u32 num_pins;
+
+	for_each_child_of_node(pdev->dev.of_node, np) {
+		if (!of_property_read_bool(np, "interrupt-controller"))
+			continue;
+
+		parent = platform_bus_type.dev_root;
+		child = of_platform_device_create(np, NULL, parent);
+		if (!child)
+			return -ENOMEM;
+
+		if (of_property_read_u32(child->dev.of_node, "num-pins",
+					 &num_pins) < 0) {
+			dev_err(&pdev->dev, "No num-pins property\n");
+			return -EINVAL;
+		}
+
+		domain = platform_msi_create_device_domain(&child->dev, num_pins,
+							   mbigen_write_msg,
+							   &mbigen_domain_ops,
+							   mgn_chip);
+		if (!domain)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_ACPI
+static int mbigen_acpi_create_domain(struct platform_device *pdev,
+				     struct mbigen_device *mgn_chip)
+{
+	struct irq_domain *domain;
+	u32 num_pins = 0;
+	int ret;
+
+	/*
+	 * "num-pins" is the total number of interrupt pins implemented in
+	 * this mbigen instance, and mbigen is an interrupt controller
+	 * connected to the ITS, converting wired interrupts into MSIs, so we
+	 * use "num-pins" to allocate the MSI vectors needed by the client
+	 * devices connected to it.
+	 *
+	 * Here is the DSDT device node used for mbigen in firmware:
+	 *	Device(MBI0) {
+	 *		Name(_HID, "HISI0152")
+	 *		Name(_UID, Zero)
+	 *		Name(_CRS, ResourceTemplate() {
+	 *			Memory32Fixed(ReadWrite, 0xa0080000, 0x10000)
+	 *		})
+	 *
+	 *		Name(_DSD, Package () {
+	 *			ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+	 *			Package () {
+	 *				Package () {"num-pins", 378}
+	 *			}
+	 *		})
+	 *	}
+	 */
+	ret = device_property_read_u32(&pdev->dev, "num-pins", &num_pins);
+	if (ret || num_pins == 0)
+		return -EINVAL;
+
+	domain = platform_msi_create_device_domain(&pdev->dev, num_pins,
+						   mbigen_write_msg,
+						   &mbigen_domain_ops,
+						   mgn_chip);
+	if (!domain)
+		return -ENOMEM;
+
+	return 0;
+}
+#else
+static inline int mbigen_acpi_create_domain(struct platform_device *pdev,
+					    struct mbigen_device *mgn_chip)
+{
+	return -ENODEV;
+}
+#endif
+
+static int mbigen_device_probe(struct platform_device *pdev)
+{
+	struct mbigen_device *mgn_chip;
+	struct resource *res;
+	int err;
+
+	mgn_chip = devm_kzalloc(&pdev->dev, sizeof(*mgn_chip), GFP_KERNEL);
+	if (!mgn_chip)
+		return -ENOMEM;
+
+	mgn_chip->pdev = pdev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
+	mgn_chip->base = devm_ioremap(&pdev->dev, res->start,
+				      resource_size(res));
+	if (!mgn_chip->base) {
+		dev_err(&pdev->dev, "failed to ioremap %pR\n", res);
+		return -ENOMEM;
+	}
+
+	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node)
+		err = mbigen_of_create_domain(pdev, mgn_chip);
+	else if (ACPI_COMPANION(&pdev->dev))
+		err = mbigen_acpi_create_domain(pdev, mgn_chip);
+	else
+		err = -EINVAL;
+
+	if (err) {
+		dev_err(&pdev->dev, "Failed to create mbi-gen@%p irqdomain\n",
+			mgn_chip->base);
+		return err;
+	}
+
+	platform_set_drvdata(pdev, mgn_chip);
+	return 0;
+}
+
+static const struct of_device_id mbigen_of_match[] = {
+	{ .compatible = "hisilicon,mbigen-v2" },
+	{ /* END */ }
+};
+MODULE_DEVICE_TABLE(of, mbigen_of_match);
+
+static const struct acpi_device_id mbigen_acpi_match[] = {
+	{ "HISI0152", 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, mbigen_acpi_match);
+
+static struct platform_driver mbigen_platform_driver = {
+	.driver = {
+		.name		= "Hisilicon MBIGEN-V2",
+		.of_match_table	= mbigen_of_match,
+		.acpi_match_table = ACPI_PTR(mbigen_acpi_match),
+	},
+	.probe			= mbigen_device_probe,
+};
+
+module_platform_driver(mbigen_platform_driver);
+
+MODULE_AUTHOR("Jun Ma <majun258@huawei.com>");
+MODULE_AUTHOR("Yun Wu <wuyun.wu@huawei.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hisilicon MBI Generator driver");
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
new file mode 100644
index 0000000..7b531fd
--- /dev/null
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -0,0 +1,424 @@
+/*
+ * Copyright (c) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#define NUM_CHANNEL 8
+#define MAX_INPUT_MUX 256
+
+#define REG_EDGE_POL	0x00
+#define REG_PIN_03_SEL	0x04
+#define REG_PIN_47_SEL	0x08
+#define REG_FILTER_SEL	0x0c
+
+#define REG_EDGE_POL_MASK(x)	(BIT(x) | BIT(16 + (x)))
+#define REG_EDGE_POL_EDGE(x)	BIT(x)
+#define REG_EDGE_POL_LOW(x)	BIT(16 + (x))
+#define REG_PIN_SEL_SHIFT(x)	(((x) % 4) * 8)
+#define REG_FILTER_SEL_SHIFT(x)	((x) * 4)
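+
+/*
+ * For example, channel 5 is muxed via REG_PIN_47_SEL, with its input
+ * source programmed in bits 15:8, since (5 % 4) * 8 = 8.
+ */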
+
+struct meson_gpio_irq_params {
+	unsigned int nr_hwirq;
+};
+
+static const struct meson_gpio_irq_params meson8_params = {
+	.nr_hwirq = 134,
+};
+
+static const struct meson_gpio_irq_params meson8b_params = {
+	.nr_hwirq = 119,
+};
+
+static const struct meson_gpio_irq_params gxbb_params = {
+	.nr_hwirq = 133,
+};
+
+static const struct meson_gpio_irq_params gxl_params = {
+	.nr_hwirq = 110,
+};
+
+static const struct meson_gpio_irq_params axg_params = {
+	.nr_hwirq = 100,
+};
+
+static const struct of_device_id meson_irq_gpio_matches[] = {
+	{ .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
+	{ .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
+	{ .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
+	{ .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params },
+	{ .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params },
+	{ }
+};
+
+struct meson_gpio_irq_controller {
+	unsigned int nr_hwirq;
+	void __iomem *base;
+	u32 channel_irqs[NUM_CHANNEL];
+	DECLARE_BITMAP(channel_map, NUM_CHANNEL);
+	spinlock_t lock;
+};
+
+static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
+				       unsigned int reg, u32 mask, u32 val)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(ctl->base + reg);
+	tmp &= ~mask;
+	tmp |= val;
+	writel_relaxed(tmp, ctl->base + reg);
+}
+
+static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel)
+{
+	return (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+}
+
+static int
+meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
+			       unsigned long  hwirq,
+			       u32 **channel_hwirq)
+{
+	unsigned int reg, idx;
+
+	spin_lock(&ctl->lock);
+
+	/* Find a free channel */
+	idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL);
+	if (idx >= NUM_CHANNEL) {
+		spin_unlock(&ctl->lock);
+		pr_err("No channel available\n");
+		return -ENOSPC;
+	}
+
+	/* Mark the channel as used */
+	set_bit(idx, ctl->channel_map);
+
+	/*
+	 * Set up the mux of the channel to route the signal of the pad
+	 * to the appropriate input of the GIC.
+	 */
+	reg = meson_gpio_irq_channel_to_reg(idx);
+	meson_gpio_irq_update_bits(ctl, reg,
+				   0xff << REG_PIN_SEL_SHIFT(idx),
+				   hwirq << REG_PIN_SEL_SHIFT(idx));
+
+	/*
+	 * Get the hwirq number assigned to this channel through
+	 * a pointer into the channel_irqs table. The added benefit of
+	 * this method is that we can also retrieve the channel index
+	 * from it, using the table base.
+	 */
+	*channel_hwirq = &(ctl->channel_irqs[idx]);
+
+	spin_unlock(&ctl->lock);
+
+	pr_debug("hwirq %lu assigned to channel %d - irq %u\n",
+		 hwirq, idx, **channel_hwirq);
+
+	return 0;
+}
+
+static unsigned int
+meson_gpio_irq_get_channel_idx(struct meson_gpio_irq_controller *ctl,
+			       u32 *channel_hwirq)
+{
+	return channel_hwirq - ctl->channel_irqs;
+}
+
+static void
+meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl,
+			       u32 *channel_hwirq)
+{
+	unsigned int idx;
+
+	idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
+	clear_bit(idx, ctl->channel_map);
+}
+
+static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
+				     unsigned int type,
+				     u32 *channel_hwirq)
+{
+	u32 val = 0;
+	unsigned int idx;
+
+	idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
+
+	/*
+	 * The controller has a filter block to operate in either LEVEL or
+	 * EDGE mode; the signal is then sent to the GIC. To enable LEVEL_LOW
+	 * and EDGE_FALLING support (which the GIC does not support), the
+	 * filter block is also able to invert the input signal it gets
+	 * before providing it to the GIC.
+	 */
+	type &= IRQ_TYPE_SENSE_MASK;
+
+	if (type == IRQ_TYPE_EDGE_BOTH)
+		return -EINVAL;
+
+	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+		val |= REG_EDGE_POL_EDGE(idx);
+
+	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
+		val |= REG_EDGE_POL_LOW(idx);
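+	/*
+	 * e.g. IRQ_TYPE_EDGE_FALLING sets both the edge bit and the
+	 * invert (low) bit, so the GIC sees a rising edge
+	 */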
+
+	spin_lock(&ctl->lock);
+
+	meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
+				   REG_EDGE_POL_MASK(idx), val);
+
+	spin_unlock(&ctl->lock);
+
+	return 0;
+}
+
+static unsigned int meson_gpio_irq_type_output(unsigned int type)
+{
+	unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
+
+	type &= ~IRQ_TYPE_SENSE_MASK;
+
+	/*
+	 * The polarity of the signal provided to the GIC should always
+	 * be high.
+	 */
+	if (sense & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+		type |= IRQ_TYPE_LEVEL_HIGH;
+	else if (sense & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+		type |= IRQ_TYPE_EDGE_RISING;
+
+	return type;
+}
+
+static int meson_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+	struct meson_gpio_irq_controller *ctl = data->domain->host_data;
+	u32 *channel_hwirq = irq_data_get_irq_chip_data(data);
+	int ret;
+
+	ret = meson_gpio_irq_type_setup(ctl, type, channel_hwirq);
+	if (ret)
+		return ret;
+
+	return irq_chip_set_type_parent(data,
+					meson_gpio_irq_type_output(type));
+}
+
+static struct irq_chip meson_gpio_irq_chip = {
+	.name			= "meson-gpio-irqchip",
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_type		= meson_gpio_irq_set_type,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+#endif
+	.flags			= IRQCHIP_SET_TYPE_MASKED,
+};
+
+static int meson_gpio_irq_domain_translate(struct irq_domain *domain,
+					   struct irq_fwspec *fwspec,
+					   unsigned long *hwirq,
+					   unsigned int *type)
+{
+	if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+		*hwirq	= fwspec->param[0];
+		*type	= fwspec->param[1];
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int meson_gpio_irq_allocate_gic_irq(struct irq_domain *domain,
+					   unsigned int virq,
+					   u32 hwirq,
+					   unsigned int type)
+{
+	struct irq_fwspec fwspec;
+
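+	/*
+	 * For example, a channel routed to GIC hwirq 64 with type
+	 * IRQ_TYPE_EDGE_FALLING is allocated as SPI 64 with type
+	 * IRQ_TYPE_EDGE_RISING at the GIC.
+	 */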
+	fwspec.fwnode = domain->parent->fwnode;
+	fwspec.param_count = 3;
+	fwspec.param[0] = 0;	/* SPI */
+	fwspec.param[1] = hwirq;
+	fwspec.param[2] = meson_gpio_irq_type_output(type);
+
+	return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+}
+
+static int meson_gpio_irq_domain_alloc(struct irq_domain *domain,
+				       unsigned int virq,
+				       unsigned int nr_irqs,
+				       void *data)
+{
+	struct irq_fwspec *fwspec = data;
+	struct meson_gpio_irq_controller *ctl = domain->host_data;
+	unsigned long hwirq;
+	u32 *channel_hwirq;
+	unsigned int type;
+	int ret;
+
+	if (WARN_ON(nr_irqs != 1))
+		return -EINVAL;
+
+	ret = meson_gpio_irq_domain_translate(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return ret;
+
+	ret = meson_gpio_irq_request_channel(ctl, hwirq, &channel_hwirq);
+	if (ret)
+		return ret;
+
+	ret = meson_gpio_irq_allocate_gic_irq(domain, virq,
+					      *channel_hwirq, type);
+	if (ret < 0) {
+		pr_err("failed to allocate gic irq %u\n", *channel_hwirq);
+		meson_gpio_irq_release_channel(ctl, channel_hwirq);
+		return ret;
+	}
+
+	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+				      &meson_gpio_irq_chip, channel_hwirq);
+
+	return 0;
+}
+
+static void meson_gpio_irq_domain_free(struct irq_domain *domain,
+				       unsigned int virq,
+				       unsigned int nr_irqs)
+{
+	struct meson_gpio_irq_controller *ctl = domain->host_data;
+	struct irq_data *irq_data;
+	u32 *channel_hwirq;
+
+	if (WARN_ON(nr_irqs != 1))
+		return;
+
+	irq_domain_free_irqs_parent(domain, virq, 1);
+
+	irq_data = irq_domain_get_irq_data(domain, virq);
+	channel_hwirq = irq_data_get_irq_chip_data(irq_data);
+
+	meson_gpio_irq_release_channel(ctl, channel_hwirq);
+}
+
+static const struct irq_domain_ops meson_gpio_irq_domain_ops = {
+	.alloc		= meson_gpio_irq_domain_alloc,
+	.free		= meson_gpio_irq_domain_free,
+	.translate	= meson_gpio_irq_domain_translate,
+};
+
+static int __init meson_gpio_irq_parse_dt(struct device_node *node,
+					  struct meson_gpio_irq_controller *ctl)
+{
+	const struct of_device_id *match;
+	const struct meson_gpio_irq_params *params;
+	int ret;
+
+	match = of_match_node(meson_irq_gpio_matches, node);
+	if (!match)
+		return -ENODEV;
+
+	params = match->data;
+	ctl->nr_hwirq = params->nr_hwirq;
+
+	ret = of_property_read_variable_u32_array(node,
+						  "amlogic,channel-interrupts",
+						  ctl->channel_irqs,
+						  NUM_CHANNEL,
+						  NUM_CHANNEL);
+	if (ret < 0) {
+		pr_err("can't get %d channel interrupts\n", NUM_CHANNEL);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int __init meson_gpio_irq_of_init(struct device_node *node,
+					 struct device_node *parent)
+{
+	struct irq_domain *domain, *parent_domain;
+	struct meson_gpio_irq_controller *ctl;
+	int ret;
+
+	if (!parent) {
+		pr_err("missing parent interrupt node\n");
+		return -ENODEV;
+	}
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain) {
+		pr_err("unable to obtain parent domain\n");
+		return -ENXIO;
+	}
+
+	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+	if (!ctl)
+		return -ENOMEM;
+
+	spin_lock_init(&ctl->lock);
+
+	ctl->base = of_iomap(node, 0);
+	if (!ctl->base) {
+		ret = -ENOMEM;
+		goto free_ctl;
+	}
+
+	ret = meson_gpio_irq_parse_dt(node, ctl);
+	if (ret)
+		goto free_channel_irqs;
+
+	domain = irq_domain_create_hierarchy(parent_domain, 0, ctl->nr_hwirq,
+					     of_node_to_fwnode(node),
+					     &meson_gpio_irq_domain_ops,
+					     ctl);
+	if (!domain) {
+		pr_err("failed to add domain\n");
+		ret = -ENODEV;
+		goto free_channel_irqs;
+	}
+
+	pr_info("%d to %d gpio interrupt mux initialized\n",
+		ctl->nr_hwirq, NUM_CHANNEL);
+
+	return 0;
+
+free_channel_irqs:
+	iounmap(ctl->base);
+free_ctl:
+	kfree(ctl);
+
+	return ret;
+}
+
+IRQCHIP_DECLARE(meson_gpio_intc, "amlogic,meson-gpio-intc",
+		meson_gpio_irq_of_init);
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
new file mode 100644
index 0000000..66f97fd
--- /dev/null
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * Copyright (C) 2001 Ralf Baechle
+ * Copyright (C) 2005  MIPS Technologies, Inc.	All rights reserved.
+ *	Author: Maciej W. Rozycki <macro@mips.com>
+ *
+ * This file defines the irq handler for MIPS CPU interrupts.
+ *
+ * This program is free software; you can redistribute	it and/or modify it
+ * under  the terms of	the GNU General	 Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+/*
+ * Almost all MIPS CPUs define 8 interrupt sources.  They are typically
+ * level triggered (i.e., cannot be cleared from CPU; must be cleared from
+ * device).
+ *
+ * The first two are software interrupts (i.e. not exposed as pins) which
+ * may be used for IPIs in multi-threaded single-core systems.
+ *
+ * The last one is usually the CPU timer interrupt if the counter register
+ * is present, or for old CPUs with an external FPU by convention it's the
+ * FPU exception interrupt.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/setup.h>
+
+static struct irq_domain *irq_domain;
+static struct irq_domain *ipi_domain;
+
+static inline void unmask_mips_irq(struct irq_data *d)
+{
+	set_c0_status(IE_SW0 << d->hwirq);
+	irq_enable_hazard();
+}
+
+static inline void mask_mips_irq(struct irq_data *d)
+{
+	clear_c0_status(IE_SW0 << d->hwirq);
+	irq_disable_hazard();
+}
+
+static struct irq_chip mips_cpu_irq_controller = {
+	.name		= "MIPS",
+	.irq_ack	= mask_mips_irq,
+	.irq_mask	= mask_mips_irq,
+	.irq_mask_ack	= mask_mips_irq,
+	.irq_unmask	= unmask_mips_irq,
+	.irq_eoi	= unmask_mips_irq,
+	.irq_disable	= mask_mips_irq,
+	.irq_enable	= unmask_mips_irq,
+};
+
+/*
+ * Basically the same as above but taking care of all the MT stuff
+ */
+
+static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
+{
+	unsigned int vpflags = dvpe();
+
+	clear_c0_cause(C_SW0 << d->hwirq);
+	evpe(vpflags);
+	unmask_mips_irq(d);
+	return 0;
+}
+
+/*
+ * While we ack the interrupt, interrupts are disabled and thus we don't
+ * need to deal with concurrency issues.  Same for mips_cpu_irq_end.
+ */
+static void mips_mt_cpu_irq_ack(struct irq_data *d)
+{
+	unsigned int vpflags = dvpe();
+	clear_c0_cause(C_SW0 << d->hwirq);
+	evpe(vpflags);
+	mask_mips_irq(d);
+}
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
+
+static void mips_mt_send_ipi(struct irq_data *d, unsigned int cpu)
+{
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+	unsigned long flags;
+	int vpflags;
+
+	local_irq_save(flags);
+
+	/* We can only send IPIs to VPEs within the local core */
+	WARN_ON(!cpus_are_siblings(smp_processor_id(), cpu));
+
+	vpflags = dvpe();
+	settc(cpu_vpe_id(&cpu_data[cpu]));
+	write_vpe_c0_cause(read_vpe_c0_cause() | (C_SW0 << hwirq));
+	evpe(vpflags);
+
+	local_irq_restore(flags);
+}
+
+#endif /* CONFIG_GENERIC_IRQ_IPI */
+
+static struct irq_chip mips_mt_cpu_irq_controller = {
+	.name		= "MIPS",
+	.irq_startup	= mips_mt_cpu_irq_startup,
+	.irq_ack	= mips_mt_cpu_irq_ack,
+	.irq_mask	= mask_mips_irq,
+	.irq_mask_ack	= mips_mt_cpu_irq_ack,
+	.irq_unmask	= unmask_mips_irq,
+	.irq_eoi	= unmask_mips_irq,
+	.irq_disable	= mask_mips_irq,
+	.irq_enable	= unmask_mips_irq,
+#ifdef CONFIG_GENERIC_IRQ_IPI
+	.ipi_send_single = mips_mt_send_ipi,
+#endif
+};
+
+asmlinkage void __weak plat_irq_dispatch(void)
+{
+	unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM;
+	unsigned int virq;
+	int irq;
+
+	if (!pending) {
+		spurious_interrupt();
+		return;
+	}
+
+	pending >>= CAUSEB_IP;
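+	/* e.g. with only IP7 (usually the CPU timer) pending, irq resolves to 7 */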
+	while (pending) {
+		irq = fls(pending) - 1;
+		if (IS_ENABLED(CONFIG_GENERIC_IRQ_IPI) && irq < 2)
+			virq = irq_linear_revmap(ipi_domain, irq);
+		else
+			virq = irq_linear_revmap(irq_domain, irq);
+		do_IRQ(virq);
+		pending &= ~BIT(irq);
+	}
+}
+
+static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
+			     irq_hw_number_t hw)
+{
+	struct irq_chip *chip;
+
+	if (hw < 2 && cpu_has_mipsmt) {
+		/* Software interrupts are used for MT/CMT IPI */
+		chip = &mips_mt_cpu_irq_controller;
+	} else {
+		chip = &mips_cpu_irq_controller;
+	}
+
+	if (cpu_has_vint)
+		set_vi_handler(hw, plat_irq_dispatch);
+
+	irq_set_chip_and_handler(irq, chip, handle_percpu_irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
+	.map = mips_cpu_intc_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
+
+struct cpu_ipi_domain_state {
+	DECLARE_BITMAP(allocated, 2);
+};
+
+static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
+			      unsigned int nr_irqs, void *arg)
+{
+	struct cpu_ipi_domain_state *state = domain->host_data;
+	unsigned int i, hwirq;
+	int ret;
+
+	for (i = 0; i < nr_irqs; i++) {
+		hwirq = find_first_zero_bit(state->allocated, 2);
+		if (hwirq == 2)
+			return -EBUSY;
+		bitmap_set(state->allocated, hwirq, 1);
+
+		ret = irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq,
+						    &mips_mt_cpu_irq_controller,
+						    NULL);
+		if (ret)
+			return ret;
+
+		ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mips_cpu_ipi_match(struct irq_domain *d, struct device_node *node,
+			      enum irq_domain_bus_token bus_token)
+{
+	bool is_ipi;
+
+	switch (bus_token) {
+	case DOMAIN_BUS_IPI:
+		is_ipi = d->bus_token == bus_token;
+		return (!node || (to_of_node(d->fwnode) == node)) && is_ipi;
+	default:
+		return 0;
+	}
+}
+
+static const struct irq_domain_ops mips_cpu_ipi_chip_ops = {
+	.alloc	= mips_cpu_ipi_alloc,
+	.match	= mips_cpu_ipi_match,
+};
+
+static void mips_cpu_register_ipi_domain(struct device_node *of_node)
+{
+	struct cpu_ipi_domain_state *ipi_domain_state;
+
+	ipi_domain_state = kzalloc(sizeof(*ipi_domain_state), GFP_KERNEL);
+	ipi_domain = irq_domain_add_hierarchy(irq_domain,
+					      IRQ_DOMAIN_FLAG_IPI_SINGLE,
+					      2, of_node,
+					      &mips_cpu_ipi_chip_ops,
+					      ipi_domain_state);
+	if (!ipi_domain)
+		panic("Failed to add MIPS CPU IPI domain");
+	irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
+}
+
+#else /* !CONFIG_GENERIC_IRQ_IPI */
+
+static inline void mips_cpu_register_ipi_domain(struct device_node *of_node) {}
+
+#endif /* !CONFIG_GENERIC_IRQ_IPI */
+
+static void __init __mips_cpu_irq_init(struct device_node *of_node)
+{
+	/* Mask interrupts. */
+	clear_c0_status(ST0_IM);
+	clear_c0_cause(CAUSEF_IP);
+
+	irq_domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
+					   &mips_cpu_intc_irq_domain_ops,
+					   NULL);
+	if (!irq_domain)
+		panic("Failed to add irqdomain for MIPS CPU");
+
+	/*
+	 * Only proceed to register the software interrupt IPI implementation
+	 * for CPUs which implement the MIPS MT (multi-threading) ASE.
+	 */
+	if (cpu_has_mipsmt)
+		mips_cpu_register_ipi_domain(of_node);
+}
+
+void __init mips_cpu_irq_init(void)
+{
+	__mips_cpu_irq_init(NULL);
+}
+
+int __init mips_cpu_irq_of_init(struct device_node *of_node,
+				struct device_node *parent)
+{
+	__mips_cpu_irq_init(of_node);
+	return 0;
+}
+IRQCHIP_DECLARE(cpu_intc, "mti,cpu-interrupt-controller", mips_cpu_irq_of_init);
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
new file mode 100644
index 0000000..d32268c
--- /dev/null
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -0,0 +1,805 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
+ */
+
+#define pr_fmt(fmt) "irq-mips-gic: " fmt
+
+#include <linux/bitmap.h>
+#include <linux/clocksource.h>
+#include <linux/cpuhotplug.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+
+#include <asm/mips-cps.h>
+#include <asm/setup.h>
+#include <asm/traps.h>
+
+#include <dt-bindings/interrupt-controller/mips-gic.h>
+
+#define GIC_MAX_INTRS		256
+#define GIC_MAX_LONGS		BITS_TO_LONGS(GIC_MAX_INTRS)
+
+/* Add 2 to convert GIC CPU pin to core interrupt */
+#define GIC_CPU_PIN_OFFSET	2
+
+/* An interrupt mapped to pin X causes the GIC to generate vector (X+1). */
+#define GIC_PIN_TO_VEC_OFFSET	1
+
+/* Convert between local/shared IRQ number and GIC HW IRQ number. */
+#define GIC_LOCAL_HWIRQ_BASE	0
+#define GIC_LOCAL_TO_HWIRQ(x)	(GIC_LOCAL_HWIRQ_BASE + (x))
+#define GIC_HWIRQ_TO_LOCAL(x)	((x) - GIC_LOCAL_HWIRQ_BASE)
+#define GIC_SHARED_HWIRQ_BASE	GIC_NUM_LOCAL_INTRS
+#define GIC_SHARED_TO_HWIRQ(x)	(GIC_SHARED_HWIRQ_BASE + (x))
+#define GIC_HWIRQ_TO_SHARED(x)	((x) - GIC_SHARED_HWIRQ_BASE)
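+
+/*
+ * For example, with GIC_NUM_LOCAL_INTRS local interrupts (7 on MIPS),
+ * shared interrupt 3 maps to hwirq 7 + 3 = 10 and GIC_HWIRQ_TO_SHARED(10)
+ * recovers 3.
+ */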
+
+void __iomem *mips_gic_base;
+
+DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
+
+static DEFINE_SPINLOCK(gic_lock);
+static struct irq_domain *gic_irq_domain;
+static struct irq_domain *gic_ipi_domain;
+static int gic_shared_intrs;
+static unsigned int gic_cpu_pin;
+static unsigned int timer_cpu_pin;
+static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
+static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
+static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
+
+static struct gic_all_vpes_chip_data {
+	u32	map;
+	bool	mask;
+} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];
+
+static void gic_clear_pcpu_masks(unsigned int intr)
+{
+	unsigned int i;
+
+	/* Clear the interrupt's bit in all pcpu_masks */
+	for_each_possible_cpu(i)
+		clear_bit(intr, per_cpu_ptr(pcpu_masks, i));
+}
+
+static bool gic_local_irq_is_routable(int intr)
+{
+	u32 vpe_ctl;
+
+	/* All local interrupts are routable in EIC mode. */
+	if (cpu_has_veic)
+		return true;
+
+	vpe_ctl = read_gic_vl_ctl();
+	switch (intr) {
+	case GIC_LOCAL_INT_TIMER:
+		return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
+	case GIC_LOCAL_INT_PERFCTR:
+		return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
+	case GIC_LOCAL_INT_FDC:
+		return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
+	case GIC_LOCAL_INT_SWINT0:
+	case GIC_LOCAL_INT_SWINT1:
+		return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
+	default:
+		return true;
+	}
+}
+
+static void gic_bind_eic_interrupt(int irq, int set)
+{
+	/* Convert irq vector # to hw int # */
+	irq -= GIC_PIN_TO_VEC_OFFSET;
+
+	/* Set irq to use shadow set */
+	write_gic_vl_eic_shadow_set(irq, set);
+}
+
+static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
+{
+	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));
+
+	write_gic_wedge(GIC_WEDGE_RW | hwirq);
+}
+
+int gic_get_c0_compare_int(void)
+{
+	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
+		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+	return irq_create_mapping(gic_irq_domain,
+				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
+}
+
+int gic_get_c0_perfcount_int(void)
+{
+	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
+		/* Is the performance counter shared with the timer? */
+		if (cp0_perfcount_irq < 0)
+			return -1;
+		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+	}
+	return irq_create_mapping(gic_irq_domain,
+				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
+}
+
+int gic_get_c0_fdc_int(void)
+{
+	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
+		/* Is the FDC IRQ even present? */
+		if (cp0_fdc_irq < 0)
+			return -1;
+		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
+	}
+
+	return irq_create_mapping(gic_irq_domain,
+				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
+}
+
+static void gic_handle_shared_int(bool chained)
+{
+	unsigned int intr, virq;
+	unsigned long *pcpu_mask;
+	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
+
+	/* Get per-cpu bitmaps */
+	pcpu_mask = this_cpu_ptr(pcpu_masks);
+
+	if (mips_cm_is64)
+		__ioread64_copy(pending, addr_gic_pend(),
+				DIV_ROUND_UP(gic_shared_intrs, 64));
+	else
+		__ioread32_copy(pending, addr_gic_pend(),
+				DIV_ROUND_UP(gic_shared_intrs, 32));
+
+	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);
+
+	for_each_set_bit(intr, pending, gic_shared_intrs) {
+		virq = irq_linear_revmap(gic_irq_domain,
+					 GIC_SHARED_TO_HWIRQ(intr));
+		if (chained)
+			generic_handle_irq(virq);
+		else
+			do_IRQ(virq);
+	}
+}
+
+static void gic_mask_irq(struct irq_data *d)
+{
+	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
+
+	write_gic_rmask(intr);
+	gic_clear_pcpu_masks(intr);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
+	unsigned int cpu;
+
+	write_gic_smask(intr);
+
+	gic_clear_pcpu_masks(intr);
+	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
+	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
+}
+
+static void gic_ack_irq(struct irq_data *d)
+{
+	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+
+	write_gic_wedge(irq);
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+	unsigned int irq, pol, trig, dual;
+	unsigned long flags;
+
+	irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+
+	spin_lock_irqsave(&gic_lock, flags);
+	switch (type & IRQ_TYPE_SENSE_MASK) {
+	case IRQ_TYPE_EDGE_FALLING:
+		pol = GIC_POL_FALLING_EDGE;
+		trig = GIC_TRIG_EDGE;
+		dual = GIC_DUAL_SINGLE;
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		pol = GIC_POL_RISING_EDGE;
+		trig = GIC_TRIG_EDGE;
+		dual = GIC_DUAL_SINGLE;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		pol = 0; /* Doesn't matter */
+		trig = GIC_TRIG_EDGE;
+		dual = GIC_DUAL_DUAL;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		pol = GIC_POL_ACTIVE_LOW;
+		trig = GIC_TRIG_LEVEL;
+		dual = GIC_DUAL_SINGLE;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+	default:
+		pol = GIC_POL_ACTIVE_HIGH;
+		trig = GIC_TRIG_LEVEL;
+		dual = GIC_DUAL_SINGLE;
+		break;
+	}
+
+	change_gic_pol(irq, pol);
+	change_gic_trig(irq, trig);
+	change_gic_dual(irq, dual);
+
+	if (trig == GIC_TRIG_EDGE)
+		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
+						 handle_edge_irq, NULL);
+	else
+		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
+						 handle_level_irq, NULL);
+	spin_unlock_irqrestore(&gic_lock, flags);
+
+	return 0;
+}
+
+#ifdef CONFIG_SMP
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
+			    bool force)
+{
+	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+	unsigned long flags;
+	unsigned int cpu;
+
+	cpu = cpumask_first_and(cpumask, cpu_online_mask);
+	if (cpu >= NR_CPUS)
+		return -EINVAL;
+
+	/* Assumption : cpumask refers to a single CPU */
+	spin_lock_irqsave(&gic_lock, flags);
+
+	/* Re-route this IRQ */
+	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
+
+	/* Update the pcpu_masks */
+	gic_clear_pcpu_masks(irq);
+	if (read_gic_mask(irq))
+		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+	spin_unlock_irqrestore(&gic_lock, flags);
+
+	return IRQ_SET_MASK_OK;
+}
+#endif
+
+static struct irq_chip gic_level_irq_controller = {
+	.name			=	"MIPS GIC",
+	.irq_mask		=	gic_mask_irq,
+	.irq_unmask		=	gic_unmask_irq,
+	.irq_set_type		=	gic_set_type,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	=	gic_set_affinity,
+#endif
+};
+
+static struct irq_chip gic_edge_irq_controller = {
+	.name			=	"MIPS GIC",
+	.irq_ack		=	gic_ack_irq,
+	.irq_mask		=	gic_mask_irq,
+	.irq_unmask		=	gic_unmask_irq,
+	.irq_set_type		=	gic_set_type,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	=	gic_set_affinity,
+#endif
+	.ipi_send_single	=	gic_send_ipi,
+};
+
+static void gic_handle_local_int(bool chained)
+{
+	unsigned long pending, masked;
+	unsigned int intr, virq;
+
+	pending = read_gic_vl_pend();
+	masked = read_gic_vl_mask();
+
+	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
+
+	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
+		virq = irq_linear_revmap(gic_irq_domain,
+					 GIC_LOCAL_TO_HWIRQ(intr));
+		if (chained)
+			generic_handle_irq(virq);
+		else
+			do_IRQ(virq);
+	}
+}
+
+static void gic_mask_local_irq(struct irq_data *d)
+{
+	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+
+	write_gic_vl_rmask(BIT(intr));
+}
+
+static void gic_unmask_local_irq(struct irq_data *d)
+{
+	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+
+	write_gic_vl_smask(BIT(intr));
+}
+
+static struct irq_chip gic_local_irq_controller = {
+	.name			=	"MIPS GIC Local",
+	.irq_mask		=	gic_mask_local_irq,
+	.irq_unmask		=	gic_unmask_local_irq,
+};
+
+static void gic_mask_local_irq_all_vpes(struct irq_data *d)
+{
+	struct gic_all_vpes_chip_data *cd;
+	unsigned long flags;
+	int intr, cpu;
+
+	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+	cd = irq_data_get_irq_chip_data(d);
+	cd->mask = false;
+
+	spin_lock_irqsave(&gic_lock, flags);
+	for_each_online_cpu(cpu) {
+		write_gic_vl_other(mips_cm_vp_id(cpu));
+		write_gic_vo_rmask(BIT(intr));
+	}
+	spin_unlock_irqrestore(&gic_lock, flags);
+}
+
+static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
+{
+	struct gic_all_vpes_chip_data *cd;
+	unsigned long flags;
+	int intr, cpu;
+
+	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+	cd = irq_data_get_irq_chip_data(d);
+	cd->mask = true;
+
+	spin_lock_irqsave(&gic_lock, flags);
+	for_each_online_cpu(cpu) {
+		write_gic_vl_other(mips_cm_vp_id(cpu));
+		write_gic_vo_smask(BIT(intr));
+	}
+	spin_unlock_irqrestore(&gic_lock, flags);
+}
+
+static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
+{
+	struct gic_all_vpes_chip_data *cd;
+	unsigned int intr;
+
+	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+	cd = irq_data_get_irq_chip_data(d);
+
+	write_gic_vl_map(intr, cd->map);
+	if (cd->mask)
+		write_gic_vl_smask(BIT(intr));
+}
+
+static struct irq_chip gic_all_vpes_local_irq_controller = {
+	.name			= "MIPS GIC Local",
+	.irq_mask		= gic_mask_local_irq_all_vpes,
+	.irq_unmask		= gic_unmask_local_irq_all_vpes,
+	.irq_cpu_online		= gic_all_vpes_irq_cpu_online,
+};
+
+static void __gic_irq_dispatch(void)
+{
+	gic_handle_local_int(false);
+	gic_handle_shared_int(false);
+}
+
+static void gic_irq_dispatch(struct irq_desc *desc)
+{
+	gic_handle_local_int(true);
+	gic_handle_shared_int(true);
+}
+
+static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
+				     irq_hw_number_t hw, unsigned int cpu)
+{
+	int intr = GIC_HWIRQ_TO_SHARED(hw);
+	struct irq_data *data;
+	unsigned long flags;
+
+	data = irq_get_irq_data(virq);
+
+	spin_lock_irqsave(&gic_lock, flags);
+	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
+	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
+	irq_data_update_effective_affinity(data, cpumask_of(cpu));
+	spin_unlock_irqrestore(&gic_lock, flags);
+
+	return 0;
+}
+
+static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+				const u32 *intspec, unsigned int intsize,
+				irq_hw_number_t *out_hwirq,
+				unsigned int *out_type)
+{
+	if (intsize != 3)
+		return -EINVAL;
+
+	if (intspec[0] == GIC_SHARED)
+		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
+	else if (intspec[0] == GIC_LOCAL)
+		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
+	else
+		return -EINVAL;
+	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+
+	return 0;
+}
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
+			      irq_hw_number_t hwirq)
+{
+	struct gic_all_vpes_chip_data *cd;
+	unsigned long flags;
+	unsigned int intr;
+	int err, cpu;
+	u32 map;
+
+	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
+		/* verify that shared irqs don't conflict with an IPI irq */
+		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
+			return -EBUSY;
+
+		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+						    &gic_level_irq_controller,
+						    NULL);
+		if (err)
+			return err;
+
+		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
+		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
+	}
+
+	intr = GIC_HWIRQ_TO_LOCAL(hwirq);
+	map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
+
+	switch (intr) {
+	case GIC_LOCAL_INT_TIMER:
+		/* CONFIG_MIPS_CMP workaround (see __gic_init) */
+		map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
+		/* fall-through */
+	case GIC_LOCAL_INT_PERFCTR:
+	case GIC_LOCAL_INT_FDC:
+		/*
+		 * HACK: These are all really percpu interrupts, but
+		 * the rest of the MIPS kernel code does not use the
+		 * percpu IRQ API for them.
+		 */
+		cd = &gic_all_vpes_chip_data[intr];
+		cd->map = map;
+		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+						    &gic_all_vpes_local_irq_controller,
+						    cd);
+		if (err)
+			return err;
+
+		irq_set_handler(virq, handle_percpu_irq);
+		break;
+
+	default:
+		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+						    &gic_local_irq_controller,
+						    NULL);
+		if (err)
+			return err;
+
+		irq_set_handler(virq, handle_percpu_devid_irq);
+		irq_set_percpu_devid(virq);
+		break;
+	}
+
+	if (!gic_local_irq_is_routable(intr))
+		return -EPERM;
+
+	spin_lock_irqsave(&gic_lock, flags);
+	for_each_online_cpu(cpu) {
+		write_gic_vl_other(mips_cm_vp_id(cpu));
+		write_gic_vo_map(intr, map);
+	}
+	spin_unlock_irqrestore(&gic_lock, flags);
+
+	return 0;
+}
+
+static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
+				unsigned int nr_irqs, void *arg)
+{
+	struct irq_fwspec *fwspec = arg;
+	irq_hw_number_t hwirq;
+
+	if (fwspec->param[0] == GIC_SHARED)
+		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
+	else
+		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);
+
+	return gic_irq_domain_map(d, virq, hwirq);
+}
+
+static void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
+				unsigned int nr_irqs)
+{
+}
+
+static const struct irq_domain_ops gic_irq_domain_ops = {
+	.xlate = gic_irq_domain_xlate,
+	.alloc = gic_irq_domain_alloc,
+	.free = gic_irq_domain_free,
+	.map = gic_irq_domain_map,
+};
+
+static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+				const u32 *intspec, unsigned int intsize,
+				irq_hw_number_t *out_hwirq,
+				unsigned int *out_type)
+{
+	/*
+	 * There's nothing to translate here. hwirq is dynamically allocated
+	 * and the irq type is always edge triggered.
+	 */
+	*out_hwirq = 0;
+	*out_type = IRQ_TYPE_EDGE_RISING;
+
+	return 0;
+}
+
+static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
+				unsigned int nr_irqs, void *arg)
+{
+	struct cpumask *ipimask = arg;
+	irq_hw_number_t hwirq, base_hwirq;
+	int cpu, ret, i;
+
+	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
+	if (base_hwirq == gic_shared_intrs)
+		return -ENOMEM;
+
+	/* check that we have enough contiguous space */
+	for (i = base_hwirq; i < base_hwirq + nr_irqs; i++) {
+		if (!test_bit(i, ipi_available))
+			return -EBUSY;
+	}
+	bitmap_clear(ipi_available, base_hwirq, nr_irqs);
+
+	/* map the hwirq for each cpu consecutively */
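+	/*
+	 * e.g. with base_hwirq 240 and four CPUs in ipimask, virq..virq + 3
+	 * map to shared interrupts 240..243, one routed to each CPU
+	 */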
+	i = 0;
+	for_each_cpu(cpu, ipimask) {
+		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
+
+		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
+						    &gic_edge_irq_controller,
+						    NULL);
+		if (ret)
+			goto error;
+
+		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
+						    &gic_edge_irq_controller,
+						    NULL);
+		if (ret)
+			goto error;
+
+		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
+		if (ret)
+			goto error;
+
+		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
+		if (ret)
+			goto error;
+
+		i++;
+	}
+
+	return 0;
+error:
+	bitmap_set(ipi_available, base_hwirq, nr_irqs);
+	return ret;
+}
+
+static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
+				unsigned int nr_irqs)
+{
+	irq_hw_number_t base_hwirq;
+	struct irq_data *data;
+
+	data = irq_get_irq_data(virq);
+	if (!data)
+		return;
+
+	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
+	bitmap_set(ipi_available, base_hwirq, nr_irqs);
+}
+
+static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
+				enum irq_domain_bus_token bus_token)
+{
+	bool is_ipi;
+
+	switch (bus_token) {
+	case DOMAIN_BUS_IPI:
+		is_ipi = d->bus_token == bus_token;
+		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
+	default:
+		return 0;
+	}
+}
+
+static const struct irq_domain_ops gic_ipi_domain_ops = {
+	.xlate = gic_ipi_domain_xlate,
+	.alloc = gic_ipi_domain_alloc,
+	.free = gic_ipi_domain_free,
+	.match = gic_ipi_domain_match,
+};
+
+static int gic_cpu_startup(unsigned int cpu)
+{
+	/* Enable or disable EIC */
+	change_gic_vl_ctl(GIC_VX_CTL_EIC,
+			  cpu_has_veic ? GIC_VX_CTL_EIC : 0);
+
+	/* Clear all local IRQ masks (ie. disable all local interrupts) */
+	write_gic_vl_rmask(~0);
+
+	/* Invoke irq_cpu_online callbacks to enable desired interrupts */
+	irq_cpu_online();
+
+	return 0;
+}
+
+static int __init gic_of_init(struct device_node *node,
+			      struct device_node *parent)
+{
+	unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
+	unsigned long reserved;
+	phys_addr_t gic_base;
+	struct resource res;
+	size_t gic_len;
+
+	/* Find the first available CPU vector. */
+	i = 0;
+	reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
+	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
+					   i++, &cpu_vec))
+		reserved |= BIT(cpu_vec);
+
+	cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
+	if (cpu_vec == hweight_long(ST0_IM)) {
+		pr_err("No CPU vectors available\n");
+		return -ENODEV;
+	}
+
+	if (of_address_to_resource(node, 0, &res)) {
+		/*
+		 * Probe the CM for the GIC base address if not specified
+		 * in the device-tree.
+		 */
+		if (mips_cm_present()) {
+			gic_base = read_gcr_gic_base() &
+				~CM_GCR_GIC_BASE_GICEN;
+			gic_len = 0x20000;
+			pr_warn("Using inherited base address %pa\n",
+				&gic_base);
+		} else {
+			pr_err("Failed to get memory range\n");
+			return -ENODEV;
+		}
+	} else {
+		gic_base = res.start;
+		gic_len = resource_size(&res);
+	}
+
+	if (mips_cm_present()) {
+		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
+		/* Ensure GIC region is enabled before trying to access it */
+		__sync();
+	}
+
+	mips_gic_base = ioremap_nocache(gic_base, gic_len);
+
+	gicconfig = read_gic_config();
+	gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
+	gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
+	gic_shared_intrs = (gic_shared_intrs + 1) * 8;
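+	/*
+	 * e.g. a GIC_CONFIG_NUMINTERRUPTS field value of 31 yields
+	 * (31 + 1) * 8 = 256 shared interrupts, matching GIC_MAX_INTRS.
+	 */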
+
+	if (cpu_has_veic) {
+		/* Always use vector 1 (pin 0 + GIC_PIN_TO_VEC_OFFSET) in EIC mode */
+		gic_cpu_pin = 0;
+		timer_cpu_pin = gic_cpu_pin;
+		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
+			       __gic_irq_dispatch);
+	} else {
+		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
+		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
+					gic_irq_dispatch);
+		/*
+		 * With the CMP implementation of SMP (deprecated), other CPUs
+		 * are started by the bootloader and put into a timer based
+		 * waiting poll loop. We must not re-route those CPU's local
+		 * timer interrupts as the wait instruction will never finish,
+		 * so just handle whatever CPU interrupt it is routed to by
+		 * default.
+		 *
+		 * This workaround should be removed when CMP support is
+		 * dropped.
+		 */
+		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
+		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
+			timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP;
+			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
+						GIC_CPU_PIN_OFFSET +
+						timer_cpu_pin,
+						gic_irq_dispatch);
+		} else {
+			timer_cpu_pin = gic_cpu_pin;
+		}
+	}
+
+	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
+					       gic_shared_intrs, 0,
+					       &gic_irq_domain_ops, NULL);
+	if (!gic_irq_domain) {
+		pr_err("Failed to add IRQ domain");
+		return -ENXIO;
+	}
+
+	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
+						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
+						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
+						  node, &gic_ipi_domain_ops, NULL);
+	if (!gic_ipi_domain) {
+		pr_err("Failed to add IPI domain");
+		return -ENXIO;
+	}
+
+	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
+
+	if (node &&
+	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
+		bitmap_set(ipi_resrv, v[0], v[1]);
+	} else {
+		/*
+		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
+		 * meeting the requirements of arch/mips SMP.
+		 */
+		num_ipis = 2 * num_possible_cpus();
+		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
+	}
+
+	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
+
+	board_bind_eic_interrupt = &gic_bind_eic_interrupt;
+
+	/* Setup defaults */
+	for (i = 0; i < gic_shared_intrs; i++) {
+		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
+		change_gic_trig(i, GIC_TRIG_LEVEL);
+		write_gic_rmask(i);
+	}
+
+	return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+				 "irqchip/mips/gic:starting",
+				 gic_cpu_startup, NULL);
+}
+IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
new file mode 100644
index 0000000..25f32e1
--- /dev/null
+++ b/drivers/irqchip/irq-mmp.c
@@ -0,0 +1,488 @@
+/*
+ *  linux/drivers/irqchip/irq-mmp.c
+ *
+ *  Generic IRQ handling, GPIO IRQ demultiplexing, etc.
+ *  Copyright (C) 2008 - 2012 Marvell Technology Group Ltd.
+ *
+ *  Author:	Bin Yang <bin.yang@marvell.com>
+ *              Haojian Zhuang <haojian.zhuang@gmail.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/exception.h>
+#include <asm/hardirq.h>
+
+#define MAX_ICU_NR		16
+
+#define PJ1_INT_SEL		0x10c
+#define PJ4_INT_SEL		0x104
+
+/* bit fields in PJ1_INT_SEL and PJ4_INT_SEL */
+#define SEL_INT_PENDING		(1 << 6)
+#define SEL_INT_NUM_MASK	0x3f
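+
+/*
+ * For example, a PJ1_INT_SEL read of 0x45 has SEL_INT_PENDING set and
+ * reports pending hwirq 0x45 & 0x3f = 5.
+ */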
+
+struct icu_chip_data {
+	int			nr_irqs;
+	unsigned int		virq_base;
+	unsigned int		cascade_irq;
+	void __iomem		*reg_status;
+	void __iomem		*reg_mask;
+	unsigned int		conf_enable;
+	unsigned int		conf_disable;
+	unsigned int		conf_mask;
+	unsigned int		clr_mfp_irq_base;
+	unsigned int		clr_mfp_hwirq;
+	struct irq_domain	*domain;
+};
+
+struct mmp_intc_conf {
+	unsigned int	conf_enable;
+	unsigned int	conf_disable;
+	unsigned int	conf_mask;
+};
+
+static void __iomem *mmp_icu_base;
+static struct icu_chip_data icu_data[MAX_ICU_NR];
+static int max_icu_nr;
+
+extern void mmp2_clear_pmic_int(void);
+
+static void icu_mask_ack_irq(struct irq_data *d)
+{
+	struct irq_domain *domain = d->domain;
+	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
+	int hwirq;
+	u32 r;
+
+	hwirq = d->irq - data->virq_base;
+	if (data == &icu_data[0]) {
+		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
+		r &= ~data->conf_mask;
+		r |= data->conf_disable;
+		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
+	} else {
+#ifdef CONFIG_CPU_MMP2
+		if ((data->virq_base == data->clr_mfp_irq_base)
+			&& (hwirq == data->clr_mfp_hwirq))
+			mmp2_clear_pmic_int();
+#endif
+		r = readl_relaxed(data->reg_mask) | (1 << hwirq);
+		writel_relaxed(r, data->reg_mask);
+	}
+}
+
+static void icu_mask_irq(struct irq_data *d)
+{
+	struct irq_domain *domain = d->domain;
+	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
+	int hwirq;
+	u32 r;
+
+	hwirq = d->irq - data->virq_base;
+	if (data == &icu_data[0]) {
+		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
+		r &= ~data->conf_mask;
+		r |= data->conf_disable;
+		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
+	} else {
+		r = readl_relaxed(data->reg_mask) | (1 << hwirq);
+		writel_relaxed(r, data->reg_mask);
+	}
+}
+
+static void icu_unmask_irq(struct irq_data *d)
+{
+	struct irq_domain *domain = d->domain;
+	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
+	int hwirq;
+	u32 r;
+
+	hwirq = d->irq - data->virq_base;
+	if (data == &icu_data[0]) {
+		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
+		r &= ~data->conf_mask;
+		r |= data->conf_enable;
+		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
+	} else {
+		r = readl_relaxed(data->reg_mask) & ~(1 << hwirq);
+		writel_relaxed(r, data->reg_mask);
+	}
+}
+
+struct irq_chip icu_irq_chip = {
+	.name		= "icu_irq",
+	.irq_mask	= icu_mask_irq,
+	.irq_mask_ack	= icu_mask_ack_irq,
+	.irq_unmask	= icu_unmask_irq,
+};
+
+static void icu_mux_irq_demux(struct irq_desc *desc)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct irq_domain *domain;
+	struct icu_chip_data *data;
+	int i;
+	unsigned long mask, status, n;
+
+	for (i = 1; i < max_icu_nr; i++) {
+		if (irq == icu_data[i].cascade_irq) {
+			domain = icu_data[i].domain;
+			data = (struct icu_chip_data *)domain->host_data;
+			break;
+		}
+	}
+	if (i >= max_icu_nr) {
+		pr_err("Spurious irq %d in MMP INTC\n", irq);
+		return;
+	}
+
+	mask = readl_relaxed(data->reg_mask);
+	while (1) {
+		status = readl_relaxed(data->reg_status) & ~mask;
+		if (status == 0)
+			break;
+		for_each_set_bit(n, &status, BITS_PER_LONG) {
+			generic_handle_irq(icu_data[i].virq_base + n);
+		}
+	}
+}
+
+static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq,
+			      irq_hw_number_t hw)
+{
+	irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
+	return 0;
+}
+
+static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
+				const u32 *intspec, unsigned int intsize,
+				unsigned long *out_hwirq,
+				unsigned int *out_type)
+{
+	*out_hwirq = intspec[0];
+	return 0;
+}
+
+const struct irq_domain_ops mmp_irq_domain_ops = {
+	.map		= mmp_irq_domain_map,
+	.xlate		= mmp_irq_domain_xlate,
+};
+
+static const struct mmp_intc_conf mmp_conf = {
+	.conf_enable	= 0x51,
+	.conf_disable	= 0x0,
+	.conf_mask	= 0x7f,
+};
+
+static const struct mmp_intc_conf mmp2_conf = {
+	.conf_enable	= 0x20,
+	.conf_disable	= 0x0,
+	.conf_mask	= 0x7f,
+};
+
+static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
+{
+	int hwirq;
+
+	hwirq = readl_relaxed(mmp_icu_base + PJ1_INT_SEL);
+	if (!(hwirq & SEL_INT_PENDING))
+		return;
+	hwirq &= SEL_INT_NUM_MASK;
+	handle_domain_irq(icu_data[0].domain, hwirq, regs);
+}
+
+static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
+{
+	int hwirq;
+
+	hwirq = readl_relaxed(mmp_icu_base + PJ4_INT_SEL);
+	if (!(hwirq & SEL_INT_PENDING))
+		return;
+	hwirq &= SEL_INT_NUM_MASK;
+	handle_domain_irq(icu_data[0].domain, hwirq, regs);
+}
+
+/* MMP (ARMv5) */
+void __init icu_init_irq(void)
+{
+	int irq;
+
+	max_icu_nr = 1;
+	mmp_icu_base = ioremap(0xd4282000, 0x1000);
+	icu_data[0].conf_enable = mmp_conf.conf_enable;
+	icu_data[0].conf_disable = mmp_conf.conf_disable;
+	icu_data[0].conf_mask = mmp_conf.conf_mask;
+	icu_data[0].nr_irqs = 64;
+	icu_data[0].virq_base = 0;
+	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
+						   &irq_domain_simple_ops,
+						   &icu_data[0]);
+	for (irq = 0; irq < 64; irq++) {
+		icu_mask_irq(irq_get_irq_data(irq));
+		irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
+	}
+	irq_set_default_host(icu_data[0].domain);
+	set_handle_irq(mmp_handle_irq);
+}
+
+/* MMP2 (ARMv7) */
+void __init mmp2_init_icu(void)
+{
+	int irq, end;
+
+	max_icu_nr = 8;
+	mmp_icu_base = ioremap(0xd4282000, 0x1000);
+	icu_data[0].conf_enable = mmp2_conf.conf_enable;
+	icu_data[0].conf_disable = mmp2_conf.conf_disable;
+	icu_data[0].conf_mask = mmp2_conf.conf_mask;
+	icu_data[0].nr_irqs = 64;
+	icu_data[0].virq_base = 0;
+	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
+						   &irq_domain_simple_ops,
+						   &icu_data[0]);
+	icu_data[1].reg_status = mmp_icu_base + 0x150;
+	icu_data[1].reg_mask = mmp_icu_base + 0x168;
+	icu_data[1].clr_mfp_irq_base = icu_data[0].virq_base +
+				icu_data[0].nr_irqs;
+	icu_data[1].clr_mfp_hwirq = 1;		/* offset to IRQ_MMP2_PMIC_BASE */
+	icu_data[1].nr_irqs = 2;
+	icu_data[1].cascade_irq = 4;
+	icu_data[1].virq_base = icu_data[0].virq_base + icu_data[0].nr_irqs;
+	icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
+						   icu_data[1].virq_base, 0,
+						   &irq_domain_simple_ops,
+						   &icu_data[1]);
+	icu_data[2].reg_status = mmp_icu_base + 0x154;
+	icu_data[2].reg_mask = mmp_icu_base + 0x16c;
+	icu_data[2].nr_irqs = 2;
+	icu_data[2].cascade_irq = 5;
+	icu_data[2].virq_base = icu_data[1].virq_base + icu_data[1].nr_irqs;
+	icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
+						   icu_data[2].virq_base, 0,
+						   &irq_domain_simple_ops,
+						   &icu_data[2]);
+	icu_data[3].reg_status = mmp_icu_base + 0x180;
+	icu_data[3].reg_mask = mmp_icu_base + 0x17c;
+	icu_data[3].nr_irqs = 3;
+	icu_data[3].cascade_irq = 9;
+	icu_data[3].virq_base = icu_data[2].virq_base + icu_data[2].nr_irqs;
+	icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
+						   icu_data[3].virq_base, 0,
+						   &irq_domain_simple_ops,
+						   &icu_data[3]);
+	icu_data[4].reg_status = mmp_icu_base + 0x158;
+	icu_data[4].reg_mask = mmp_icu_base + 0x170;
+	icu_data[4].nr_irqs = 5;
+	icu_data[4].cascade_irq = 17;
+	icu_data[4].virq_base = icu_data[3].virq_base + icu_data[3].nr_irqs;
+	icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
+						   icu_data[4].virq_base, 0,
+						   &irq_domain_simple_ops,
+						   &icu_data[4]);
+	icu_data[5].reg_status = mmp_icu_base + 0x15c;
+	icu_data[5].reg_mask = mmp_icu_base + 0x174;
+	icu_data[5].nr_irqs = 15;
+	icu_data[5].cascade_irq = 35;
+	icu_data[5].virq_base = icu_data[4].virq_base + icu_data[4].nr_irqs;
+	icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
+						   icu_data[5].virq_base, 0,
+						   &irq_domain_simple_ops,
+						   &icu_data[5]);
+	icu_data[6].reg_status = mmp_icu_base + 0x160;
+	icu_data[6].reg_mask = mmp_icu_base + 0x178;
+	icu_data[6].nr_irqs = 2;
+	icu_data[6].cascade_irq = 51;
+	icu_data[6].virq_base = icu_data[5].virq_base + icu_data[5].nr_irqs;
+	icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
+						   icu_data[6].virq_base, 0,
+						   &irq_domain_simple_ops,
+						   &icu_data[6]);
+	icu_data[7].reg_status = mmp_icu_base + 0x188;
+	icu_data[7].reg_mask = mmp_icu_base + 0x184;
+	icu_data[7].nr_irqs = 2;
+	icu_data[7].cascade_irq = 55;
+	icu_data[7].virq_base = icu_data[6].virq_base + icu_data[6].nr_irqs;
+	icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
+						   icu_data[7].virq_base, 0,
+						   &irq_domain_simple_ops,
+						   &icu_data[7]);
+	end = icu_data[7].virq_base + icu_data[7].nr_irqs;
+	for (irq = 0; irq < end; irq++) {
+		icu_mask_irq(irq_get_irq_data(irq));
+		if (irq == icu_data[1].cascade_irq ||
+		    irq == icu_data[2].cascade_irq ||
+		    irq == icu_data[3].cascade_irq ||
+		    irq == icu_data[4].cascade_irq ||
+		    irq == icu_data[5].cascade_irq ||
+		    irq == icu_data[6].cascade_irq ||
+		    irq == icu_data[7].cascade_irq) {
+			irq_set_chip(irq, &icu_irq_chip);
+			irq_set_chained_handler(irq, icu_mux_irq_demux);
+		} else {
+			irq_set_chip_and_handler(irq, &icu_irq_chip,
+						 handle_level_irq);
+		}
+	}
+	irq_set_default_host(icu_data[0].domain);
+	set_handle_irq(mmp2_handle_irq);
+}
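+
+/*
+ * Resulting virq layout of the legacy setup above (derived from the
+ * virq_base/nr_irqs chain): bank 0 owns virqs 0-63, then the mux banks
+ * follow back to back as 64-65, 66-67, 68-70, 71-75, 76-90, 91-92 and
+ * 93-94, so end is 95.
+ */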
+
+#ifdef CONFIG_OF
+static int __init mmp_init_bases(struct device_node *node)
+{
+	int ret, nr_irqs, irq, i = 0;
+
+	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs);
+	if (ret) {
+		pr_err("Not found mrvl,intc-nr-irqs property\n");
+		return ret;
+	}
+
+	mmp_icu_base = of_iomap(node, 0);
+	if (!mmp_icu_base) {
+		pr_err("Failed to get interrupt controller register\n");
+		return -ENOMEM;
+	}
+
+	icu_data[0].virq_base = 0;
+	icu_data[0].domain = irq_domain_add_linear(node, nr_irqs,
+						   &mmp_irq_domain_ops,
+						   &icu_data[0]);
+	for (irq = 0; irq < nr_irqs; irq++) {
+		ret = irq_create_mapping(icu_data[0].domain, irq);
+		if (!ret) {
+			pr_err("Failed to mapping hwirq\n");
+			goto err;
+		}
+		if (!irq)
+			icu_data[0].virq_base = ret;
+	}
+	icu_data[0].nr_irqs = nr_irqs;
+	return 0;
+err:
+	if (icu_data[0].virq_base) {
+		for (i = 0; i < irq; i++)
+			irq_dispose_mapping(icu_data[0].virq_base + i);
+	}
+	irq_domain_remove(icu_data[0].domain);
+	iounmap(mmp_icu_base);
+	return -EINVAL;
+}
+
+static int __init mmp_of_init(struct device_node *node,
+			      struct device_node *parent)
+{
+	int ret;
+
+	ret = mmp_init_bases(node);
+	if (ret < 0)
+		return ret;
+
+	icu_data[0].conf_enable = mmp_conf.conf_enable;
+	icu_data[0].conf_disable = mmp_conf.conf_disable;
+	icu_data[0].conf_mask = mmp_conf.conf_mask;
+	irq_set_default_host(icu_data[0].domain);
+	set_handle_irq(mmp_handle_irq);
+	max_icu_nr = 1;
+	return 0;
+}
+IRQCHIP_DECLARE(mmp_intc, "mrvl,mmp-intc", mmp_of_init);
+
+static int __init mmp2_of_init(struct device_node *node,
+			       struct device_node *parent)
+{
+	int ret;
+
+	ret = mmp_init_bases(node);
+	if (ret < 0)
+		return ret;
+
+	icu_data[0].conf_enable = mmp2_conf.conf_enable;
+	icu_data[0].conf_disable = mmp2_conf.conf_disable;
+	icu_data[0].conf_mask = mmp2_conf.conf_mask;
+	irq_set_default_host(icu_data[0].domain);
+	set_handle_irq(mmp2_handle_irq);
+	max_icu_nr = 1;
+	return 0;
+}
+IRQCHIP_DECLARE(mmp2_intc, "mrvl,mmp2-intc", mmp2_of_init);
+
+static int __init mmp2_mux_of_init(struct device_node *node,
+				   struct device_node *parent)
+{
+	struct resource res;
+	int i, ret, irq, j = 0;
+	u32 nr_irqs, mfp_irq;
+
+	if (!parent)
+		return -ENODEV;
+
+	i = max_icu_nr;
+	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs",
+				   &nr_irqs);
+	if (ret) {
+		pr_err("Not found mrvl,intc-nr-irqs property\n");
+		return -EINVAL;
+	}
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret < 0) {
+		pr_err("Not found reg property\n");
+		return -EINVAL;
+	}
+	icu_data[i].reg_status = mmp_icu_base + res.start;
+	ret = of_address_to_resource(node, 1, &res);
+	if (ret < 0) {
+		pr_err("Not found reg property\n");
+		return -EINVAL;
+	}
+	icu_data[i].reg_mask = mmp_icu_base + res.start;
+	icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
+	if (!icu_data[i].cascade_irq)
+		return -EINVAL;
+
+	icu_data[i].virq_base = 0;
+	icu_data[i].domain = irq_domain_add_linear(node, nr_irqs,
+						   &mmp_irq_domain_ops,
+						   &icu_data[i]);
+	for (irq = 0; irq < nr_irqs; irq++) {
+		ret = irq_create_mapping(icu_data[i].domain, irq);
+		if (!ret) {
+			pr_err("Failed to mapping hwirq\n");
+			goto err;
+		}
+		if (!irq)
+			icu_data[i].virq_base = ret;
+	}
+	icu_data[i].nr_irqs = nr_irqs;
+	if (!of_property_read_u32(node, "mrvl,clr-mfp-irq",
+				  &mfp_irq)) {
+		icu_data[i].clr_mfp_irq_base = icu_data[i].virq_base;
+		icu_data[i].clr_mfp_hwirq = mfp_irq;
+	}
+	irq_set_chained_handler(icu_data[i].cascade_irq,
+				icu_mux_irq_demux);
+	max_icu_nr++;
+	return 0;
+err:
+	if (icu_data[i].virq_base) {
+		for (j = 0; j < irq; j++)
+			irq_dispose_mapping(icu_data[i].virq_base + j);
+	}
+	irq_domain_remove(icu_data[i].domain);
+	return -EINVAL;
+}
+IRQCHIP_DECLARE(mmp2_mux_intc, "mrvl,mmp2-mux-intc", mmp2_mux_of_init);
+#endif
diff --git a/drivers/irqchip/irq-mscc-ocelot.c b/drivers/irqchip/irq-mscc-ocelot.c
new file mode 100644
index 0000000..b63e40c
--- /dev/null
+++ b/drivers/irqchip/irq-mscc-ocelot.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot IRQ controller driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/interrupt.h>
+
+#define ICPU_CFG_INTR_INTR_STICKY	0x10
+#define ICPU_CFG_INTR_INTR_ENA		0x18
+#define ICPU_CFG_INTR_INTR_ENA_CLR	0x1c
+#define ICPU_CFG_INTR_INTR_ENA_SET	0x20
+#define ICPU_CFG_INTR_DST_INTR_IDENT(x)	(0x38 + 0x4 * (x))
+#define ICPU_CFG_INTR_INTR_TRIGGER(x)	(0x5c + 0x4 * (x))
+
+#define OCELOT_NR_IRQ 24
+
+static void ocelot_irq_unmask(struct irq_data *data)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+	struct irq_chip_type *ct = irq_data_get_chip_type(data);
+	unsigned int mask = data->mask;
+	u32 val;
+
+	irq_gc_lock(gc);
+	val = irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(0)) |
+	      irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(1));
+	if (!(val & mask))
+		irq_reg_writel(gc, mask, ICPU_CFG_INTR_INTR_STICKY);
+
+	*ct->mask_cache &= ~mask;
+	irq_reg_writel(gc, mask, ICPU_CFG_INTR_INTR_ENA_SET);
+	irq_gc_unlock(gc);
+}
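+
+/*
+ * Rationale, as read from the code above: TRIGGER(0)/TRIGGER(1) encode
+ * the per-line trigger mode, and a line with neither bit set is
+ * level-sensitive. Its stale sticky (latched) state is acked before the
+ * enable bit is set, so unmasking cannot replay an old event.
+ */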
+
+static void ocelot_irq_handler(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct irq_domain *d = irq_desc_get_handler_data(desc);
+	struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
+	u32 reg = irq_reg_readl(gc, ICPU_CFG_INTR_DST_INTR_IDENT(0));
+
+	chained_irq_enter(chip, desc);
+
+	while (reg) {
+		u32 hwirq = __fls(reg);
+
+		generic_handle_irq(irq_find_mapping(d, hwirq));
+		reg &= ~(BIT(hwirq));
+	}
+
+	chained_irq_exit(chip, desc);
+}
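+
+/*
+ * Dispatch order note (illustrative trace): DST_INTR_IDENT(0) is a
+ * bitmap of pending sources and __fls() selects the highest set bit,
+ * so for reg == 0x00000005 hwirq 2 is handled before hwirq 0.
+ */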
+
+static int __init ocelot_irq_init(struct device_node *node,
+				  struct device_node *parent)
+{
+	struct irq_domain *domain;
+	struct irq_chip_generic *gc;
+	int parent_irq, ret;
+
+	parent_irq = irq_of_parse_and_map(node, 0);
+	if (!parent_irq)
+		return -EINVAL;
+
+	domain = irq_domain_add_linear(node, OCELOT_NR_IRQ,
+				       &irq_generic_chip_ops, NULL);
+	if (!domain) {
+		pr_err("%s: unable to add irq domain\n", node->name);
+		return -ENOMEM;
+	}
+
+	ret = irq_alloc_domain_generic_chips(domain, OCELOT_NR_IRQ, 1,
+					     "icpu", handle_level_irq,
+					     0, 0, 0);
+	if (ret) {
+		pr_err("%s: unable to alloc irq domain gc\n", node->name);
+		goto err_domain_remove;
+	}
+
+	gc = irq_get_domain_generic_chip(domain, 0);
+	gc->reg_base = of_iomap(node, 0);
+	if (!gc->reg_base) {
+		pr_err("%s: unable to map resource\n", node->name);
+		ret = -ENOMEM;
+		goto err_gc_free;
+	}
+
+	gc->chip_types[0].regs.ack = ICPU_CFG_INTR_INTR_STICKY;
+	gc->chip_types[0].regs.mask = ICPU_CFG_INTR_INTR_ENA_CLR;
+	gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
+	gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+	gc->chip_types[0].chip.irq_unmask = ocelot_irq_unmask;
+
+	/* Mask and ack all interrupts */
+	irq_reg_writel(gc, 0, ICPU_CFG_INTR_INTR_ENA);
+	irq_reg_writel(gc, 0xffffffff, ICPU_CFG_INTR_INTR_STICKY);
+
+	irq_set_chained_handler_and_data(parent_irq, ocelot_irq_handler,
+					 domain);
+
+	return 0;
+
+err_gc_free:
+	irq_free_generic_chip(gc);
+
+err_domain_remove:
+	irq_domain_remove(domain);
+
+	return ret;
+}
+IRQCHIP_DECLARE(ocelot_icpu, "mscc,ocelot-icpu-intr", ocelot_irq_init);
diff --git a/drivers/irqchip/irq-mtk-cirq.c b/drivers/irqchip/irq-mtk-cirq.c
new file mode 100644
index 0000000..18c65c1
--- /dev/null
+++ b/drivers/irqchip/irq-mtk-cirq.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Youlin.Pei <youlin.pei@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+
+#define CIRQ_ACK	0x40
+#define CIRQ_MASK_SET	0xc0
+#define CIRQ_MASK_CLR	0x100
+#define CIRQ_SENS_SET	0x180
+#define CIRQ_SENS_CLR	0x1c0
+#define CIRQ_POL_SET	0x240
+#define CIRQ_POL_CLR	0x280
+#define CIRQ_CONTROL	0x300
+
+#define CIRQ_EN	0x1
+#define CIRQ_EDGE	0x2
+#define CIRQ_FLUSH	0x4
+
+struct mtk_cirq_chip_data {
+	void __iomem *base;
+	unsigned int ext_irq_start;
+	unsigned int ext_irq_end;
+	struct irq_domain *domain;
+};
+
+static struct mtk_cirq_chip_data *cirq_data;
+
+static void mtk_cirq_write_mask(struct irq_data *data, unsigned int offset)
+{
+	struct mtk_cirq_chip_data *chip_data = data->chip_data;
+	unsigned int cirq_num = data->hwirq;
+	u32 mask = 1 << (cirq_num % 32);
+
+	writel_relaxed(mask, chip_data->base + offset + (cirq_num / 32) * 4);
+}
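+
+/*
+ * Worked example for the helper above: for hwirq 37 the mask is
+ * BIT(37 % 32) == BIT(5) and the target is base + offset + (37 / 32) * 4,
+ * i.e. bit 5 of the second 32-bit word. The SET/CLR register pairs give
+ * a write-1-to-apply interface, so no read-modify-write is needed.
+ */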
+
+static void mtk_cirq_mask(struct irq_data *data)
+{
+	mtk_cirq_write_mask(data, CIRQ_MASK_SET);
+	irq_chip_mask_parent(data);
+}
+
+static void mtk_cirq_unmask(struct irq_data *data)
+{
+	mtk_cirq_write_mask(data, CIRQ_MASK_CLR);
+	irq_chip_unmask_parent(data);
+}
+
+static int mtk_cirq_set_type(struct irq_data *data, unsigned int type)
+{
+	int ret;
+
+	switch (type & IRQ_TYPE_SENSE_MASK) {
+	case IRQ_TYPE_EDGE_FALLING:
+		mtk_cirq_write_mask(data, CIRQ_POL_CLR);
+		mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		mtk_cirq_write_mask(data, CIRQ_POL_SET);
+		mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		mtk_cirq_write_mask(data, CIRQ_POL_CLR);
+		mtk_cirq_write_mask(data, CIRQ_SENS_SET);
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		mtk_cirq_write_mask(data, CIRQ_POL_SET);
+		mtk_cirq_write_mask(data, CIRQ_SENS_SET);
+		break;
+	default:
+		break;
+	}
+
+	data = data->parent_data;
+	ret = data->chip->irq_set_type(data, type);
+	return ret;
+}
+
+static struct irq_chip mtk_cirq_chip = {
+	.name			= "MT_CIRQ",
+	.irq_mask		= mtk_cirq_mask,
+	.irq_unmask		= mtk_cirq_unmask,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_type		= mtk_cirq_set_type,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+#endif
+};
+
+static int mtk_cirq_domain_translate(struct irq_domain *d,
+				     struct irq_fwspec *fwspec,
+				     unsigned long *hwirq,
+				     unsigned int *type)
+{
+	if (is_of_node(fwspec->fwnode)) {
+		if (fwspec->param_count != 3)
+			return -EINVAL;
+
+		/* No PPI should point to this domain */
+		if (fwspec->param[0] != 0)
+			return -EINVAL;
+
+		/* Check that the number is within the CIRQ-supported external irq range */
+		if (fwspec->param[1] < cirq_data->ext_irq_start ||
+		    fwspec->param[1] > cirq_data->ext_irq_end)
+			return -EINVAL;
+
+		*hwirq = fwspec->param[1] - cirq_data->ext_irq_start;
+		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int mtk_cirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				 unsigned int nr_irqs, void *arg)
+{
+	int ret;
+	irq_hw_number_t hwirq;
+	unsigned int type;
+	struct irq_fwspec *fwspec = arg;
+	struct irq_fwspec parent_fwspec = *fwspec;
+
+	ret = mtk_cirq_domain_translate(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return ret;
+
+	if (WARN_ON(nr_irqs != 1))
+		return -EINVAL;
+
+	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+				      &mtk_cirq_chip,
+				      domain->host_data);
+
+	parent_fwspec.fwnode = domain->parent->fwnode;
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+					    &parent_fwspec);
+}
+
+static const struct irq_domain_ops cirq_domain_ops = {
+	.translate	= mtk_cirq_domain_translate,
+	.alloc		= mtk_cirq_domain_alloc,
+	.free		= irq_domain_free_irqs_common,
+};
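+
+/*
+ * Illustrative DT fragment (assumed bindings and values, shown only to
+ * make the three-cell translate above concrete):
+ *
+ *	cirq: interrupt-controller@10204000 {
+ *		compatible = "mediatek,mtk-cirq";
+ *		interrupt-controller;
+ *		#interrupt-cells = <3>;
+ *		interrupt-parent = <&sysirq>;
+ *		mediatek,ext-irq-range = <32 200>;
+ *	};
+ *
+ * param[0] must be 0 (no PPIs), param[1] an external interrupt number
+ * inside ext-irq-range, param[2] the trigger flags.
+ */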
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_cirq_suspend(void)
+{
+	u32 value, mask;
+	unsigned int irq, hwirq_num;
+	bool pending, masked;
+	int i, pendret, maskret;
+
+	/*
+	 * When external interrupts occur, the CIRQ latches their status
+	 * even while it is disabled. When the flush command is executed,
+	 * the CIRQ resends signals according to that latched status, so
+	 * stale status bits must be cleared here or the wrong signals
+	 * will be resent after resume.
+	 *
+	 * arch_suspend_disable_irqs() is called before this suspend
+	 * callback. If we simply cleared all of the status bits, any
+	 * external interrupt that fired between
+	 * arch_suspend_disable_irqs() and this callback would be lost.
+	 * The following steps avoid that:
+	 *
+	 * - Iterate over all the CIRQ supported interrupts;
+	 * - For each interrupt, inspect its pending and masked status at
+	 *   GIC level;
+	 * - If pending and unmasked, it fired between
+	 *   arch_suspend_disable_irqs() and this callback, so don't ACK
+	 *   it. Otherwise, ACK it.
+	 */
+	hwirq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
+	for (i = 0; i < hwirq_num; i++) {
+		irq = irq_find_mapping(cirq_data->domain, i);
+		if (irq) {
+			pendret = irq_get_irqchip_state(irq,
+							IRQCHIP_STATE_PENDING,
+							&pending);
+
+			maskret = irq_get_irqchip_state(irq,
+							IRQCHIP_STATE_MASKED,
+							&masked);
+
+			if (pendret == 0 && maskret == 0 &&
+			    (pending && !masked))
+				continue;
+		}
+
+		mask = 1 << (i % 32);
+		writel_relaxed(mask, cirq_data->base + CIRQ_ACK + (i / 32) * 4);
+	}
+
+	/* Set edge-only mode (to record edge-triggered interrupts) and enable the CIRQ */
+	value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
+	value |= (CIRQ_EDGE | CIRQ_EN);
+	writel_relaxed(value, cirq_data->base + CIRQ_CONTROL);
+
+	return 0;
+}
+
+static void mtk_cirq_resume(void)
+{
+	u32 value;
+
+	/* Flush recorded interrupts; this resends the signals to the parent controller */
+	value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
+	writel_relaxed(value | CIRQ_FLUSH, cirq_data->base + CIRQ_CONTROL);
+
+	/* disable cirq */
+	value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
+	value &= ~(CIRQ_EDGE | CIRQ_EN);
+	writel_relaxed(value, cirq_data->base + CIRQ_CONTROL);
+}
+
+static struct syscore_ops mtk_cirq_syscore_ops = {
+	.suspend	= mtk_cirq_suspend,
+	.resume		= mtk_cirq_resume,
+};
+
+static void mtk_cirq_syscore_init(void)
+{
+	register_syscore_ops(&mtk_cirq_syscore_ops);
+}
+#else
+static inline void mtk_cirq_syscore_init(void) {}
+#endif
+
+static int __init mtk_cirq_of_init(struct device_node *node,
+				   struct device_node *parent)
+{
+	struct irq_domain *domain, *domain_parent;
+	unsigned int irq_num;
+	int ret;
+
+	domain_parent = irq_find_host(parent);
+	if (!domain_parent) {
+		pr_err("mtk_cirq: interrupt-parent not found\n");
+		return -EINVAL;
+	}
+
+	cirq_data = kzalloc(sizeof(*cirq_data), GFP_KERNEL);
+	if (!cirq_data)
+		return -ENOMEM;
+
+	cirq_data->base = of_iomap(node, 0);
+	if (!cirq_data->base) {
+		pr_err("mtk_cirq: unable to map cirq register\n");
+		ret = -ENXIO;
+		goto out_free;
+	}
+
+	ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 0,
+					 &cirq_data->ext_irq_start);
+	if (ret)
+		goto out_unmap;
+
+	ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 1,
+					 &cirq_data->ext_irq_end);
+	if (ret)
+		goto out_unmap;
+
+	irq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
+	domain = irq_domain_add_hierarchy(domain_parent, 0,
+					  irq_num, node,
+					  &cirq_domain_ops, cirq_data);
+	if (!domain) {
+		ret = -ENOMEM;
+		goto out_unmap;
+	}
+	cirq_data->domain = domain;
+
+	mtk_cirq_syscore_init();
+
+	return 0;
+
+out_unmap:
+	iounmap(cirq_data->base);
+out_free:
+	kfree(cirq_data);
+	return ret;
+}
+
+IRQCHIP_DECLARE(mtk_cirq, "mediatek,mtk-cirq", mtk_cirq_of_init);
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
new file mode 100644
index 0000000..90aaf19
--- /dev/null
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Joe.C <yingjoe.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct mtk_sysirq_chip_data {
+	spinlock_t lock;
+	u32 nr_intpol_bases;
+	void __iomem **intpol_bases;
+	u32 *intpol_words;
+	u8 *intpol_idx;
+	u16 *which_word;
+};
+
+static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type)
+{
+	irq_hw_number_t hwirq = data->hwirq;
+	struct mtk_sysirq_chip_data *chip_data = data->chip_data;
+	u8 intpol_idx = chip_data->intpol_idx[hwirq];
+	void __iomem *base;
+	u32 offset, reg_index, value;
+	unsigned long flags;
+	int ret;
+
+	base = chip_data->intpol_bases[intpol_idx];
+	reg_index = chip_data->which_word[hwirq];
+	offset = hwirq & 0x1f;
+
+	spin_lock_irqsave(&chip_data->lock, flags);
+	value = readl_relaxed(base + reg_index * 4);
+	if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) {
+		if (type == IRQ_TYPE_LEVEL_LOW)
+			type = IRQ_TYPE_LEVEL_HIGH;
+		else
+			type = IRQ_TYPE_EDGE_RISING;
+		value |= (1 << offset);
+	} else {
+		value &= ~(1 << offset);
+	}
+
+	writel_relaxed(value, base + reg_index * 4);
+
+	data = data->parent_data;
+	ret = data->chip->irq_set_type(data, type);
+	spin_unlock_irqrestore(&chip_data->lock, flags);
+	return ret;
+}
+
+static struct irq_chip mtk_sysirq_chip = {
+	.name			= "MT_SYSIRQ",
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_type		= mtk_sysirq_set_type,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+};
+
+static int mtk_sysirq_domain_translate(struct irq_domain *d,
+				       struct irq_fwspec *fwspec,
+				       unsigned long *hwirq,
+				       unsigned int *type)
+{
+	if (is_of_node(fwspec->fwnode)) {
+		if (fwspec->param_count != 3)
+			return -EINVAL;
+
+		/* No PPI should point to this domain */
+		if (fwspec->param[0] != 0)
+			return -EINVAL;
+
+		*hwirq = fwspec->param[1];
+		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				   unsigned int nr_irqs, void *arg)
+{
+	int i;
+	irq_hw_number_t hwirq;
+	struct irq_fwspec *fwspec = arg;
+	struct irq_fwspec gic_fwspec = *fwspec;
+
+	if (fwspec->param_count != 3)
+		return -EINVAL;
+
+	/* sysirq doesn't support PPI */
+	if (fwspec->param[0])
+		return -EINVAL;
+
+	hwirq = fwspec->param[1];
+	for (i = 0; i < nr_irqs; i++)
+		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+					      &mtk_sysirq_chip,
+					      domain->host_data);
+
+	gic_fwspec.fwnode = domain->parent->fwnode;
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_fwspec);
+}
+
+static const struct irq_domain_ops sysirq_domain_ops = {
+	.translate	= mtk_sysirq_domain_translate,
+	.alloc		= mtk_sysirq_domain_alloc,
+	.free		= irq_domain_free_irqs_common,
+};
+
+static int __init mtk_sysirq_of_init(struct device_node *node,
+				     struct device_node *parent)
+{
+	struct irq_domain *domain, *domain_parent;
+	struct mtk_sysirq_chip_data *chip_data;
+	int ret, size, intpol_num = 0, nr_intpol_bases = 0, i = 0;
+
+	domain_parent = irq_find_host(parent);
+	if (!domain_parent) {
+		pr_err("mtk_sysirq: interrupt-parent not found\n");
+		return -EINVAL;
+	}
+
+	chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
+	if (!chip_data)
+		return -ENOMEM;
+
+	while (of_get_address(node, i++, NULL, NULL))
+		nr_intpol_bases++;
+
+	if (nr_intpol_bases == 0) {
+		pr_err("mtk_sysirq: base address not specified\n");
+		ret = -EINVAL;
+		goto out_free_chip;
+	}
+
+	chip_data->intpol_words = kcalloc(nr_intpol_bases,
+					  sizeof(*chip_data->intpol_words),
+					  GFP_KERNEL);
+	if (!chip_data->intpol_words) {
+		ret = -ENOMEM;
+		goto out_free_chip;
+	}
+
+	chip_data->intpol_bases = kcalloc(nr_intpol_bases,
+					  sizeof(*chip_data->intpol_bases),
+					  GFP_KERNEL);
+	if (!chip_data->intpol_bases) {
+		ret = -ENOMEM;
+		goto out_free_intpol_words;
+	}
+
+	for (i = 0; i < nr_intpol_bases; i++) {
+		struct resource res;
+
+		ret = of_address_to_resource(node, i, &res);
+		size = resource_size(&res);
+		intpol_num += size * 8;
+		chip_data->intpol_words[i] = size / 4;
+		chip_data->intpol_bases[i] = of_iomap(node, i);
+		if (ret || !chip_data->intpol_bases[i]) {
+			pr_err("%pOF: couldn't map region %d\n", node, i);
+			ret = -ENODEV;
+			goto out_free_intpol;
+		}
+	}
+
+	chip_data->intpol_idx = kcalloc(intpol_num,
+					sizeof(*chip_data->intpol_idx),
+					GFP_KERNEL);
+	if (!chip_data->intpol_idx) {
+		ret = -ENOMEM;
+		goto out_free_intpol;
+	}
+
+	chip_data->which_word = kcalloc(intpol_num,
+					sizeof(*chip_data->which_word),
+					GFP_KERNEL);
+	if (!chip_data->which_word) {
+		ret = -ENOMEM;
+		goto out_free_intpol_idx;
+	}
+
+	/*
+	 * For each hwirq, precompute which intpol base and which 32-bit
+	 * word within it hold its polarity bit, so that
+	 * mtk_sysirq_set_type() can look them up quickly.
+	 */
+	for (i = 0; i < intpol_num; i++) {
+		u32 word = i / 32, j;
+
+		for (j = 0; word >= chip_data->intpol_words[j]; j++)
+			word -= chip_data->intpol_words[j];
+
+		chip_data->intpol_idx[i] = j;
+		chip_data->which_word[i] = word;
+	}
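+
+	/*
+	 * Worked example (illustrative sizes): with two intpol bases of
+	 * 8 and 4 words, hwirq 300 sits in word 300 / 32 = 9; since
+	 * 9 >= 8 it spills into base 1 as word 9 - 8 = 1, giving
+	 * intpol_idx = 1 and which_word = 1.
+	 */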
+
+	domain = irq_domain_add_hierarchy(domain_parent, 0, intpol_num, node,
+					  &sysirq_domain_ops, chip_data);
+	if (!domain) {
+		ret = -ENOMEM;
+		goto out_free_which_word;
+	}
+	spin_lock_init(&chip_data->lock);
+
+	return 0;
+
+out_free_which_word:
+	kfree(chip_data->which_word);
+out_free_intpol_idx:
+	kfree(chip_data->intpol_idx);
+out_free_intpol:
+	for (i = 0; i < nr_intpol_bases; i++)
+		if (chip_data->intpol_bases[i])
+			iounmap(chip_data->intpol_bases[i]);
+	kfree(chip_data->intpol_bases);
+out_free_intpol_words:
+	kfree(chip_data->intpol_words);
+out_free_chip:
+	kfree(chip_data);
+	return ret;
+}
+IRQCHIP_DECLARE(mtk_sysirq, "mediatek,mt6577-sysirq", mtk_sysirq_of_init);
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
new file mode 100644
index 0000000..3be5c5d
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define GICP_SETSPI_NSR_OFFSET	0x0
+#define GICP_CLRSPI_NSR_OFFSET	0x8
+
+struct mvebu_gicp_spi_range {
+	unsigned int start;
+	unsigned int count;
+};
+
+struct mvebu_gicp {
+	struct mvebu_gicp_spi_range *spi_ranges;
+	unsigned int spi_ranges_cnt;
+	unsigned int spi_cnt;
+	unsigned long *spi_bitmap;
+	spinlock_t spi_lock;
+	struct resource *res;
+	struct device *dev;
+};
+
+static int gicp_idx_to_spi(struct mvebu_gicp *gicp, int idx)
+{
+	int i;
+
+	for (i = 0; i < gicp->spi_ranges_cnt; i++) {
+		struct mvebu_gicp_spi_range *r = &gicp->spi_ranges[i];
+
+		if (idx < r->count)
+			return r->start + idx;
+
+		idx -= r->count;
+	}
+
+	return -EINVAL;
+}
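+
+/*
+ * Worked example (illustrative ranges): with spi_ranges of {64, 32} and
+ * {288, 64}, idx 40 does not fit in the first range, so it is reduced
+ * to 8 and resolves to SPI 288 + 8 = 296.
+ */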
+
+static void gicp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct mvebu_gicp *gicp = data->chip_data;
+	phys_addr_t setspi = gicp->res->start + GICP_SETSPI_NSR_OFFSET;
+	phys_addr_t clrspi = gicp->res->start + GICP_CLRSPI_NSR_OFFSET;
+
+	msg[0].data = data->hwirq;
+	msg[0].address_lo = lower_32_bits(setspi);
+	msg[0].address_hi = upper_32_bits(setspi);
+	msg[1].data = data->hwirq;
+	msg[1].address_lo = lower_32_bits(clrspi);
+	msg[1].address_hi = upper_32_bits(clrspi);
+}
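+
+/*
+ * Note on the two messages above (they tie in with
+ * IRQCHIP_SUPPORTS_LEVEL_MSI below): a write of the interrupt's cookie
+ * to the SETSPI doorbell raises the corresponding SPI at the GIC, and
+ * the same write to CLRSPI lowers it again, which is how a
+ * memory-mapped doorbell emulates a level-triggered wire.
+ */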
+
+static struct irq_chip gicp_irq_chip = {
+	.name			= "GICP",
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+	.irq_set_type		= irq_chip_set_type_parent,
+	.irq_compose_msi_msg	= gicp_compose_msi_msg,
+};
+
+static int gicp_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				 unsigned int nr_irqs, void *args)
+{
+	struct mvebu_gicp *gicp = domain->host_data;
+	struct irq_fwspec fwspec;
+	unsigned int hwirq;
+	int ret;
+
+	spin_lock(&gicp->spi_lock);
+	hwirq = find_first_zero_bit(gicp->spi_bitmap, gicp->spi_cnt);
+	if (hwirq == gicp->spi_cnt) {
+		spin_unlock(&gicp->spi_lock);
+		return -ENOSPC;
+	}
+	__set_bit(hwirq, gicp->spi_bitmap);
+	spin_unlock(&gicp->spi_lock);
+
+	fwspec.fwnode = domain->parent->fwnode;
+	fwspec.param_count = 3;
+	fwspec.param[0] = GIC_SPI;
+	fwspec.param[1] = gicp_idx_to_spi(gicp, hwirq) - 32;
+	/*
+	 * Assume edge rising for now, it will be properly set when
+	 * ->set_type() is called
+	 */
+	fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+	if (ret) {
+		dev_err(gicp->dev, "Cannot allocate parent IRQ\n");
+		goto free_hwirq;
+	}
+
+	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+					    &gicp_irq_chip, gicp);
+	if (ret)
+		goto free_irqs_parent;
+
+	return 0;
+
+free_irqs_parent:
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+free_hwirq:
+	spin_lock(&gicp->spi_lock);
+	__clear_bit(hwirq, gicp->spi_bitmap);
+	spin_unlock(&gicp->spi_lock);
+	return ret;
+}
+
+static void gicp_irq_domain_free(struct irq_domain *domain,
+				 unsigned int virq, unsigned int nr_irqs)
+{
+	struct mvebu_gicp *gicp = domain->host_data;
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+	if (d->hwirq >= gicp->spi_cnt) {
+		dev_err(gicp->dev, "Invalid hwirq %lu\n", d->hwirq);
+		return;
+	}
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+
+	spin_lock(&gicp->spi_lock);
+	__clear_bit(d->hwirq, gicp->spi_bitmap);
+	spin_unlock(&gicp->spi_lock);
+}
+
+static const struct irq_domain_ops gicp_domain_ops = {
+	.alloc	= gicp_irq_domain_alloc,
+	.free	= gicp_irq_domain_free,
+};
+
+static struct irq_chip gicp_msi_irq_chip = {
+	.name		= "GICP",
+	.irq_set_type	= irq_chip_set_type_parent,
+	.flags		= IRQCHIP_SUPPORTS_LEVEL_MSI,
+};
+
+static struct msi_domain_ops gicp_msi_ops = {
+};
+
+static struct msi_domain_info gicp_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_LEVEL_CAPABLE),
+	.ops	= &gicp_msi_ops,
+	.chip	= &gicp_msi_irq_chip,
+};
+
+static int mvebu_gicp_probe(struct platform_device *pdev)
+{
+	struct mvebu_gicp *gicp;
+	struct irq_domain *inner_domain, *plat_domain, *parent_domain;
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *irq_parent_dn;
+	int ret, i;
+
+	gicp = devm_kzalloc(&pdev->dev, sizeof(*gicp), GFP_KERNEL);
+	if (!gicp)
+		return -ENOMEM;
+
+	gicp->dev = &pdev->dev;
+	spin_lock_init(&gicp->spi_lock);
+
+	gicp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!gicp->res)
+		return -ENODEV;
+
+	ret = of_property_count_u32_elems(node, "marvell,spi-ranges");
+	if (ret < 0)
+		return ret;
+
+	gicp->spi_ranges_cnt = ret / 2;
+
+	gicp->spi_ranges =
+		devm_kcalloc(&pdev->dev,
+			     gicp->spi_ranges_cnt,
+			     sizeof(struct mvebu_gicp_spi_range),
+			     GFP_KERNEL);
+	if (!gicp->spi_ranges)
+		return -ENOMEM;
+
+	for (i = 0; i < gicp->spi_ranges_cnt; i++) {
+		of_property_read_u32_index(node, "marvell,spi-ranges",
+					   i * 2,
+					   &gicp->spi_ranges[i].start);
+
+		of_property_read_u32_index(node, "marvell,spi-ranges",
+					   i * 2 + 1,
+					   &gicp->spi_ranges[i].count);
+
+		gicp->spi_cnt += gicp->spi_ranges[i].count;
+	}
+
+	gicp->spi_bitmap = devm_kcalloc(&pdev->dev,
+				BITS_TO_LONGS(gicp->spi_cnt), sizeof(long),
+				GFP_KERNEL);
+	if (!gicp->spi_bitmap)
+		return -ENOMEM;
+
+	irq_parent_dn = of_irq_find_parent(node);
+	if (!irq_parent_dn) {
+		dev_err(&pdev->dev, "failed to find parent IRQ node\n");
+		return -ENODEV;
+	}
+
+	parent_domain = irq_find_host(irq_parent_dn);
+	if (!parent_domain) {
+		dev_err(&pdev->dev, "failed to find parent IRQ domain\n");
+		return -ENODEV;
+	}
+
+	inner_domain = irq_domain_create_hierarchy(parent_domain, 0,
+						   gicp->spi_cnt,
+						   of_node_to_fwnode(node),
+						   &gicp_domain_ops, gicp);
+	if (!inner_domain)
+		return -ENOMEM;
+
+	plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
+						     &gicp_msi_domain_info,
+						     inner_domain);
+	if (!plat_domain) {
+		irq_domain_remove(inner_domain);
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, gicp);
+
+	return 0;
+}
+
+static const struct of_device_id mvebu_gicp_of_match[] = {
+	{ .compatible = "marvell,ap806-gicp", },
+	{},
+};
+
+static struct platform_driver mvebu_gicp_driver = {
+	.probe  = mvebu_gicp_probe,
+	.driver = {
+		.name = "mvebu-gicp",
+		.of_match_table = mvebu_gicp_of_match,
+	},
+};
+builtin_platform_driver(mvebu_gicp_driver);
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
new file mode 100644
index 0000000..1306333
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Hanna Hawa <hannah@marvell.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/interrupt-controller/mvebu-icu.h>
+
+/* ICU registers */
+#define ICU_SETSPI_NSR_AL	0x10
+#define ICU_SETSPI_NSR_AH	0x14
+#define ICU_CLRSPI_NSR_AL	0x18
+#define ICU_CLRSPI_NSR_AH	0x1c
+#define ICU_INT_CFG(x)          (0x100 + 4 * (x))
+#define   ICU_INT_ENABLE	BIT(24)
+#define   ICU_IS_EDGE		BIT(28)
+#define   ICU_GROUP_SHIFT	29
+
+/* ICU definitions */
+#define ICU_MAX_IRQS		207
+#define ICU_SATA0_ICU_ID	109
+#define ICU_SATA1_ICU_ID	107
+
+struct mvebu_icu {
+	struct irq_chip irq_chip;
+	void __iomem *base;
+	struct irq_domain *domain;
+	struct device *dev;
+	atomic_t initialized;
+};
+
+struct mvebu_icu_irq_data {
+	struct mvebu_icu *icu;
+	unsigned int icu_group;
+	unsigned int type;
+};
+
+static void mvebu_icu_init(struct mvebu_icu *icu, struct msi_msg *msg)
+{
+	if (atomic_cmpxchg(&icu->initialized, false, true))
+		return;
+
+	/* Set Clear/Set ICU SPI message address in AP */
+	writel_relaxed(msg[0].address_hi, icu->base + ICU_SETSPI_NSR_AH);
+	writel_relaxed(msg[0].address_lo, icu->base + ICU_SETSPI_NSR_AL);
+	writel_relaxed(msg[1].address_hi, icu->base + ICU_CLRSPI_NSR_AH);
+	writel_relaxed(msg[1].address_lo, icu->base + ICU_CLRSPI_NSR_AL);
+}
+
+static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+	struct irq_data *d = irq_get_irq_data(desc->irq);
+	struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
+	struct mvebu_icu *icu = icu_irqd->icu;
+	unsigned int icu_int;
+
+	if (msg->address_lo || msg->address_hi) {
+		/* One-off initialization */
+		mvebu_icu_init(icu, msg);
+		/* Configure the ICU with irq number & type */
+		icu_int = msg->data | ICU_INT_ENABLE;
+		if (icu_irqd->type & IRQ_TYPE_EDGE_RISING)
+			icu_int |= ICU_IS_EDGE;
+		icu_int |= icu_irqd->icu_group << ICU_GROUP_SHIFT;
+	} else {
+		/* De-configure the ICU */
+		icu_int = 0;
+	}
+
+	writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq));
+
+	/*
+	 * The SATA unit has 2 ports, each with a dedicated ICU entry.
+	 * The AHCI SATA driver supports only one interrupt per SATA
+	 * unit. To resolve this conflict, the 2 SATA wired interrupts
+	 * in the south bridge are configured as a single GIC interrupt
+	 * in the north bridge: whenever the sata node is enabled, both
+	 * entries are configured, regardless of which port is actually
+	 * in use.
+	 */
+	if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) {
+		writel_relaxed(icu_int,
+			       icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID));
+		writel_relaxed(icu_int,
+			       icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID));
+	}
+}
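+
+/*
+ * Lifetime sketch, as read from the code above: the platform-MSI parent
+ * invokes this callback twice. On allocation it passes a real
+ * SETSPI/CLRSPI address pair, so ICU_INT_CFG(hwirq) is programmed with
+ * the SPI number, the enable bit, the optional edge flag and the group;
+ * on teardown it passes a zeroed message and the entry is cleared.
+ */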
+
+static int
+mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+			       unsigned long *hwirq, unsigned int *type)
+{
+	struct mvebu_icu *icu = d->host_data;
+	unsigned int icu_group;
+
+	/* Check the parameter count of the DT specifier */
+	if (WARN_ON(fwspec->param_count < 3)) {
+		dev_err(icu->dev, "wrong ICU parameter count %d\n",
+			fwspec->param_count);
+		return -EINVAL;
+	}
+
+	/* Only ICU group type is handled */
+	icu_group = fwspec->param[0];
+	if (icu_group != ICU_GRP_NSR && icu_group != ICU_GRP_SR &&
+	    icu_group != ICU_GRP_SEI && icu_group != ICU_GRP_REI) {
+		dev_err(icu->dev, "wrong ICU group type %x\n", icu_group);
+		return -EINVAL;
+	}
+
+	*hwirq = fwspec->param[1];
+	if (*hwirq >= ICU_MAX_IRQS) {
+		dev_err(icu->dev, "invalid interrupt number %ld\n", *hwirq);
+		return -EINVAL;
+	}
+
+	/* Mask the type to prevent wrong DT configuration */
+	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+
+	return 0;
+}
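+
+/*
+ * Illustrative specifier (assumed consumer, shown only to make the
+ * checks above concrete):
+ *
+ *	interrupts = <ICU_GRP_NSR 77 IRQ_TYPE_LEVEL_HIGH>;
+ *
+ * param[0] is the ICU group, param[1] the ICU interrupt number (below
+ * ICU_MAX_IRQS) and param[2] the trigger type.
+ */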
+
+static int
+mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+			   unsigned int nr_irqs, void *args)
+{
+	int err;
+	unsigned long hwirq;
+	struct irq_fwspec *fwspec = args;
+	struct mvebu_icu *icu = platform_msi_get_host_data(domain);
+	struct mvebu_icu_irq_data *icu_irqd;
+
+	icu_irqd = kmalloc(sizeof(*icu_irqd), GFP_KERNEL);
+	if (!icu_irqd)
+		return -ENOMEM;
+
+	err = mvebu_icu_irq_domain_translate(domain, fwspec, &hwirq,
+					     &icu_irqd->type);
+	if (err) {
+		dev_err(icu->dev, "failed to translate ICU parameters\n");
+		goto free_irqd;
+	}
+
+	icu_irqd->icu_group = fwspec->param[0];
+	icu_irqd->icu = icu;
+
+	err = platform_msi_domain_alloc(domain, virq, nr_irqs);
+	if (err) {
+		dev_err(icu->dev, "failed to allocate ICU interrupt in parent domain\n");
+		goto free_irqd;
+	}
+
+	/* Make sure there is no interrupt left pending by the firmware */
+	err = irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false);
+	if (err)
+		goto free_msi;
+
+	err = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+					    &icu->irq_chip, icu_irqd);
+	if (err) {
+		dev_err(icu->dev, "failed to set the data to IRQ domain\n");
+		goto free_msi;
+	}
+
+	return 0;
+
+free_msi:
+	platform_msi_domain_free(domain, virq, nr_irqs);
+free_irqd:
+	kfree(icu_irqd);
+	return err;
+}
+
+static void
+mvebu_icu_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+			  unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_get_irq_data(virq);
+	struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
+
+	kfree(icu_irqd);
+
+	platform_msi_domain_free(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops mvebu_icu_domain_ops = {
+	.translate = mvebu_icu_irq_domain_translate,
+	.alloc     = mvebu_icu_irq_domain_alloc,
+	.free      = mvebu_icu_irq_domain_free,
+};
+
+static int mvebu_icu_probe(struct platform_device *pdev)
+{
+	struct mvebu_icu *icu;
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *gicp_dn;
+	struct resource *res;
+	int i;
+
+	icu = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_icu),
+			   GFP_KERNEL);
+	if (!icu)
+		return -ENOMEM;
+
+	icu->dev = &pdev->dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	icu->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(icu->base)) {
+		dev_err(&pdev->dev, "Failed to map icu base address.\n");
+		return PTR_ERR(icu->base);
+	}
+
+	icu->irq_chip.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+					    "ICU.%x",
+					    (unsigned int)res->start);
+	if (!icu->irq_chip.name)
+		return -ENOMEM;
+
+	icu->irq_chip.irq_mask = irq_chip_mask_parent;
+	icu->irq_chip.irq_unmask = irq_chip_unmask_parent;
+	icu->irq_chip.irq_eoi = irq_chip_eoi_parent;
+	icu->irq_chip.irq_set_type = irq_chip_set_type_parent;
+#ifdef CONFIG_SMP
+	icu->irq_chip.irq_set_affinity = irq_chip_set_affinity_parent;
+#endif
+
+	/*
+	 * We're probed after MSI domains have been resolved, so force
+	 * resolution here.
+	 */
+	pdev->dev.msi_domain = of_msi_get_domain(&pdev->dev, node,
+						 DOMAIN_BUS_PLATFORM_MSI);
+	if (!pdev->dev.msi_domain)
+		return -EPROBE_DEFER;
+
+	gicp_dn = irq_domain_get_of_node(pdev->dev.msi_domain);
+	if (!gicp_dn)
+		return -ENODEV;
+
+	/*
+	 * Clean all ICU interrupts with type SPI_NSR, required to
+	 * avoid unpredictable SPI assignments done by firmware.
+	 */
+	for (i = 0; i < ICU_MAX_IRQS; i++) {
+		u32 icu_int = readl_relaxed(icu->base + ICU_INT_CFG(i));
+		if ((icu_int >> ICU_GROUP_SHIFT) == ICU_GRP_NSR)
+			writel_relaxed(0x0, icu->base + ICU_INT_CFG(i));
+	}
+
+	icu->domain =
+		platform_msi_create_device_domain(&pdev->dev, ICU_MAX_IRQS,
+						  mvebu_icu_write_msg,
+						  &mvebu_icu_domain_ops, icu);
+	if (!icu->domain) {
+		dev_err(&pdev->dev, "Failed to create ICU domain\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id mvebu_icu_of_match[] = {
+	{ .compatible = "marvell,cp110-icu", },
+	{},
+};
+
+static struct platform_driver mvebu_icu_driver = {
+	.probe  = mvebu_icu_probe,
+	.driver = {
+		.name = "mvebu-icu",
+		.of_match_table = mvebu_icu_of_match,
+	},
+};
+builtin_platform_driver(mvebu_icu_driver);
diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c
new file mode 100644
index 0000000..b4d3678
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-odmi.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2016 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "GIC-ODMI: " fmt
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define GICP_ODMIN_SET			0x40
+#define   GICP_ODMI_INT_NUM_SHIFT	12
+#define GICP_ODMIN_GM_EP_R0		0x110
+#define GICP_ODMIN_GM_EP_R1		0x114
+#define GICP_ODMIN_GM_EA_R0		0x108
+#define GICP_ODMIN_GM_EA_R1		0x118
+
+/*
+ * We don't support the group events, so we simply have 8 interrupts
+ * per frame.
+ */
+#define NODMIS_SHIFT		3
+#define NODMIS_PER_FRAME	(1 << NODMIS_SHIFT)
+#define NODMIS_MASK		(NODMIS_PER_FRAME - 1)
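+
+/*
+ * Worked arithmetic (illustrative): hwirq 19 decomposes into frame
+ * 19 >> NODMIS_SHIFT = 2 and line 19 & NODMIS_MASK = 3, i.e. ODMI
+ * line 3 of the third frame.
+ */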
+
+struct odmi_data {
+	struct resource res;
+	void __iomem *base;
+	unsigned int spi_base;
+};
+
+static struct odmi_data *odmis;
+static unsigned long *odmis_bm;
+static unsigned int odmis_count;
+
+/* Protects odmis_bm */
+static DEFINE_SPINLOCK(odmis_bm_lock);
+
+static void odmi_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+	struct odmi_data *odmi;
+	phys_addr_t addr;
+	unsigned int odmin;
+
+	if (WARN_ON(d->hwirq >= odmis_count * NODMIS_PER_FRAME))
+		return;
+
+	odmi = &odmis[d->hwirq >> NODMIS_SHIFT];
+	odmin = d->hwirq & NODMIS_MASK;
+
+	addr = odmi->res.start + GICP_ODMIN_SET;
+
+	msg->address_hi = upper_32_bits(addr);
+	msg->address_lo = lower_32_bits(addr);
+	msg->data = odmin << GICP_ODMI_INT_NUM_SHIFT;
+}
+
+static struct irq_chip odmi_irq_chip = {
+	.name			= "ODMI",
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+	.irq_compose_msi_msg	= odmi_compose_msi_msg,
+};
+
+static int odmi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				 unsigned int nr_irqs, void *args)
+{
+	struct odmi_data *odmi = NULL;
+	struct irq_fwspec fwspec;
+	struct irq_data *d;
+	unsigned int hwirq, odmin;
+	int ret;
+
+	spin_lock(&odmis_bm_lock);
+	hwirq = find_first_zero_bit(odmis_bm, NODMIS_PER_FRAME * odmis_count);
+	if (hwirq >= NODMIS_PER_FRAME * odmis_count) {
+		spin_unlock(&odmis_bm_lock);
+		return -ENOSPC;
+	}
+
+	__set_bit(hwirq, odmis_bm);
+	spin_unlock(&odmis_bm_lock);
+
+	odmi = &odmis[hwirq >> NODMIS_SHIFT];
+	odmin = hwirq & NODMIS_MASK;
+
+	fwspec.fwnode = domain->parent->fwnode;
+	fwspec.param_count = 3;
+	fwspec.param[0] = GIC_SPI;
+	fwspec.param[1] = odmi->spi_base - 32 + odmin;
+	fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+	if (ret) {
+		pr_err("Cannot allocate parent IRQ\n");
+		spin_lock(&odmis_bm_lock);
+		__clear_bit(hwirq, odmis_bm);	/* undo the __set_bit() above */
+		spin_unlock(&odmis_bm_lock);
+		return ret;
+	}
+
+	/* Configure the interrupt line to be edge */
+	d = irq_domain_get_irq_data(domain->parent, virq);
+	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+
+	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+				      &odmi_irq_chip, NULL);
+
+	return 0;
+}
+
+static void odmi_irq_domain_free(struct irq_domain *domain,
+				 unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+	if (d->hwirq >= odmis_count * NODMIS_PER_FRAME) {
+		pr_err("Failed to teardown msi. Invalid hwirq %lu\n", d->hwirq);
+		return;
+	}
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+
+	/* Actually free the MSI */
+	spin_lock(&odmis_bm_lock);
+	__clear_bit(d->hwirq, odmis_bm);
+	spin_unlock(&odmis_bm_lock);
+}
+
+static const struct irq_domain_ops odmi_domain_ops = {
+	.alloc	= odmi_irq_domain_alloc,
+	.free	= odmi_irq_domain_free,
+};
+
+static struct irq_chip odmi_msi_irq_chip = {
+	.name	= "ODMI",
+};
+
+static struct msi_domain_ops odmi_msi_ops = {
+};
+
+static struct msi_domain_info odmi_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+	.ops	= &odmi_msi_ops,
+	.chip	= &odmi_msi_irq_chip,
+};
+
+static int __init mvebu_odmi_init(struct device_node *node,
+				  struct device_node *parent)
+{
+	struct irq_domain *inner_domain, *plat_domain;
+	int ret, i;
+
+	if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
+		return -EINVAL;
+
+	odmis = kcalloc(odmis_count, sizeof(struct odmi_data), GFP_KERNEL);
+	if (!odmis)
+		return -ENOMEM;
+
+	odmis_bm = kcalloc(BITS_TO_LONGS(odmis_count * NODMIS_PER_FRAME),
+			   sizeof(long), GFP_KERNEL);
+	if (!odmis_bm) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	for (i = 0; i < odmis_count; i++) {
+		struct odmi_data *odmi = &odmis[i];
+
+		ret = of_address_to_resource(node, i, &odmi->res);
+		if (ret)
+			goto err_unmap;
+
+		odmi->base = of_io_request_and_map(node, i, "odmi");
+		if (IS_ERR(odmi->base)) {
+			ret = PTR_ERR(odmi->base);
+			goto err_unmap;
+		}
+
+		if (of_property_read_u32_index(node, "marvell,spi-base",
+					       i, &odmi->spi_base)) {
+			ret = -EINVAL;
+			goto err_unmap;
+		}
+	}
+
+	inner_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+						odmis_count * NODMIS_PER_FRAME,
+						&odmi_domain_ops, NULL);
+	if (!inner_domain) {
+		ret = -ENOMEM;
+		goto err_unmap;
+	}
+
+	inner_domain->parent = irq_find_host(parent);
+
+	plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
+						     &odmi_msi_domain_info,
+						     inner_domain);
+	if (!plat_domain) {
+		ret = -ENOMEM;
+		goto err_remove_inner;
+	}
+
+	return 0;
+
+err_remove_inner:
+	irq_domain_remove(inner_domain);
+err_unmap:
+	for (i = 0; i < odmis_count; i++) {
+		struct odmi_data *odmi = &odmis[i];
+
+		if (odmi->base && !IS_ERR(odmi->base))
+			iounmap(odmis[i].base);
+	}
+	kfree(odmis_bm);
+err_alloc:
+	kfree(odmis);
+	return ret;
+}
+
+IRQCHIP_DECLARE(mvebu_odmi, "marvell,odmi-controller", mvebu_odmi_init);
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
new file mode 100644
index 0000000..eec6395
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2016 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#define PIC_CAUSE	       0x0
+#define PIC_MASK	       0x4
+
+#define PIC_MAX_IRQS		32
+#define PIC_MAX_IRQ_MASK	((1UL << PIC_MAX_IRQS) - 1)
+
+struct mvebu_pic {
+	void __iomem *base;
+	u32 parent_irq;
+	struct irq_domain *domain;
+	struct irq_chip irq_chip;
+};
+
+static void mvebu_pic_reset(struct mvebu_pic *pic)
+{
+	/* ACK and mask all interrupts */
+	writel(0, pic->base + PIC_MASK);
+	writel(PIC_MAX_IRQ_MASK, pic->base + PIC_CAUSE);
+}
+
+static void mvebu_pic_eoi_irq(struct irq_data *d)
+{
+	struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);
+
+	writel(1 << d->hwirq, pic->base + PIC_CAUSE);
+}
+
+static void mvebu_pic_mask_irq(struct irq_data *d)
+{
+	struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);
+	u32 reg;
+
+	reg =  readl(pic->base + PIC_MASK);
+	reg |= (1 << d->hwirq);
+	writel(reg, pic->base + PIC_MASK);
+}
+
+static void mvebu_pic_unmask_irq(struct irq_data *d)
+{
+	struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);
+	u32 reg;
+
+	reg = readl(pic->base + PIC_MASK);
+	reg &= ~(1 << d->hwirq);
+	writel(reg, pic->base + PIC_MASK);
+}
+
+static int mvebu_pic_irq_map(struct irq_domain *domain, unsigned int virq,
+			     irq_hw_number_t hwirq)
+{
+	struct mvebu_pic *pic = domain->host_data;
+
+	irq_set_percpu_devid(virq);
+	irq_set_chip_data(virq, pic);
+	irq_set_chip_and_handler(virq, &pic->irq_chip,
+				 handle_percpu_devid_irq);
+	irq_set_status_flags(virq, IRQ_LEVEL);
+	irq_set_probe(virq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops mvebu_pic_domain_ops = {
+	.map = mvebu_pic_irq_map,
+	.xlate = irq_domain_xlate_onecell,
+};
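+
+/*
+ * Usage note, as read from mvebu_pic_irq_map() above: every PIC line is
+ * registered as a percpu_devid IRQ because the PIC cascades from a
+ * per-CPU parent line, so consumers are expected to use
+ * request_percpu_irq()/enable_percpu_irq() rather than request_irq().
+ */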
+
+static void mvebu_pic_handle_cascade_irq(struct irq_desc *desc)
+{
+	struct mvebu_pic *pic = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned long irqmap, irqn;
+	unsigned int cascade_irq;
+
+	irqmap = readl_relaxed(pic->base + PIC_CAUSE);
+	chained_irq_enter(chip, desc);
+
+	for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
+		cascade_irq = irq_find_mapping(pic->domain, irqn);
+		generic_handle_irq(cascade_irq);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static void mvebu_pic_enable_percpu_irq(void *data)
+{
+	struct mvebu_pic *pic = data;
+
+	mvebu_pic_reset(pic);
+	enable_percpu_irq(pic->parent_irq, IRQ_TYPE_NONE);
+}
+
+static void mvebu_pic_disable_percpu_irq(void *data)
+{
+	struct mvebu_pic *pic = data;
+
+	disable_percpu_irq(pic->parent_irq);
+}
+
+static int mvebu_pic_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct mvebu_pic *pic;
+	struct irq_chip *irq_chip;
+	struct resource *res;
+
+	pic = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pic), GFP_KERNEL);
+	if (!pic)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pic->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pic->base))
+		return PTR_ERR(pic->base);
+
+	irq_chip = &pic->irq_chip;
+	irq_chip->name = dev_name(&pdev->dev);
+	irq_chip->irq_mask = mvebu_pic_mask_irq;
+	irq_chip->irq_unmask = mvebu_pic_unmask_irq;
+	irq_chip->irq_eoi = mvebu_pic_eoi_irq;
+
+	pic->parent_irq = irq_of_parse_and_map(node, 0);
+	if (pic->parent_irq <= 0) {
+		dev_err(&pdev->dev, "Failed to parse parent interrupt\n");
+		return -EINVAL;
+	}
+
+	pic->domain = irq_domain_add_linear(node, PIC_MAX_IRQS,
+					    &mvebu_pic_domain_ops, pic);
+	if (!pic->domain) {
+		dev_err(&pdev->dev, "Failed to allocate irq domain\n");
+		return -ENOMEM;
+	}
+
+	irq_set_chained_handler(pic->parent_irq, mvebu_pic_handle_cascade_irq);
+	irq_set_handler_data(pic->parent_irq, pic);
+
+	on_each_cpu(mvebu_pic_enable_percpu_irq, pic, 1);
+
+	platform_set_drvdata(pdev, pic);
+
+	return 0;
+}
+
+static int mvebu_pic_remove(struct platform_device *pdev)
+{
+	struct mvebu_pic *pic = platform_get_drvdata(pdev);
+
+	on_each_cpu(mvebu_pic_disable_percpu_irq, pic, 1);
+	irq_domain_remove(pic->domain);
+
+	return 0;
+}
+
+static const struct of_device_id mvebu_pic_of_match[] = {
+	{ .compatible = "marvell,armada-8k-pic", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mvebu_pic_of_match);
+
+static struct platform_driver mvebu_pic_driver = {
+	.probe  = mvebu_pic_probe,
+	.remove = mvebu_pic_remove,
+	.driver = {
+		.name = "mvebu-pic",
+		.of_match_table = mvebu_pic_of_match,
+	},
+};
+module_platform_driver(mvebu_pic_driver);
+
+MODULE_AUTHOR("Yehuda Yitschak <yehuday@marvell.com>");
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mvebu_pic");
+
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
new file mode 100644
index 0000000..e8b31f5
--- /dev/null
+++ b/drivers/irqchip/irq-mxs.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2014 Oleksij Rempel <linux@rempel-privat.de>
+ *	Add Alphascale ASM9260 support.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/stmp_device.h>
+#include <asm/exception.h>
+
+#include "alphascale_asm9260-icoll.h"
+
+/*
+ * This device provides 4 offsets for each register:
+ * 0x0 - plain read/write mode
+ * 0x4 - set mode (OR logic)
+ * 0x8 - clear mode (AND-NOT logic, clears the written bits)
+ * 0xc - toggle mode (XOR logic)
+ */
+#define SET_REG 4
+#define CLR_REG 8
+
+#define HW_ICOLL_VECTOR				0x0000
+#define HW_ICOLL_LEVELACK			0x0010
+#define HW_ICOLL_CTRL				0x0020
+#define HW_ICOLL_STAT_OFFSET			0x0070
+#define HW_ICOLL_INTERRUPT0			0x0120
+#define HW_ICOLL_INTERRUPTn(n)			((n) * 0x10)
+#define BM_ICOLL_INTR_ENABLE			BIT(2)
+#define BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0	0x1
+
+#define ICOLL_NUM_IRQS		128
+
+enum icoll_type {
+	ICOLL,
+	ASM9260_ICOLL,
+};
+
+struct icoll_priv {
+	void __iomem *vector;
+	void __iomem *levelack;
+	void __iomem *ctrl;
+	void __iomem *stat;
+	void __iomem *intr;
+	void __iomem *clear;
+	enum icoll_type type;
+};
+
+static struct icoll_priv icoll_priv;
+static struct irq_domain *icoll_domain;
+
+/* calculate the bit offset depending on the number of interrupts per register */
+static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit)
+{
+	/*
+	 * Mask the lower part of the hwirq to convert it to 0, 1, 2 or 3,
+	 * then multiply by 8 (i.e. shift left by 3).
+	 */
+	return bit << ((d->hwirq & 3) << 3);
+}
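+
+/*
+ * Worked example (illustrative): for hwirq 13, (13 & 3) << 3 == 8, so
+ * BM_ICOLL_INTR_ENABLE (BIT(2)) becomes BIT(10): the enable bit of the
+ * second of the four interrupt slots packed into that register.
+ */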
+
+/* calculate the register offset depending on the number of interrupts per register */
+static void __iomem *icoll_intr_reg(struct irq_data *d)
+{
+	/* offset = hwirq / intr_per_reg * 0x10 */
+	return icoll_priv.intr + ((d->hwirq >> 2) * 0x10);
+}
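+
+/*
+ * Worked example for the two helpers above: for hwirq 5,
+ * icoll_intr_reg() returns intr + 0x10 (5 >> 2 == 1, one 0x10-spaced
+ * register per 4 irqs), and icoll_intr_bitshift() shifts the bit by
+ * (5 & 3) * 8 == 8 positions, i.e. into the byte lane of irq 5.
+ */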
+
+static void icoll_ack_irq(struct irq_data *d)
+{
+	/*
+	 * The Interrupt Collector is able to prioritize irqs.
+	 * Currently only level 0 is used. So acking can use
+	 * BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 unconditionally.
+	 */
+	__raw_writel(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0,
+			icoll_priv.levelack);
+}
+
+static void icoll_mask_irq(struct irq_data *d)
+{
+	__raw_writel(BM_ICOLL_INTR_ENABLE,
+			icoll_priv.intr + CLR_REG + HW_ICOLL_INTERRUPTn(d->hwirq));
+}
+
+static void icoll_unmask_irq(struct irq_data *d)
+{
+	__raw_writel(BM_ICOLL_INTR_ENABLE,
+			icoll_priv.intr + SET_REG + HW_ICOLL_INTERRUPTn(d->hwirq));
+}
+
+static void asm9260_mask_irq(struct irq_data *d)
+{
+	__raw_writel(icoll_intr_bitshift(d, BM_ICOLL_INTR_ENABLE),
+			icoll_intr_reg(d) + CLR_REG);
+}
+
+static void asm9260_unmask_irq(struct irq_data *d)
+{
+	__raw_writel(ASM9260_BM_CLEAR_BIT(d->hwirq),
+		     icoll_priv.clear +
+		     ASM9260_HW_ICOLL_CLEARn(d->hwirq));
+
+	__raw_writel(icoll_intr_bitshift(d, BM_ICOLL_INTR_ENABLE),
+			icoll_intr_reg(d) + SET_REG);
+}
+
+static struct irq_chip mxs_icoll_chip = {
+	.irq_ack = icoll_ack_irq,
+	.irq_mask = icoll_mask_irq,
+	.irq_unmask = icoll_unmask_irq,
+	.flags = IRQCHIP_MASK_ON_SUSPEND |
+		 IRQCHIP_SKIP_SET_WAKE,
+};
+
+static struct irq_chip asm9260_icoll_chip = {
+	.irq_ack = icoll_ack_irq,
+	.irq_mask = asm9260_mask_irq,
+	.irq_unmask = asm9260_unmask_irq,
+	.flags = IRQCHIP_MASK_ON_SUSPEND |
+		 IRQCHIP_SKIP_SET_WAKE,
+};
+
+asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
+{
+	u32 irqnr;
+
+	irqnr = __raw_readl(icoll_priv.stat);
+	__raw_writel(irqnr, icoll_priv.vector);
+	handle_domain_irq(icoll_domain, irqnr, regs);
+}
+
+static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
+				irq_hw_number_t hw)
+{
+	struct irq_chip *chip;
+
+	if (icoll_priv.type == ICOLL)
+		chip = &mxs_icoll_chip;
+	else
+		chip = &asm9260_icoll_chip;
+
+	irq_set_chip_and_handler(virq, chip, handle_level_irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops icoll_irq_domain_ops = {
+	.map = icoll_irq_domain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+static void __init icoll_add_domain(struct device_node *np,
+			  int num)
+{
+	icoll_domain = irq_domain_add_linear(np, num,
+					     &icoll_irq_domain_ops, NULL);
+
+	if (!icoll_domain)
+		panic("%pOF: unable to create irq domain", np);
+}
+
+static void __iomem * __init icoll_init_iobase(struct device_node *np)
+{
+	void __iomem *icoll_base;
+
+	icoll_base = of_io_request_and_map(np, 0, np->name);
+	if (IS_ERR(icoll_base))
+		panic("%pOF: unable to map resource", np);
+	return icoll_base;
+}
+
+static int __init icoll_of_init(struct device_node *np,
+			  struct device_node *interrupt_parent)
+{
+	void __iomem *icoll_base;
+
+	icoll_priv.type = ICOLL;
+
+	icoll_base		= icoll_init_iobase(np);
+	icoll_priv.vector	= icoll_base + HW_ICOLL_VECTOR;
+	icoll_priv.levelack	= icoll_base + HW_ICOLL_LEVELACK;
+	icoll_priv.ctrl		= icoll_base + HW_ICOLL_CTRL;
+	icoll_priv.stat		= icoll_base + HW_ICOLL_STAT_OFFSET;
+	icoll_priv.intr		= icoll_base + HW_ICOLL_INTERRUPT0;
+	icoll_priv.clear	= NULL;
+
+	/*
+	 * Interrupt Collector reset, which initializes the priority
+	 * for each irq to level 0.
+	 */
+	stmp_reset_block(icoll_priv.ctrl);
+
+	icoll_add_domain(np, ICOLL_NUM_IRQS);
+
+	return 0;
+}
+IRQCHIP_DECLARE(mxs, "fsl,icoll", icoll_of_init);
+
+static int __init asm9260_of_init(struct device_node *np,
+			  struct device_node *interrupt_parent)
+{
+	void __iomem *icoll_base;
+	int i;
+
+	icoll_priv.type = ASM9260_ICOLL;
+
+	icoll_base = icoll_init_iobase(np);
+	icoll_priv.vector	= icoll_base + ASM9260_HW_ICOLL_VECTOR;
+	icoll_priv.levelack	= icoll_base + ASM9260_HW_ICOLL_LEVELACK;
+	icoll_priv.ctrl		= icoll_base + ASM9260_HW_ICOLL_CTRL;
+	icoll_priv.stat		= icoll_base + ASM9260_HW_ICOLL_STAT_OFFSET;
+	icoll_priv.intr		= icoll_base + ASM9260_HW_ICOLL_INTERRUPT0;
+	icoll_priv.clear	= icoll_base + ASM9260_HW_ICOLL_CLEAR0;
+
+	writel_relaxed(ASM9260_BM_CTRL_IRQ_ENABLE,
+			icoll_priv.ctrl);
+	/*
+	 * The ASM9260 doesn't provide a reset bit, so we need to set
+	 * level 0 manually.
+	 */
+	for (i = 0; i < 16 * 0x10; i += 0x10)
+		writel(0, icoll_priv.intr + i);
+
+	icoll_add_domain(np, ASM9260_NUM_IRQS);
+	set_handle_irq(icoll_handle_irq);
+
+	return 0;
+}
+IRQCHIP_DECLARE(asm9260, "alphascale,asm9260-icoll", asm9260_of_init);
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
new file mode 100644
index 0000000..b177710
--- /dev/null
+++ b/drivers/irqchip/irq-nvic.c
@@ -0,0 +1,147 @@
+/*
+ * drivers/irqchip/irq-nvic.c
+ *
+ * Copyright (C) 2008 ARM Limited, All Rights Reserved.
+ * Copyright (C) 2013 Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Support for the Nested Vectored Interrupt Controller found on the
+ * ARMv7-M CPUs (Cortex-M3/M4)
+ */
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+
+#include <asm/v7m.h>
+#include <asm/exception.h>
+
+#define NVIC_ISER		0x000
+#define NVIC_ICER		0x080
+#define NVIC_IPR		0x300
+
+#define NVIC_MAX_BANKS		16
+/*
+ * Each bank handles 32 irqs, except the 16th (= last) bank, which
+ * handles only 16 irqs.
+ */
+#define NVIC_MAX_IRQ		((NVIC_MAX_BANKS - 1) * 32 + 16)
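+
+/* With all 16 banks present: (16 - 1) * 32 + 16 == 496 interrupts. */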
+
+static struct irq_domain *nvic_irq_domain;
+
+asmlinkage void __exception_irq_entry
+nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
+{
+	unsigned int irq = irq_linear_revmap(nvic_irq_domain, hwirq);
+
+	handle_IRQ(irq, regs);
+}
+
+static int nvic_irq_domain_translate(struct irq_domain *d,
+				     struct irq_fwspec *fwspec,
+				     unsigned long *hwirq, unsigned int *type)
+{
+	if (WARN_ON(fwspec->param_count < 1))
+		return -EINVAL;
+	*hwirq = fwspec->param[0];
+	*type = IRQ_TYPE_NONE;
+	return 0;
+}
+
+static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				unsigned int nr_irqs, void *arg)
+{
+	int i, ret;
+	irq_hw_number_t hwirq;
+	unsigned int type = IRQ_TYPE_NONE;
+	struct irq_fwspec *fwspec = arg;
+
+	ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < nr_irqs; i++)
+		irq_map_generic_chip(domain, virq + i, hwirq + i);
+
+	return 0;
+}
+
+static const struct irq_domain_ops nvic_irq_domain_ops = {
+	.translate = nvic_irq_domain_translate,
+	.alloc = nvic_irq_domain_alloc,
+	.free = irq_domain_free_irqs_top,
+};
+
+static int __init nvic_of_init(struct device_node *node,
+			       struct device_node *parent)
+{
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+	unsigned int irqs, i, ret, numbanks;
+	void __iomem *nvic_base;
+
+	numbanks = (readl_relaxed(V7M_SCS_ICTR) &
+		    V7M_SCS_ICTR_INTLINESNUM_MASK) + 1;
+
+	nvic_base = of_iomap(node, 0);
+	if (!nvic_base) {
+		pr_warn("unable to map nvic registers\n");
+		return -ENOMEM;
+	}
+
+	irqs = numbanks * 32;
+	if (irqs > NVIC_MAX_IRQ)
+		irqs = NVIC_MAX_IRQ;
+
+	nvic_irq_domain =
+		irq_domain_add_linear(node, irqs, &nvic_irq_domain_ops, NULL);
+
+	if (!nvic_irq_domain) {
+		pr_warn("Failed to allocate irq domain\n");
+		return -ENOMEM;
+	}
+
+	ret = irq_alloc_domain_generic_chips(nvic_irq_domain, 32, 1,
+					     "nvic_irq", handle_fasteoi_irq,
+					     clr, 0, IRQ_GC_INIT_MASK_CACHE);
+	if (ret) {
+		pr_warn("Failed to allocate irq chips\n");
+		irq_domain_remove(nvic_irq_domain);
+		return ret;
+	}
+
+	for (i = 0; i < numbanks; ++i) {
+		struct irq_chip_generic *gc;
+
+		gc = irq_get_domain_generic_chip(nvic_irq_domain, 32 * i);
+		gc->reg_base = nvic_base + 4 * i;
+		gc->chip_types[0].regs.enable = NVIC_ISER;
+		gc->chip_types[0].regs.disable = NVIC_ICER;
+		gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+		gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+		/*
+		 * This is a no-op as end of interrupt is signaled by the
+		 * exception return sequence.
+		 */
+		gc->chip_types[0].chip.irq_eoi = irq_gc_noop;
+
+		/* disable interrupts */
+		writel_relaxed(~0, gc->reg_base + NVIC_ICER);
+	}
+
+	/* Set priority on all interrupts */
+	for (i = 0; i < irqs; i += 4)
+		writel_relaxed(0, nvic_base + NVIC_IPR + i);
+
+	return 0;
+}
+IRQCHIP_DECLARE(armv7m_nvic, "arm,armv7m-nvic", nvic_of_init);
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
new file mode 100644
index 0000000..d360a6e
--- /dev/null
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -0,0 +1,394 @@
+/*
+ * linux/arch/arm/mach-omap2/irq.c
+ *
+ * Interrupt handler for OMAP2 boards.
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Paul Mundt <paul.mundt@nokia.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <asm/exception.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <linux/irqchip/irq-omap-intc.h>
+
+/* selected INTC register offsets */
+
+#define INTC_REVISION		0x0000
+#define INTC_SYSCONFIG		0x0010
+#define INTC_SYSSTATUS		0x0014
+#define INTC_SIR		0x0040
+#define INTC_CONTROL		0x0048
+#define INTC_PROTECTION		0x004C
+#define INTC_IDLE		0x0050
+#define INTC_THRESHOLD		0x0068
+#define INTC_MIR0		0x0084
+#define INTC_MIR_CLEAR0		0x0088
+#define INTC_MIR_SET0		0x008c
+#define INTC_PENDING_IRQ0	0x0098
+#define INTC_PENDING_IRQ1	0x00b8
+#define INTC_PENDING_IRQ2	0x00d8
+#define INTC_PENDING_IRQ3	0x00f8
+#define INTC_ILR0		0x0100
+
+#define ACTIVEIRQ_MASK		0x7f	/* omap2/3 active interrupt bits */
+#define SPURIOUSIRQ_MASK	(0x1ffffff << 7)
+#define INTCPS_NR_ILR_REGS	128
+#define INTCPS_NR_MIR_REGS	4
+
+#define INTC_IDLE_FUNCIDLE	(1 << 0)
+#define INTC_IDLE_TURBO		(1 << 1)
+
+#define INTC_PROTECTION_ENABLE	(1 << 0)
+
+struct omap_intc_regs {
+	u32 sysconfig;
+	u32 protection;
+	u32 idle;
+	u32 threshold;
+	u32 ilr[INTCPS_NR_ILR_REGS];
+	u32 mir[INTCPS_NR_MIR_REGS];
+};
+static struct omap_intc_regs intc_context;
+
+static struct irq_domain *domain;
+static void __iomem *omap_irq_base;
+static int omap_nr_pending;
+static int omap_nr_irqs;
+
+static void intc_writel(u32 reg, u32 val)
+{
+	writel_relaxed(val, omap_irq_base + reg);
+}
+
+static u32 intc_readl(u32 reg)
+{
+	return readl_relaxed(omap_irq_base + reg);
+}
+
+void omap_intc_save_context(void)
+{
+	int i;
+
+	intc_context.sysconfig =
+		intc_readl(INTC_SYSCONFIG);
+	intc_context.protection =
+		intc_readl(INTC_PROTECTION);
+	intc_context.idle =
+		intc_readl(INTC_IDLE);
+	intc_context.threshold =
+		intc_readl(INTC_THRESHOLD);
+
+	for (i = 0; i < omap_nr_irqs; i++)
+		intc_context.ilr[i] =
+			intc_readl((INTC_ILR0 + 0x4 * i));
+	for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
+		intc_context.mir[i] =
+			intc_readl(INTC_MIR0 + (0x20 * i));
+}
+
+void omap_intc_restore_context(void)
+{
+	int i;
+
+	intc_writel(INTC_SYSCONFIG, intc_context.sysconfig);
+	intc_writel(INTC_PROTECTION, intc_context.protection);
+	intc_writel(INTC_IDLE, intc_context.idle);
+	intc_writel(INTC_THRESHOLD, intc_context.threshold);
+
+	for (i = 0; i < omap_nr_irqs; i++)
+		intc_writel(INTC_ILR0 + 0x4 * i,
+				intc_context.ilr[i]);
+
+	for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
+		intc_writel(INTC_MIR0 + 0x20 * i,
+			intc_context.mir[i]);
+	/* MIRs are saved and restored with the other PRCM registers */
+}
+
+void omap3_intc_prepare_idle(void)
+{
+	/*
+	 * Disable autoidle as it can stall interrupt controller,
+	 * cf. errata ID i540 for 3430 (all revisions up to 3.1.x)
+	 */
+	intc_writel(INTC_SYSCONFIG, 0);
+	intc_writel(INTC_IDLE, INTC_IDLE_TURBO);
+}
+
+void omap3_intc_resume_idle(void)
+{
+	/* Re-enable autoidle */
+	intc_writel(INTC_SYSCONFIG, 1);
+	intc_writel(INTC_IDLE, 0);
+}
+
+/* XXX: FIQ and additional INTC support (only MPU at the moment) */
+static void omap_ack_irq(struct irq_data *d)
+{
+	intc_writel(INTC_CONTROL, 0x1);
+}
+
+static void omap_mask_ack_irq(struct irq_data *d)
+{
+	irq_gc_mask_disable_reg(d);
+	omap_ack_irq(d);
+}
+
+static void __init omap_irq_soft_reset(void)
+{
+	unsigned long tmp;
+
+	tmp = intc_readl(INTC_REVISION) & 0xff;
+
+	pr_info("IRQ: Found an INTC at 0x%p (revision %ld.%ld) with %d interrupts\n",
+		omap_irq_base, tmp >> 4, tmp & 0xf, omap_nr_irqs);
+
+	tmp = intc_readl(INTC_SYSCONFIG);
+	tmp |= 1 << 1;	/* soft reset */
+	intc_writel(INTC_SYSCONFIG, tmp);
+
+	while (!(intc_readl(INTC_SYSSTATUS) & 0x1))
+		/* Wait for reset to complete */;
+
+	/* Enable autoidle */
+	intc_writel(INTC_SYSCONFIG, 1 << 0);
+}
+
+int omap_irq_pending(void)
+{
+	int i;
+
+	for (i = 0; i < omap_nr_pending; i++)
+		if (intc_readl(INTC_PENDING_IRQ0 + (0x20 * i)))
+			return 1;
+	return 0;
+}
+
+void omap3_intc_suspend(void)
+{
+	/* A pending interrupt would prevent OMAP from entering suspend */
+	omap_ack_irq(NULL);
+}
+
+static int __init omap_alloc_gc_of(struct irq_domain *d, void __iomem *base)
+{
+	int ret;
+	int i;
+
+	ret = irq_alloc_domain_generic_chips(d, 32, 1, "INTC",
+			handle_level_irq, IRQ_NOREQUEST | IRQ_NOPROBE,
+			IRQ_LEVEL, 0);
+	if (ret) {
+		pr_warn("Failed to allocate irq chips\n");
+		return ret;
+	}
+
+	for (i = 0; i < omap_nr_pending; i++) {
+		struct irq_chip_generic *gc;
+		struct irq_chip_type *ct;
+
+		gc = irq_get_domain_generic_chip(d, 32 * i);
+		gc->reg_base = base;
+		ct = gc->chip_types;
+
+		ct->type = IRQ_TYPE_LEVEL_MASK;
+
+		ct->chip.irq_ack = omap_mask_ack_irq;
+		ct->chip.irq_mask = irq_gc_mask_disable_reg;
+		ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+
+		ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
+
+		ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i;
+		ct->regs.disable = INTC_MIR_SET0 + 32 * i;
+	}
+
+	return 0;
+}
+
+static void __init omap_alloc_gc_legacy(void __iomem *base,
+		unsigned int irq_start, unsigned int num)
+{
+	struct irq_chip_generic *gc;
+	struct irq_chip_type *ct;
+
+	gc = irq_alloc_generic_chip("INTC", 1, irq_start, base,
+			handle_level_irq);
+	ct = gc->chip_types;
+	ct->chip.irq_ack = omap_mask_ack_irq;
+	ct->chip.irq_mask = irq_gc_mask_disable_reg;
+	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+	ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
+
+	ct->regs.enable = INTC_MIR_CLEAR0;
+	ct->regs.disable = INTC_MIR_SET0;
+	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
+			IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+}
+
+static int __init omap_init_irq_of(struct device_node *node)
+{
+	int ret;
+
+	omap_irq_base = of_iomap(node, 0);
+	if (WARN_ON(!omap_irq_base))
+		return -ENOMEM;
+
+	domain = irq_domain_add_linear(node, omap_nr_irqs,
+			&irq_generic_chip_ops, NULL);
+
+	omap_irq_soft_reset();
+
+	ret = omap_alloc_gc_of(domain, omap_irq_base);
+	if (ret < 0)
+		irq_domain_remove(domain);
+
+	return ret;
+}
+
+static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
+{
+	int j, irq_base;
+
+	omap_irq_base = ioremap(base, SZ_4K);
+	if (WARN_ON(!omap_irq_base))
+		return -ENOMEM;
+
+	irq_base = irq_alloc_descs(-1, 0, omap_nr_irqs, 0);
+	if (irq_base < 0) {
+		pr_warn("Couldn't allocate IRQ numbers\n");
+		irq_base = 0;
+	}
+
+	domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
+			&irq_domain_simple_ops, NULL);
+
+	omap_irq_soft_reset();
+
+	for (j = 0; j < omap_nr_irqs; j += 32)
+		omap_alloc_gc_legacy(omap_irq_base + j, j + irq_base, 32);
+
+	return 0;
+}
+
+static void __init omap_irq_enable_protection(void)
+{
+	u32 reg;
+
+	reg = intc_readl(INTC_PROTECTION);
+	reg |= INTC_PROTECTION_ENABLE;
+	intc_writel(INTC_PROTECTION, reg);
+}
+
+static int __init omap_init_irq(u32 base, struct device_node *node)
+{
+	int ret;
+
+	/*
+	 * FIXME: the legacy OMAP DMA driver sitting under
+	 * arch/arm/plat-omap/dma.c is still not ready for linear IRQ domains;
+	 * because of that we need to temporarily "blacklist" OMAP2 and OMAP3
+	 * devices from using the linear IRQ domain until that driver is
+	 * finally fixed.
+	 */
+	if (of_device_is_compatible(node, "ti,omap2-intc") ||
+			of_device_is_compatible(node, "ti,omap3-intc")) {
+		struct resource res;
+
+		if (of_address_to_resource(node, 0, &res))
+			return -ENOMEM;
+
+		base = res.start;
+		ret = omap_init_irq_legacy(base, node);
+	} else if (node) {
+		ret = omap_init_irq_of(node);
+	} else {
+		ret = omap_init_irq_legacy(base, NULL);
+	}
+
+	if (ret == 0)
+		omap_irq_enable_protection();
+
+	return ret;
+}
+
+static asmlinkage void __exception_irq_entry
+omap_intc_handle_irq(struct pt_regs *regs)
+{
+	extern unsigned long irq_err_count;
+	u32 irqnr;
+
+	irqnr = intc_readl(INTC_SIR);
+
+	/*
+	 * A spurious IRQ can result if the interrupt that triggered the
+	 * sorting is no longer active during the sorting (10 INTC
+	 * functional clock cycles after interrupt assertion), or if a
+	 * change in the interrupt mask affected the result during the
+	 * sorting. There is no special handling required except ignoring
+	 * the SIR register value just read and retrying.
+	 * See section 6.2.5 of the AM335x TRM (Literature Number: SPRUH73K).
+	 *
+	 * Many times, a spurious interrupt situation has been fixed
+	 * by adding a flush for the posted write acking the IRQ in
+	 * the device driver. Typically, this is going to be the device
+	 * driver whose interrupt was handled just before the spurious
+	 * IRQ occurred. Pay attention to those device drivers if you
+	 * run into the spurious IRQ condition below.
+	 */
+	if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
+		pr_err_once("%s: spurious irq!\n", __func__);
+		irq_err_count++;
+		omap_ack_irq(NULL);
+		return;
+	}
+
+	irqnr &= ACTIVEIRQ_MASK;
+	handle_domain_irq(domain, irqnr, regs);
+}
+
+static int __init intc_of_init(struct device_node *node,
+			     struct device_node *parent)
+{
+	int ret;
+
+	omap_nr_pending = 3;
+	omap_nr_irqs = 96;
+
+	if (WARN_ON(!node))
+		return -ENODEV;
+
+	if (of_device_is_compatible(node, "ti,dm814-intc") ||
+	    of_device_is_compatible(node, "ti,dm816-intc") ||
+	    of_device_is_compatible(node, "ti,am33xx-intc")) {
+		omap_nr_irqs = 128;
+		omap_nr_pending = 4;
+	}
+
+	ret = omap_init_irq(-1, of_node_get(node));
+	if (ret < 0)
+		return ret;
+
+	set_handle_irq(omap_intc_handle_irq);
+
+	return 0;
+}
+
+IRQCHIP_DECLARE(omap2_intc, "ti,omap2-intc", intc_of_init);
+IRQCHIP_DECLARE(omap3_intc, "ti,omap3-intc", intc_of_init);
+IRQCHIP_DECLARE(dm814x_intc, "ti,dm814-intc", intc_of_init);
+IRQCHIP_DECLARE(dm816x_intc, "ti,dm816-intc", intc_of_init);
+IRQCHIP_DECLARE(am33xx_intc, "ti,am33xx-intc", intc_of_init);
diff --git a/drivers/irqchip/irq-ompic.c b/drivers/irqchip/irq-ompic.c
new file mode 100644
index 0000000..e66ef43
--- /dev/null
+++ b/drivers/irqchip/irq-ompic.c
@@ -0,0 +1,202 @@
+/*
+ * Open Multi-Processor Interrupt Controller driver
+ *
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * The ompic device handles IPI communication between cores in multi-core
+ * OpenRISC systems.
+ *
+ * Registers
+ *
+ * For each CPU the ompic has 2 registers: the control register for
+ * sending and acking IPIs, and the status register for receiving IPIs.
+ * The register layouts are as follows:
+ *
+ *  Control register
+ *  +---------+---------+----------+---------+
+ *  | 31      | 30      | 29 .. 16 | 15 .. 0 |
+ *  ----------+---------+----------+----------
+ *  | IRQ ACK | IRQ GEN | DST CORE | DATA    |
+ *  +---------+---------+----------+---------+
+ *
+ *  Status register
+ *  +----------+-------------+----------+---------+
+ *  | 31       | 30          | 29 .. 16 | 15 .. 0 |
+ *  -----------+-------------+----------+---------+
+ *  | Reserved | IRQ Pending | SRC CORE | DATA    |
+ *  +----------+-------------+----------+---------+
+ *
+ * Architecture
+ *
+ * - The ompic generates a level interrupt to the CPU PIC when a message is
+ *   ready.  Messages are delivered via the memory bus.
+ * - The ompic does not have any interrupt input lines.
+ * - The ompic is wired to the same irq line on each core.
+ * - Devices are wired to the same irq line on each core.
+ *
+ *   +---------+                         +---------+
+ *   | CPU     |                         | CPU     |
+ *   |  Core 0 |<==\ (memory access) /==>|  Core 1 |
+ *   |  [ PIC ]|   |                 |   |  [ PIC ]|
+ *   +----^-^--+   |                 |   +----^-^--+
+ *        | |      v                 v        | |
+ *   <====|=|=================================|=|==> (memory bus)
+ *        | |      ^                  ^       | |
+ *  (ipi  | +------|---------+--------|-------|-+ (device irq)
+ *   irq  |        |         |        |       |
+ *  core0)| +------|---------|--------|-------+ (ipi irq core1)
+ *        | |      |         |        |
+ *   +----o-o-+    |    +--------+    |
+ *   | ompic  |<===/    | Device |<===/
+ *   |  IPI   |         +--------+
+ *   +--------+
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include <linux/irqchip.h>
+
+#define OMPIC_CPUBYTES		8
+#define OMPIC_CTRL(cpu)		(0x0 + ((cpu) * OMPIC_CPUBYTES))
+#define OMPIC_STAT(cpu)		(0x4 + ((cpu) * OMPIC_CPUBYTES))
+
+#define OMPIC_CTRL_IRQ_ACK	(1 << 31)
+#define OMPIC_CTRL_IRQ_GEN	(1 << 30)
+#define OMPIC_CTRL_DST(cpu)	(((cpu) & 0x3fff) << 16)
+
+#define OMPIC_STAT_IRQ_PENDING	(1 << 30)
+
+#define OMPIC_DATA(x)		((x) & 0xffff)
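+
+/*
+ * For example, the IPI send in ompic_raise_softirq() below builds the
+ * control word for destination CPU 1 as
+ *
+ *	OMPIC_CTRL_IRQ_GEN | OMPIC_CTRL_DST(1) | OMPIC_DATA(1)
+ *	    == (1 << 30) | (1 << 16) | 0x1
+ */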
+
+static DEFINE_PER_CPU(unsigned long, ops);
+
+static void __iomem *ompic_base;
+
+static inline u32 ompic_readreg(void __iomem *base, loff_t offset)
+{
+	return ioread32be(base + offset);
+}
+
+static void ompic_writereg(void __iomem *base, loff_t offset, u32 data)
+{
+	iowrite32be(data, base + offset);
+}
+
+static void ompic_raise_softirq(const struct cpumask *mask,
+				unsigned int ipi_msg)
+{
+	unsigned int dst_cpu;
+	unsigned int src_cpu = smp_processor_id();
+
+	for_each_cpu(dst_cpu, mask) {
+		set_bit(ipi_msg, &per_cpu(ops, dst_cpu));
+
+		/*
+		 * On OpenRISC the atomic set_bit() call implies a memory
+		 * barrier.  Otherwise we would need: smp_wmb(); paired
+		 * with the read in ompic_ipi_handler.
+		 */
+
+		ompic_writereg(ompic_base, OMPIC_CTRL(src_cpu),
+			       OMPIC_CTRL_IRQ_GEN |
+			       OMPIC_CTRL_DST(dst_cpu) |
+			       OMPIC_DATA(1));
+	}
+}
+
+static irqreturn_t ompic_ipi_handler(int irq, void *dev_id)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long *pending_ops = &per_cpu(ops, cpu);
+	unsigned long ops;
+
+	ompic_writereg(ompic_base, OMPIC_CTRL(cpu), OMPIC_CTRL_IRQ_ACK);
+	while ((ops = xchg(pending_ops, 0)) != 0) {
+
+		/*
+		 * On OpenRISC the atomic xchg() call implies a memory
+		 * barrier.  Otherwise we may need an smp_rmb(); paired
+		 * with the write in ompic_raise_softirq.
+		 */
+
+		do {
+			unsigned long ipi_msg;
+
+			ipi_msg = __ffs(ops);
+			ops &= ~(1UL << ipi_msg);
+
+			handle_IPI(ipi_msg);
+		} while (ops);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int __init ompic_of_init(struct device_node *node,
+				struct device_node *parent)
+{
+	struct resource res;
+	int irq;
+	int ret;
+
+	/* Validate the DT */
+	if (ompic_base) {
+		pr_err("ompic: duplicate ompic's are not supported");
+		return -EEXIST;
+	}
+
+	if (of_address_to_resource(node, 0, &res)) {
+		pr_err("ompic: reg property requires an address and size");
+		return -EINVAL;
+	}
+
+	if (resource_size(&res) < (num_possible_cpus() * OMPIC_CPUBYTES)) {
+		pr_err("ompic: reg size, currently %d must be at least %d",
+			resource_size(&res),
+			(num_possible_cpus() * OMPIC_CPUBYTES));
+		return -EINVAL;
+	}
+
+	/* Setup the device */
+	ompic_base = ioremap(res.start, resource_size(&res));
+	if (!ompic_base) {
+		pr_err("ompic: unable to map registers");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (irq <= 0) {
+		pr_err("ompic: unable to parse device irq");
+		ret = -EINVAL;
+		goto out_unmap;
+	}
+
+	ret = request_irq(irq, ompic_ipi_handler, IRQF_PERCPU,
+				"ompic_ipi", NULL);
+	if (ret)
+		goto out_irq_disp;
+
+	set_smp_cross_call(ompic_raise_softirq);
+
+	return 0;
+
+out_irq_disp:
+	irq_dispose_mapping(irq);
+out_unmap:
+	iounmap(ompic_base);
+	ompic_base = NULL;
+	return ret;
+}
+IRQCHIP_DECLARE(ompic, "openrisc,ompic", ompic_of_init);
diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c
new file mode 100644
index 0000000..dd9d5d1
--- /dev/null
+++ b/drivers/irqchip/irq-or1k-pic.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * Copyright (C) 2014 Stefan Kristansson <stefan.kristiansson@saunalahti.fi>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+/* OR1K PIC implementation */
+
+struct or1k_pic_dev {
+	struct irq_chip chip;
+	irq_flow_handler_t handle;
+	unsigned long flags;
+};
+
+/*
+ * We're a couple of cycles faster than the generic implementations with
+ * these 'fast' versions.
+ */
+
+static void or1k_pic_mask(struct irq_data *data)
+{
+	mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->hwirq));
+}
+
+static void or1k_pic_unmask(struct irq_data *data)
+{
+	mtspr(SPR_PICMR, mfspr(SPR_PICMR) | (1UL << data->hwirq));
+}
+
+static void or1k_pic_ack(struct irq_data *data)
+{
+	mtspr(SPR_PICSR, (1UL << data->hwirq));
+}
+
+static void or1k_pic_mask_ack(struct irq_data *data)
+{
+	mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->hwirq));
+	mtspr(SPR_PICSR, (1UL << data->hwirq));
+}
+
+/*
+ * There are two oddities with the OR1200 PIC implementation:
+ * i)  LEVEL-triggered interrupts are latched and need to be cleared
+ * ii) the interrupt latch is cleared by writing a 0 to the bit,
+ *     as opposed to a 1 as mandated by the spec
+ */
+static void or1k_pic_or1200_ack(struct irq_data *data)
+{
+	mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->hwirq));
+}
+
+static void or1k_pic_or1200_mask_ack(struct irq_data *data)
+{
+	mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->hwirq));
+	mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->hwirq));
+}
+
+static struct or1k_pic_dev or1k_pic_level = {
+	.chip = {
+		.name = "or1k-PIC-level",
+		.irq_unmask = or1k_pic_unmask,
+		.irq_mask = or1k_pic_mask,
+		.irq_mask_ack = or1k_pic_mask_ack,
+	},
+	.handle = handle_level_irq,
+	.flags = IRQ_LEVEL | IRQ_NOPROBE,
+};
+
+static struct or1k_pic_dev or1k_pic_edge = {
+	.chip = {
+		.name = "or1k-PIC-edge",
+		.irq_unmask = or1k_pic_unmask,
+		.irq_mask = or1k_pic_mask,
+		.irq_ack = or1k_pic_ack,
+		.irq_mask_ack = or1k_pic_mask_ack,
+	},
+	.handle = handle_edge_irq,
+	.flags = IRQ_LEVEL | IRQ_NOPROBE,
+};
+
+static struct or1k_pic_dev or1k_pic_or1200 = {
+	.chip = {
+		.name = "or1200-PIC",
+		.irq_unmask = or1k_pic_unmask,
+		.irq_mask = or1k_pic_mask,
+		.irq_ack = or1k_pic_or1200_ack,
+		.irq_mask_ack = or1k_pic_or1200_mask_ack,
+	},
+	.handle = handle_level_irq,
+	.flags = IRQ_LEVEL | IRQ_NOPROBE,
+};
+
+static struct irq_domain *root_domain;
+
+static inline int pic_get_irq(int first)
+{
+	int hwirq;
+
+	hwirq = ffs(mfspr(SPR_PICSR) >> first);
+	if (!hwirq)
+		return NO_IRQ;
+	else
+		hwirq = hwirq + first - 1;
+
+	return hwirq;
+}
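+
+/*
+ * Example: with SPR_PICSR == 0b1010 and first == 0, ffs() returns 2 and
+ * pic_get_irq() reports hwirq 1; the follow-up call with first == 2
+ * scans the remaining bits and reports hwirq 3.
+ */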
+
+static void or1k_pic_handle_irq(struct pt_regs *regs)
+{
+	int irq = -1;
+
+	while ((irq = pic_get_irq(irq + 1)) != NO_IRQ)
+		handle_domain_irq(root_domain, irq, regs);
+}
+
+static int or1k_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+	struct or1k_pic_dev *pic = d->host_data;
+
+	irq_set_chip_and_handler(irq, &pic->chip, pic->handle);
+	irq_set_status_flags(irq, pic->flags);
+
+	return 0;
+}
+
+static const struct irq_domain_ops or1k_irq_domain_ops = {
+	.xlate = irq_domain_xlate_onecell,
+	.map = or1k_map,
+};
+
+/*
+ * This sets up the IRQ domain for the PIC built in to the OpenRISC
+ * 1000 CPU.  This is the "root" domain as these are the interrupts
+ * that directly trigger an exception in the CPU.
+ */
+static int __init or1k_pic_init(struct device_node *node,
+				 struct or1k_pic_dev *pic)
+{
+	/* Disable all interrupts until explicitly requested */
+	mtspr(SPR_PICMR, (0UL));
+
+	root_domain = irq_domain_add_linear(node, 32, &or1k_irq_domain_ops,
+					    pic);
+
+	set_handle_irq(or1k_pic_handle_irq);
+
+	return 0;
+}
+
+static int __init or1k_pic_or1200_init(struct device_node *node,
+				       struct device_node *parent)
+{
+	return or1k_pic_init(node, &or1k_pic_or1200);
+}
+IRQCHIP_DECLARE(or1k_pic_or1200, "opencores,or1200-pic", or1k_pic_or1200_init);
+IRQCHIP_DECLARE(or1k_pic, "opencores,or1k-pic", or1k_pic_or1200_init);
+
+static int __init or1k_pic_level_init(struct device_node *node,
+				      struct device_node *parent)
+{
+	return or1k_pic_init(node, &or1k_pic_level);
+}
+IRQCHIP_DECLARE(or1k_pic_level, "opencores,or1k-pic-level",
+		or1k_pic_level_init);
+
+static int __init or1k_pic_edge_init(struct device_node *node,
+				     struct device_node *parent)
+{
+	return or1k_pic_init(node, &or1k_pic_edge);
+}
+IRQCHIP_DECLARE(or1k_pic_edge, "opencores,or1k-pic-edge", or1k_pic_edge_init);
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
new file mode 100644
index 0000000..be4c5a8
--- /dev/null
+++ b/drivers/irqchip/irq-orion.c
@@ -0,0 +1,206 @@
+/*
+ * Marvell Orion SoCs IRQ chip driver.
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+/*
+ * Orion SoC main interrupt controller
+ */
+#define ORION_IRQS_PER_CHIP		32
+
+#define ORION_IRQ_CAUSE			0x00
+#define ORION_IRQ_MASK			0x04
+#define ORION_IRQ_FIQ_MASK		0x08
+#define ORION_IRQ_ENDP_MASK		0x0c
+
+static struct irq_domain *orion_irq_domain;
+
+static void
+__exception_irq_entry orion_handle_irq(struct pt_regs *regs)
+{
+	struct irq_domain_chip_generic *dgc = orion_irq_domain->gc;
+	int n, base = 0;
+
+	for (n = 0; n < dgc->num_chips; n++, base += ORION_IRQS_PER_CHIP) {
+		struct irq_chip_generic *gc =
+			irq_get_domain_generic_chip(orion_irq_domain, base);
+		u32 stat = readl_relaxed(gc->reg_base + ORION_IRQ_CAUSE) &
+			gc->mask_cache;
+		while (stat) {
+			u32 hwirq = __fls(stat);
+			handle_domain_irq(orion_irq_domain,
+					  gc->irq_base + hwirq, regs);
+			stat &= ~(1 << hwirq);
+		}
+	}
+}
+
+static int __init orion_irq_init(struct device_node *np,
+				 struct device_node *parent)
+{
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+	int n, ret, base, num_chips = 0;
+	struct resource r;
+
+	/* count number of irq chips by valid reg addresses */
+	while (of_address_to_resource(np, num_chips, &r) == 0)
+		num_chips++;
+
+	orion_irq_domain = irq_domain_add_linear(np,
+				num_chips * ORION_IRQS_PER_CHIP,
+				&irq_generic_chip_ops, NULL);
+	if (!orion_irq_domain)
+		panic("%s: unable to add irq domain\n", np->name);
+
+	ret = irq_alloc_domain_generic_chips(orion_irq_domain,
+				ORION_IRQS_PER_CHIP, 1, np->name,
+				handle_level_irq, clr, 0,
+				IRQ_GC_INIT_MASK_CACHE);
+	if (ret)
+		panic("%s: unable to alloc irq domain gc\n", np->name);
+
+	for (n = 0, base = 0; n < num_chips; n++, base += ORION_IRQS_PER_CHIP) {
+		struct irq_chip_generic *gc =
+			irq_get_domain_generic_chip(orion_irq_domain, base);
+
+		of_address_to_resource(np, n, &r);
+
+		if (!request_mem_region(r.start, resource_size(&r), np->name))
+			panic("%s: unable to request mem region %d",
+			      np->name, n);
+
+		gc->reg_base = ioremap(r.start, resource_size(&r));
+		if (!gc->reg_base)
+			panic("%s: unable to map resource %d", np->name, n);
+
+		gc->chip_types[0].regs.mask = ORION_IRQ_MASK;
+		gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+
+		/* mask all interrupts */
+		writel(0, gc->reg_base + ORION_IRQ_MASK);
+	}
+
+	set_handle_irq(orion_handle_irq);
+	return 0;
+}
+IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
+
+/*
+ * Orion SoC bridge interrupt controller
+ */
+#define ORION_BRIDGE_IRQ_CAUSE	0x00
+#define ORION_BRIDGE_IRQ_MASK	0x04
+
+static void orion_bridge_irq_handler(struct irq_desc *desc)
+{
+	struct irq_domain *d = irq_desc_get_handler_data(desc);
+
+	struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
+	u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
+		   gc->mask_cache;
+
+	while (stat) {
+		u32 hwirq = __fls(stat);
+
+		generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq));
+		stat &= ~(1 << hwirq);
+	}
+}
+
+/*
+ * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register.
+ * To avoid interrupt events on stale irqs, we clear them before unmask.
+ */
+static unsigned int orion_bridge_irq_startup(struct irq_data *d)
+{
+	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
+	ct->chip.irq_ack(d);
+	ct->chip.irq_unmask(d);
+	return 0;
+}
+
+static int __init orion_bridge_irq_init(struct device_node *np,
+					struct device_node *parent)
+{
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+	struct resource r;
+	struct irq_domain *domain;
+	struct irq_chip_generic *gc;
+	int ret, irq, nrirqs = 32;
+
+	/* get optional number of interrupts provided */
+	of_property_read_u32(np, "marvell,#interrupts", &nrirqs);
+
+	domain = irq_domain_add_linear(np, nrirqs,
+				       &irq_generic_chip_ops, NULL);
+	if (!domain) {
+		pr_err("%s: unable to add irq domain\n", np->name);
+		return -ENOMEM;
+	}
+
+	ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
+			     handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
+	if (ret) {
+		pr_err("%s: unable to alloc irq domain gc\n", np->name);
+		return ret;
+	}
+
+	ret = of_address_to_resource(np, 0, &r);
+	if (ret) {
+		pr_err("%s: unable to get resource\n", np->name);
+		return ret;
+	}
+
+	if (!request_mem_region(r.start, resource_size(&r), np->name)) {
+		pr_err("%s: unable to request mem region\n", np->name);
+		return -ENOMEM;
+	}
+
+	/* Map the parent interrupt for the chained handler */
+	irq = irq_of_parse_and_map(np, 0);
+	if (irq <= 0) {
+		pr_err("%s: unable to parse irq\n", np->name);
+		return -EINVAL;
+	}
+
+	gc = irq_get_domain_generic_chip(domain, 0);
+	gc->reg_base = ioremap(r.start, resource_size(&r));
+	if (!gc->reg_base) {
+		pr_err("%s: unable to map resource\n", np->name);
+		return -ENOMEM;
+	}
+
+	gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
+	gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
+	gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
+	gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
+	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+
+	/* mask and clear all interrupts */
+	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
+	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);
+
+	irq_set_chained_handler_and_data(irq, orion_bridge_irq_handler,
+					 domain);
+
+	return 0;
+}
+IRQCHIP_DECLARE(orion_bridge_intc,
+		"marvell,orion-bridge-intc", orion_bridge_irq_init);
diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c
new file mode 100644
index 0000000..1f7cc59
--- /dev/null
+++ b/drivers/irqchip/irq-partition-percpu.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-partition-percpu.h>
+#include <linux/irqdomain.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+struct partition_desc {
+	int				nr_parts;
+	struct partition_affinity	*parts;
+	struct irq_domain		*domain;
+	struct irq_desc			*chained_desc;
+	unsigned long			*bitmap;
+	struct irq_domain_ops		ops;
+};
+
+static bool partition_check_cpu(struct partition_desc *part,
+				unsigned int cpu, unsigned int hwirq)
+{
+	return cpumask_test_cpu(cpu, &part->parts[hwirq].mask);
+}
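+
+/*
+ * Each hwirq of the domain stands for one partition, i.e. one group of
+ * CPUs sharing an affinity mask. As a sketch: a per-cpu interrupt wired
+ * identically on two clusters can be split into hwirq 0 covering cpus
+ * 0-3 and hwirq 1 covering cpus 4-7, each backed by its own Linux irq.
+ */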
+
+static void partition_irq_mask(struct irq_data *d)
+{
+	struct partition_desc *part = irq_data_get_irq_chip_data(d);
+	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+	if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+	    chip->irq_mask)
+		chip->irq_mask(data);
+}
+
+static void partition_irq_unmask(struct irq_data *d)
+{
+	struct partition_desc *part = irq_data_get_irq_chip_data(d);
+	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+	if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+	    chip->irq_unmask)
+		chip->irq_unmask(data);
+}
+
+static int partition_irq_set_irqchip_state(struct irq_data *d,
+					   enum irqchip_irq_state which,
+					   bool val)
+{
+	struct partition_desc *part = irq_data_get_irq_chip_data(d);
+	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+	if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+	    chip->irq_set_irqchip_state)
+		return chip->irq_set_irqchip_state(data, which, val);
+
+	return -EINVAL;
+}
+
+static int partition_irq_get_irqchip_state(struct irq_data *d,
+					   enum irqchip_irq_state which,
+					   bool *val)
+{
+	struct partition_desc *part = irq_data_get_irq_chip_data(d);
+	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+	if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+	    chip->irq_get_irqchip_state)
+		return chip->irq_get_irqchip_state(data, which, val);
+
+	return -EINVAL;
+}
+
+static int partition_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct partition_desc *part = irq_data_get_irq_chip_data(d);
+	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+	if (chip->irq_set_type)
+		return chip->irq_set_type(data, type);
+
+	return -EINVAL;
+}
+
+static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+	struct partition_desc *part = irq_data_get_irq_chip_data(d);
+	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+	seq_printf(p, " %5s-%lu", chip->name, data->hwirq);
+}
+
+static struct irq_chip partition_irq_chip = {
+	.irq_mask		= partition_irq_mask,
+	.irq_unmask		= partition_irq_unmask,
+	.irq_set_type		= partition_irq_set_type,
+	.irq_get_irqchip_state	= partition_irq_get_irqchip_state,
+	.irq_set_irqchip_state	= partition_irq_set_irqchip_state,
+	.irq_print_chip		= partition_irq_print_chip,
+};
+
+static void partition_handle_irq(struct irq_desc *desc)
+{
+	struct partition_desc *part = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	int cpu = smp_processor_id();
+	int hwirq;
+
+	chained_irq_enter(chip, desc);
+
+	for_each_set_bit(hwirq, part->bitmap, part->nr_parts) {
+		if (partition_check_cpu(part, cpu, hwirq))
+			break;
+	}
+
+	if (unlikely(hwirq == part->nr_parts)) {
+		handle_bad_irq(desc);
+	} else {
+		unsigned int irq;
+		irq = irq_find_mapping(part->domain, hwirq);
+		generic_handle_irq(irq);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static int partition_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				  unsigned int nr_irqs, void *arg)
+{
+	int ret;
+	irq_hw_number_t hwirq;
+	unsigned int type;
+	struct irq_fwspec *fwspec = arg;
+	struct partition_desc *part;
+
+	BUG_ON(nr_irqs != 1);
+	ret = domain->ops->translate(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return ret;
+
+	part = domain->host_data;
+
+	set_bit(hwirq, part->bitmap);
+	irq_set_chained_handler_and_data(irq_desc_get_irq(part->chained_desc),
+					 partition_handle_irq, part);
+	irq_set_percpu_devid_partition(virq, &part->parts[hwirq].mask);
+	irq_domain_set_info(domain, virq, hwirq, &partition_irq_chip, part,
+			    handle_percpu_devid_irq, NULL, NULL);
+	irq_set_status_flags(virq, IRQ_NOAUTOEN);
+
+	return 0;
+}
+
+static void partition_domain_free(struct irq_domain *domain, unsigned int virq,
+				  unsigned int nr_irqs)
+{
+	struct irq_data *d;
+
+	BUG_ON(nr_irqs != 1);
+
+	d = irq_domain_get_irq_data(domain, virq);
+	irq_set_handler(virq, NULL);
+	irq_domain_reset_irq_data(d);
+}
+
+int partition_translate_id(struct partition_desc *desc, void *partition_id)
+{
+	struct partition_affinity *part = NULL;
+	int i;
+
+	for (i = 0; i < desc->nr_parts; i++) {
+		if (desc->parts[i].partition_id == partition_id) {
+			part = &desc->parts[i];
+			break;
+		}
+	}
+
+	if (WARN_ON(!part)) {
+		pr_err("Failed to find partition\n");
+		return -EINVAL;
+	}
+
+	return i;
+}
+
+struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
+					     struct partition_affinity *parts,
+					     int nr_parts,
+					     int chained_irq,
+					     const struct irq_domain_ops *ops)
+{
+	struct partition_desc *desc;
+	struct irq_domain *d;
+
+	BUG_ON(!ops->select || !ops->translate);
+
+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return NULL;
+
+	desc->ops = *ops;
+	desc->ops.free = partition_domain_free;
+	desc->ops.alloc = partition_domain_alloc;
+
+	d = irq_domain_create_linear(fwnode, nr_parts, &desc->ops, desc);
+	if (!d)
+		goto out;
+	desc->domain = d;
+
+	desc->bitmap = kcalloc(BITS_TO_LONGS(nr_parts), sizeof(long),
+			       GFP_KERNEL);
+	if (WARN_ON(!desc->bitmap))
+		goto out;
+
+	desc->chained_desc = irq_to_desc(chained_irq);
+	desc->nr_parts = nr_parts;
+	desc->parts = parts;
+
+	return desc;
+out:
+	if (d)
+		irq_domain_remove(d);
+	kfree(desc);
+
+	return NULL;
+}
+
+struct irq_domain *partition_get_domain(struct partition_desc *dsc)
+{
+	if (dsc)
+		return dsc->domain;
+
+	return NULL;
+}
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c
new file mode 100644
index 0000000..73addb4
--- /dev/null
+++ b/drivers/irqchip/irq-pic32-evic.c
@@ -0,0 +1,324 @@
+/*
+ * Cristian Birsan <cristian.birsan@microchip.com>
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2016 Microchip Technology Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irq.h>
+
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/mach-pic32/pic32.h>
+
+#define REG_INTCON	0x0000
+#define REG_INTSTAT	0x0020
+#define REG_IFS_OFFSET	0x0040
+#define REG_IEC_OFFSET	0x00C0
+#define REG_IPC_OFFSET	0x0140
+#define REG_OFF_OFFSET	0x0540
+
+#define MAJPRI_MASK	0x07
+#define SUBPRI_MASK	0x03
+#define PRIORITY_MASK	0x1F
+
+#define PIC32_INT_PRI(pri, subpri)				\
+	((((pri) & MAJPRI_MASK) << 2) | ((subpri) & SUBPRI_MASK))
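+
+/* e.g. PIC32_INT_PRI(2, 0) == (2 << 2) | 0 == 8, the default used below */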
+
+struct evic_chip_data {
+	u32 irq_types[NR_IRQS];
+	u32 ext_irqs[8];
+};
+
+static struct irq_domain *evic_irq_domain;
+static void __iomem *evic_base;
+
+asmlinkage void __weak plat_irq_dispatch(void)
+{
+	unsigned int irq, hwirq;
+
+	hwirq = readl(evic_base + REG_INTSTAT) & 0xFF;
+	irq = irq_linear_revmap(evic_irq_domain, hwirq);
+	do_IRQ(irq);
+}
+
+static struct evic_chip_data *irqd_to_priv(struct irq_data *data)
+{
+	return (struct evic_chip_data *)data->domain->host_data;
+}
+
+static int pic32_set_ext_polarity(int bit, u32 type)
+{
+	/*
+	 * External interrupts can be either edge rising or edge falling,
+	 * but not both.
+	 */
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		writel(BIT(bit), evic_base + PIC32_SET(REG_INTCON));
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		writel(BIT(bit), evic_base + PIC32_CLR(REG_INTCON));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int pic32_set_type_edge(struct irq_data *data,
+			       unsigned int flow_type)
+{
+	struct evic_chip_data *priv = irqd_to_priv(data);
+	int ret;
+	int i;
+
+	if (!(flow_type & IRQ_TYPE_EDGE_BOTH))
+		return -EBADR;
+
+	/* set polarity for external interrupts only */
+	for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) {
+		if (priv->ext_irqs[i] == data->hwirq) {
+			ret = pic32_set_ext_polarity(i, flow_type);
+			if (ret)
+				return ret;
+		}
+	}
+
+	irqd_set_trigger_type(data, flow_type);
+
+	return IRQ_SET_MASK_OK;
+}
+
+static void pic32_bind_evic_interrupt(int irq, int set)
+{
+	writel(set, evic_base + REG_OFF_OFFSET + irq * 4);
+}
+
+static void pic32_set_irq_priority(int irq, int priority)
+{
+	u32 reg, shift;
+
+	reg = irq / 4;
+	shift = (irq % 4) * 8;
+
+	writel(PRIORITY_MASK << shift,
+		evic_base + PIC32_CLR(REG_IPC_OFFSET + reg * 0x10));
+	writel(priority << shift,
+		evic_base + PIC32_SET(REG_IPC_OFFSET + reg * 0x10));
+}
+
+#define IRQ_REG_MASK(_hwirq, _reg, _mask)		       \
+	do {						       \
+		_reg = _hwirq / 32;			       \
+		_mask = 1 << (_hwirq % 32);		       \
+	} while (0)
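+
+/*
+ * Worked example: for hwirq 35, IRQ_REG_MASK() yields reg == 1 and
+ * mask == 1 << 3, i.e. bit 3 of the second IFS/IEC register pair.
+ */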
+
+static int pic32_irq_domain_map(struct irq_domain *d, unsigned int virq,
+				irq_hw_number_t hw)
+{
+	struct evic_chip_data *priv = d->host_data;
+	struct irq_data *data;
+	int ret;
+	u32 iecclr, ifsclr;
+	u32 reg, mask;
+
+	ret = irq_map_generic_chip(d, virq, hw);
+	if (ret)
+		return ret;
+
+	/*
+	 * Piggyback on xlate function to move to an alternate chip as necessary
+	 * at time of mapping instead of allowing the flow handler/chip to be
+	 * changed later. This requires all interrupts to be configured through
+	 * DT.
+	 */
+	if (priv->irq_types[hw] & IRQ_TYPE_SENSE_MASK) {
+		data = irq_domain_get_irq_data(d, virq);
+		irqd_set_trigger_type(data, priv->irq_types[hw]);
+		irq_setup_alt_chip(data, priv->irq_types[hw]);
+	}
+
+	IRQ_REG_MASK(hw, reg, mask);
+
+	iecclr = PIC32_CLR(REG_IEC_OFFSET + reg * 0x10);
+	ifsclr = PIC32_CLR(REG_IFS_OFFSET + reg * 0x10);
+
+	/* mask and clear flag */
+	writel(mask, evic_base + iecclr);
+	writel(mask, evic_base + ifsclr);
+
+	/* default priority is required */
+	pic32_set_irq_priority(hw, PIC32_INT_PRI(2, 0));
+
+	return ret;
+}
+
+int pic32_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+			   const u32 *intspec, unsigned int intsize,
+			   irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+	struct evic_chip_data *priv = d->host_data;
+
+	if (WARN_ON(intsize < 2))
+		return -EINVAL;
+
+	if (WARN_ON(intspec[0] >= NR_IRQS))
+		return -EINVAL;
+
+	*out_hwirq = intspec[0];
+	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+
+	priv->irq_types[intspec[0]] = intspec[1] & IRQ_TYPE_SENSE_MASK;
+
+	return 0;
+}
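+
+/*
+ * A two-cell specifier is thus expected from DT consumers; an
+ * illustrative (not board-specific) example:
+ *
+ *	interrupts = <113 IRQ_TYPE_LEVEL_HIGH>;
+ *
+ * where cell 0 is the EVIC hwirq and cell 1 the trigger type.
+ */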
+
+static const struct irq_domain_ops pic32_irq_domain_ops = {
+	.map	= pic32_irq_domain_map,
+	.xlate	= pic32_irq_domain_xlate,
+};
+
+static void __init pic32_ext_irq_of_init(struct irq_domain *domain)
+{
+	struct device_node *node = irq_domain_get_of_node(domain);
+	struct evic_chip_data *priv = domain->host_data;
+	struct property *prop;
+	const __be32 *p;
+	u32 hwirq;
+	int i = 0;
+	const char *pname = "microchip,external-irqs";
+
+	of_property_for_each_u32(node, pname, prop, p, hwirq) {
+		if (i >= ARRAY_SIZE(priv->ext_irqs)) {
+			pr_warn("More than %d external irq, skip rest\n",
+				ARRAY_SIZE(priv->ext_irqs));
+			break;
+		}
+
+		priv->ext_irqs[i] = hwirq;
+		i++;
+	}
+}
+
+static int __init pic32_of_init(struct device_node *node,
+				struct device_node *parent)
+{
+	struct irq_chip_generic *gc;
+	struct evic_chip_data *priv;
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+	int nchips, ret;
+	int i;
+
+	nchips = DIV_ROUND_UP(NR_IRQS, 32);
+
+	evic_base = of_iomap(node, 0);
+	if (!evic_base)
+		return -ENOMEM;
+
+	priv = kcalloc(nchips, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		ret = -ENOMEM;
+		goto err_iounmap;
+	}
+
+	evic_irq_domain = irq_domain_add_linear(node, nchips * 32,
+						&pic32_irq_domain_ops,
+						priv);
+	if (!evic_irq_domain) {
+		ret = -ENOMEM;
+		goto err_free_priv;
+	}
+
+	/*
+	 * The PIC32 EVIC has a linear list of irqs and the type of each
+	 * irq is determined by the hardware peripheral the EVIC is arbitrating.
+	 * These irq types are defined in the datasheet as "persistent" and
+	 * "non-persistent" which are mapped here to level and edge
+	 * respectively. To manage the different flow handler requirements of
+	 * each irq type, different chip_types are used.
+	 */
+	ret = irq_alloc_domain_generic_chips(evic_irq_domain, 32, 2,
+					     "evic-level", handle_level_irq,
+					     clr, 0, 0);
+	if (ret)
+		goto err_domain_remove;
+
+	board_bind_eic_interrupt = &pic32_bind_evic_interrupt;
+
+	for (i = 0; i < nchips; i++) {
+		u32 ifsclr = PIC32_CLR(REG_IFS_OFFSET + (i * 0x10));
+		u32 iec = REG_IEC_OFFSET + (i * 0x10);
+
+		gc = irq_get_domain_generic_chip(evic_irq_domain, i * 32);
+
+		gc->reg_base = evic_base;
+		gc->unused = 0;
+
+		/*
+		 * Level/persistent interrupts have a special requirement that
+		 * the condition generating the interrupt be cleared before the
+		 * interrupt flag (ifs) can be cleared. chip.irq_eoi is used to
+		 * complete the interrupt with an ack.
+		 */
+		gc->chip_types[0].type			= IRQ_TYPE_LEVEL_MASK;
+		gc->chip_types[0].handler		= handle_fasteoi_irq;
+		gc->chip_types[0].regs.ack		= ifsclr;
+		gc->chip_types[0].regs.mask		= iec;
+		gc->chip_types[0].chip.name		= "evic-level";
+		gc->chip_types[0].chip.irq_eoi		= irq_gc_ack_set_bit;
+		gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
+		gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
+		gc->chip_types[0].chip.flags		= IRQCHIP_SKIP_SET_WAKE;
+
+		/* Edge interrupts */
+		gc->chip_types[1].type			= IRQ_TYPE_EDGE_BOTH;
+		gc->chip_types[1].handler		= handle_edge_irq;
+		gc->chip_types[1].regs.ack		= ifsclr;
+		gc->chip_types[1].regs.mask		= iec;
+		gc->chip_types[1].chip.name		= "evic-edge";
+		gc->chip_types[1].chip.irq_ack		= irq_gc_ack_set_bit;
+		gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
+		gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
+		gc->chip_types[1].chip.irq_set_type	= pic32_set_type_edge;
+		gc->chip_types[1].chip.flags		= IRQCHIP_SKIP_SET_WAKE;
+
+		gc->private = &priv[i];
+	}
+
+	irq_set_default_host(evic_irq_domain);
+
+	/*
+	 * External interrupts have software configurable edge polarity. These
+	 * interrupts are defined in DT allowing polarity to be configured only
+	 * for these interrupts when requested.
+	 */
+	pic32_ext_irq_of_init(evic_irq_domain);
+
+	return 0;
+
+err_domain_remove:
+	irq_domain_remove(evic_irq_domain);
+
+err_free_priv:
+	kfree(priv);
+
+err_iounmap:
+	iounmap(evic_base);
+
+	return ret;
+}
+
+IRQCHIP_DECLARE(pic32_evic, "microchip,pic32mzda-evic", pic32_of_init);
diff --git a/drivers/irqchip/irq-renesas-h8300h.c b/drivers/irqchip/irq-renesas-h8300h.c
new file mode 100644
index 0000000..1054d74
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-h8300h.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * H8/300H interrupt controller driver
+ *
+ * Copyright 2015 Yoshinori Sato <ysato@users.sourceforge.jp>
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <asm/io.h>
+
+static const char ipr_bit[] = {
+	 7,  6,  5,  5,
+	 4,  4,  4,  4,  3,  3,  3,  3,
+	 2,  2,  2,  2,  1,  1,  1,  1,
+	 0,  0,  0,  0, 15, 15, 15, 15,
+	14, 14, 14, 14, 13, 13, 13, 13,
+	-1, -1, -1, -1, 11, 11, 11, 11,
+	10, 10, 10, 10,  9,  9,  9,  9,
+};
+
+static void __iomem *intc_baseaddr;
+
+#define IPR (intc_baseaddr + 6)
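+
+/*
+ * Example of the table lookup: external irq 16 indexes
+ * ipr_bit[16 - 12] == 4, selecting bit 4 of IPR; entries >= 8
+ * (e.g. ipr_bit[24] == 15) select bit (15 & 7) == 7 of IPR + 1, and
+ * -1 entries have no priority bit at all.
+ */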
+
+static void h8300h_disable_irq(struct irq_data *data)
+{
+	int bit;
+	int irq = data->irq - 12;
+
+	bit = ipr_bit[irq];
+	if (bit >= 0) {
+		if (bit < 8)
+			ctrl_bclr(bit & 7, IPR);
+		else
+			ctrl_bclr(bit & 7, (IPR+1));
+	}
+}
+
+static void h8300h_enable_irq(struct irq_data *data)
+{
+	int bit;
+	int irq = data->irq - 12;
+
+	bit = ipr_bit[irq];
+	if (bit >= 0) {
+		if (bit < 8)
+			ctrl_bset(bit & 7, IPR);
+		else
+			ctrl_bset(bit & 7, (IPR+1));
+	}
+}
+
+struct irq_chip h8300h_irq_chip = {
+	.name		= "H8/300H-INTC",
+	.irq_enable	= h8300h_enable_irq,
+	.irq_disable	= h8300h_disable_irq,
+};
+
+static int irq_map(struct irq_domain *h, unsigned int virq,
+		   irq_hw_number_t hw_irq_num)
+{
+	irq_set_chip_and_handler(virq, &h8300h_irq_chip, handle_simple_irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops irq_ops = {
+	.map	= irq_map,
+	.xlate	= irq_domain_xlate_onecell,
+};
+
+static int __init h8300h_intc_of_init(struct device_node *intc,
+				      struct device_node *parent)
+{
+	struct irq_domain *domain;
+
+	intc_baseaddr = of_iomap(intc, 0);
+	BUG_ON(!intc_baseaddr);
+
+	/* All interrupt priority low */
+	writeb(0x00, IPR + 0);
+	writeb(0x00, IPR + 1);
+
+	domain = irq_domain_add_linear(intc, NR_IRQS, &irq_ops, NULL);
+	BUG_ON(!domain);
+	irq_set_default_host(domain);
+	return 0;
+}
+
+IRQCHIP_DECLARE(h8300h_intc, "renesas,h8300h-intc", h8300h_intc_of_init);
diff --git a/drivers/irqchip/irq-renesas-h8s.c b/drivers/irqchip/irq-renesas-h8s.c
new file mode 100644
index 0000000..85234d4
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-h8s.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * H8S interrupt controller driver
+ *
+ * Copyright 2015 Yoshinori Sato <ysato@users.sourceforge.jp>
+ */
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <asm/io.h>
+
+static void __iomem *intc_baseaddr;
+#define IPRA (intc_baseaddr)
+
+static const unsigned char ipr_table[] = {
+	0x03, 0x02, 0x01, 0x00, 0x13, 0x12, 0x11, 0x10, /* 16 - 23 */
+	0x23, 0x22, 0x21, 0x20, 0x33, 0x32, 0x31, 0x30, /* 24 - 31 */
+	0x43, 0x42, 0x41, 0x40, 0x53, 0x53, 0x52, 0x52, /* 32 - 39 */
+	0x51, 0x51, 0x51, 0x51, 0x51, 0x51, 0x51, 0x51, /* 40 - 47 */
+	0x50, 0x50, 0x50, 0x50, 0x63, 0x63, 0x63, 0x63, /* 48 - 55 */
+	0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, /* 56 - 63 */
+	0x61, 0x61, 0x61, 0x61, 0x60, 0x60, 0x60, 0x60, /* 64 - 71 */
+	0x73, 0x73, 0x73, 0x73, 0x72, 0x72, 0x72, 0x72, /* 72 - 79 */
+	0x71, 0x71, 0x71, 0x71, 0x70, 0x83, 0x82, 0x81, /* 80 - 87 */
+	0x80, 0x80, 0x80, 0x80, 0x93, 0x93, 0x93, 0x93, /* 88 - 95 */
+	0x92, 0x92, 0x92, 0x92, 0x91, 0x91, 0x91, 0x91, /* 96 - 103 */
+	0x90, 0x90, 0x90, 0x90, 0xa3, 0xa3, 0xa3, 0xa3, /* 104 - 111 */
+	0xa2, 0xa2, 0xa2, 0xa2, 0xa1, 0xa1, 0xa1, 0xa1, /* 112 - 119 */
+	0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, /* 120 - 127 */
+};
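+
+/*
+ * Each entry packs the IPR register (high nibble) and priority slot
+ * (low nibble). Example: irq 16 -> ipr_table[0] == 0x03, so
+ * addr == IPRA + ((0x03 & 0xf0) >> 3) == IPRA and
+ * pos == (0x03 & 0x0f) * 4 == 12, i.e. bits 15:12 of IPRA.
+ */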
+
+static void h8s_disable_irq(struct irq_data *data)
+{
+	int pos;
+	void __iomem *addr;
+	unsigned short pri;
+	int irq = data->irq;
+
+	addr = IPRA + ((ipr_table[irq - 16] & 0xf0) >> 3);
+	pos = (ipr_table[irq - 16] & 0x0f) * 4;
+	pri = ~(0x000f << pos);
+	pri &= readw(addr);
+	writew(pri, addr);
+}
+
+static void h8s_enable_irq(struct irq_data *data)
+{
+	int pos;
+	void __iomem *addr;
+	unsigned short pri;
+	int irq = data->irq;
+
+	addr = IPRA + ((ipr_table[irq - 16] & 0xf0) >> 3);
+	pos = (ipr_table[irq - 16] & 0x0f) * 4;
+	pri = ~(0x000f << pos);
+	pri &= readw(addr);
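+	/* priority 1 is the lowest level that still delivers this interrupt */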
+	pri |= 1 << pos;
+	writew(pri, addr);
+}
+
+struct irq_chip h8s_irq_chip = {
+	.name		= "H8S-INTC",
+	.irq_enable	= h8s_enable_irq,
+	.irq_disable	= h8s_disable_irq,
+};
+
+static int irq_map(struct irq_domain *h, unsigned int virq,
+		   irq_hw_number_t hw_irq_num)
+{
+	irq_set_chip_and_handler(virq, &h8s_irq_chip, handle_simple_irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops irq_ops = {
+	.map	= irq_map,
+	.xlate	= irq_domain_xlate_onecell,
+};
+
+static int __init h8s_intc_of_init(struct device_node *intc,
+				   struct device_node *parent)
+{
+	struct irq_domain *domain;
+	int n;
+
+	intc_baseaddr = of_iomap(intc, 0);
+	BUG_ON(!intc_baseaddr);
+
+	/* All interrupt priority is 0 (disable) */
+	/* IPRA to IPRK */
+	for (n = 0; n <= 'k' - 'a'; n++)
+		writew(0x0000, IPRA + (n * 2));
+
+	domain = irq_domain_add_linear(intc, NR_IRQS, &irq_ops, NULL);
+	BUG_ON(!domain);
+	irq_set_default_host(domain);
+	return 0;
+}
+
+IRQCHIP_DECLARE(h8s_intc, "renesas,h8s-intc", h8s_intc_of_init);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
new file mode 100644
index 0000000..c6e6c9e
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -0,0 +1,624 @@
+/*
+ * Renesas INTC External IRQ Pin Driver
+ *
+ *  Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+
+#define INTC_IRQPIN_MAX 8 /* maximum 8 interrupts per driver instance */
+
+#define INTC_IRQPIN_REG_SENSE 0 /* ICRn */
+#define INTC_IRQPIN_REG_PRIO 1 /* INTPRInn */
+#define INTC_IRQPIN_REG_SOURCE 2 /* INTREQnn */
+#define INTC_IRQPIN_REG_MASK 3 /* INTMSKnn */
+#define INTC_IRQPIN_REG_CLEAR 4 /* INTMSKCLRnn */
+#define INTC_IRQPIN_REG_NR_MANDATORY 5
+#define INTC_IRQPIN_REG_IRLM 5 /* ICR0 with IRLM bit (optional) */
+#define INTC_IRQPIN_REG_NR 6
+
+/* INTC external IRQ PIN hardware register access:
+ *
+ * SENSE is read-write 32-bit with 2-bits or 4-bits per IRQ (*)
+ * PRIO is read-write 32-bit with 4-bits per IRQ (**)
+ * SOURCE is read-only 32-bit or 8-bit with 1-bit per IRQ (***)
+ * MASK is write-only 32-bit or 8-bit with 1-bit per IRQ (***)
+ * CLEAR is write-only 32-bit or 8-bit with 1-bit per IRQ (***)
+ *
+ * (*) May be accessed by more than one driver instance - lock needed
+ * (**) Read-modify-write access by one driver instance - lock needed
+ * (***) Accessed by one driver instance only - no locking needed
+ */
+
+struct intc_irqpin_iomem {
+	void __iomem *iomem;
+	unsigned long (*read)(void __iomem *iomem);
+	void (*write)(void __iomem *iomem, unsigned long data);
+	int width;
+};
+
+struct intc_irqpin_irq {
+	int hw_irq;
+	int requested_irq;
+	int domain_irq;
+	struct intc_irqpin_priv *p;
+};
+
+struct intc_irqpin_priv {
+	struct intc_irqpin_iomem iomem[INTC_IRQPIN_REG_NR];
+	struct intc_irqpin_irq irq[INTC_IRQPIN_MAX];
+	unsigned int sense_bitfield_width;
+	struct platform_device *pdev;
+	struct irq_chip irq_chip;
+	struct irq_domain *irq_domain;
+	atomic_t wakeup_path;
+	unsigned shared_irqs:1;
+	u8 shared_irq_mask;
+};
+
+struct intc_irqpin_config {
+	unsigned int irlm_bit;
+	unsigned needs_irlm:1;
+};
+
+static unsigned long intc_irqpin_read32(void __iomem *iomem)
+{
+	return ioread32(iomem);
+}
+
+static unsigned long intc_irqpin_read8(void __iomem *iomem)
+{
+	return ioread8(iomem);
+}
+
+static void intc_irqpin_write32(void __iomem *iomem, unsigned long data)
+{
+	iowrite32(data, iomem);
+}
+
+static void intc_irqpin_write8(void __iomem *iomem, unsigned long data)
+{
+	iowrite8(data, iomem);
+}
+
+static inline unsigned long intc_irqpin_read(struct intc_irqpin_priv *p,
+					     int reg)
+{
+	struct intc_irqpin_iomem *i = &p->iomem[reg];
+
+	return i->read(i->iomem);
+}
+
+static inline void intc_irqpin_write(struct intc_irqpin_priv *p,
+				     int reg, unsigned long data)
+{
+	struct intc_irqpin_iomem *i = &p->iomem[reg];
+
+	i->write(i->iomem, data);
+}
+
+static inline unsigned long intc_irqpin_hwirq_mask(struct intc_irqpin_priv *p,
+						   int reg, int hw_irq)
+{
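+	/* IRQ0 lives in the most significant bit of each register */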
+	return BIT((p->iomem[reg].width - 1) - hw_irq);
+}
+
+static inline void intc_irqpin_irq_write_hwirq(struct intc_irqpin_priv *p,
+					       int reg, int hw_irq)
+{
+	intc_irqpin_write(p, reg, intc_irqpin_hwirq_mask(p, reg, hw_irq));
+}
+
+static DEFINE_RAW_SPINLOCK(intc_irqpin_lock); /* only used by slow path */
+
+static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
+					  int reg, int shift,
+					  int width, int value)
+{
+	unsigned long flags;
+	unsigned long tmp;
+
+	raw_spin_lock_irqsave(&intc_irqpin_lock, flags);
+
+	tmp = intc_irqpin_read(p, reg);
+	tmp &= ~(((1 << width) - 1) << shift);
+	tmp |= value << shift;
+	intc_irqpin_write(p, reg, tmp);
+
+	raw_spin_unlock_irqrestore(&intc_irqpin_lock, flags);
+}
+
+static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
+					 int irq, int do_mask)
+{
+	/* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
+	int bitfield_width = 4;
+	int shift = 32 - (irq + 1) * bitfield_width;
+
+	intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
+				      shift, bitfield_width,
+				      do_mask ? 0 : (1 << bitfield_width) - 1);
+}
+
+static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
+{
+	/* The SENSE register is assumed to be 32-bit. */
+	int bitfield_width = p->sense_bitfield_width;
+	int shift = 32 - (irq + 1) * bitfield_width;
+
+	dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
+
+	if (value >= (1 << bitfield_width))
+		return -EINVAL;
+
+	intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_SENSE, shift,
+				      bitfield_width, value);
+	return 0;
+}
+
+static void intc_irqpin_dbg(struct intc_irqpin_irq *i, char *str)
+{
+	dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n",
+		str, i->requested_irq, i->hw_irq, i->domain_irq);
+}
+
+static void intc_irqpin_irq_enable(struct irq_data *d)
+{
+	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+	int hw_irq = irqd_to_hwirq(d);
+
+	intc_irqpin_dbg(&p->irq[hw_irq], "enable");
+	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq);
+}
+
+static void intc_irqpin_irq_disable(struct irq_data *d)
+{
+	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+	int hw_irq = irqd_to_hwirq(d);
+
+	intc_irqpin_dbg(&p->irq[hw_irq], "disable");
+	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);
+}
+
+static void intc_irqpin_shared_irq_enable(struct irq_data *d)
+{
+	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+	int hw_irq = irqd_to_hwirq(d);
+
+	intc_irqpin_dbg(&p->irq[hw_irq], "shared enable");
+	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq);
+
+	p->shared_irq_mask &= ~BIT(hw_irq);
+}
+
+static void intc_irqpin_shared_irq_disable(struct irq_data *d)
+{
+	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+	int hw_irq = irqd_to_hwirq(d);
+
+	intc_irqpin_dbg(&p->irq[hw_irq], "shared disable");
+	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);
+
+	p->shared_irq_mask |= BIT(hw_irq);
+}
+
+static void intc_irqpin_irq_enable_force(struct irq_data *d)
+{
+	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+	int irq = p->irq[irqd_to_hwirq(d)].requested_irq;
+
+	intc_irqpin_irq_enable(d);
+
+	/* Enable the interrupt through the parent interrupt controller as
+	 * well. This assumes a non-shared interrupt with a 1:1 mapping and
+	 * is needed to work around broken IRQs on SoCs such as sh73a0.
+	 */
+	irq_get_chip(irq)->irq_unmask(irq_get_irq_data(irq));
+}
+
+static void intc_irqpin_irq_disable_force(struct irq_data *d)
+{
+	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+	int irq = p->irq[irqd_to_hwirq(d)].requested_irq;
+
+	/* Disable the interrupt through the parent interrupt controller as
+	 * well. This assumes a non-shared interrupt with a 1:1 mapping and
+	 * is needed to work around broken IRQs on SoCs such as sh73a0.
+	 */
+	irq_get_chip(irq)->irq_mask(irq_get_irq_data(irq));
+	intc_irqpin_irq_disable(d);
+}
+
+#define INTC_IRQ_SENSE_VALID 0x10
+#define INTC_IRQ_SENSE(x) (x + INTC_IRQ_SENSE_VALID)
+
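+/* the VALID flag separates real entries from zeroed, unsupported types */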
+static unsigned char intc_irqpin_sense[IRQ_TYPE_SENSE_MASK + 1] = {
+	[IRQ_TYPE_EDGE_FALLING] = INTC_IRQ_SENSE(0x00),
+	[IRQ_TYPE_EDGE_RISING] = INTC_IRQ_SENSE(0x01),
+	[IRQ_TYPE_LEVEL_LOW] = INTC_IRQ_SENSE(0x02),
+	[IRQ_TYPE_LEVEL_HIGH] = INTC_IRQ_SENSE(0x03),
+	[IRQ_TYPE_EDGE_BOTH] = INTC_IRQ_SENSE(0x04),
+};
+
+static int intc_irqpin_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	unsigned char value = intc_irqpin_sense[type & IRQ_TYPE_SENSE_MASK];
+	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+
+	if (!(value & INTC_IRQ_SENSE_VALID))
+		return -EINVAL;
+
+	return intc_irqpin_set_sense(p, irqd_to_hwirq(d),
+				     value ^ INTC_IRQ_SENSE_VALID);
+}
+
+static int intc_irqpin_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+	int hw_irq = irqd_to_hwirq(d);
+
+	irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
+	if (on)
+		atomic_inc(&p->wakeup_path);
+	else
+		atomic_dec(&p->wakeup_path);
+
+	return 0;
+}
+
+static irqreturn_t intc_irqpin_irq_handler(int irq, void *dev_id)
+{
+	struct intc_irqpin_irq *i = dev_id;
+	struct intc_irqpin_priv *p = i->p;
+	unsigned long bit;
+
+	intc_irqpin_dbg(i, "demux1");
+	bit = intc_irqpin_hwirq_mask(p, INTC_IRQPIN_REG_SOURCE, i->hw_irq);
+
+	if (intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE) & bit) {
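+		/* SOURCE bits written as 0 are cleared; bits written as 1
+		 * are left untouched
+		 */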
+		intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, ~bit);
+		intc_irqpin_dbg(i, "demux2");
+		generic_handle_irq(i->domain_irq);
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
+static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
+{
+	struct intc_irqpin_priv *p = dev_id;
+	unsigned int reg_source = intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE);
+	irqreturn_t status = IRQ_NONE;
+	int k;
+
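+	/* SOURCE packs IRQ0 into bit 7; skip lines masked at this level */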
+	for (k = 0; k < 8; k++) {
+		if (reg_source & BIT(7 - k)) {
+			if (BIT(k) & p->shared_irq_mask)
+				continue;
+
+			status |= intc_irqpin_irq_handler(irq, &p->irq[k]);
+		}
+	}
+
+	return status;
+}
+
+/*
+ * This lock class tells lockdep that INTC External IRQ Pin irqs are in a
+ * different category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key intc_irqpin_irq_lock_class;
+
+/* And this is for the request mutex */
+static struct lock_class_key intc_irqpin_irq_request_class;
+
+static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
+				      irq_hw_number_t hw)
+{
+	struct intc_irqpin_priv *p = h->host_data;
+
+	p->irq[hw].domain_irq = virq;
+	p->irq[hw].hw_irq = hw;
+
+	intc_irqpin_dbg(&p->irq[hw], "map");
+	irq_set_chip_data(virq, h->host_data);
+	irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class,
+			      &intc_irqpin_irq_request_class);
+	irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
+	return 0;
+}
+
+static const struct irq_domain_ops intc_irqpin_irq_domain_ops = {
+	.map	= intc_irqpin_irq_domain_map,
+	.xlate  = irq_domain_xlate_twocell,
+};
+
+static const struct intc_irqpin_config intc_irqpin_irlm_r8a777x = {
+	.irlm_bit = 23, /* ICR0.IRLM0 */
+	.needs_irlm = 1,
+};
+
+static const struct intc_irqpin_config intc_irqpin_rmobile = {
+	.needs_irlm = 0,
+};
+
+static const struct of_device_id intc_irqpin_dt_ids[] = {
+	{ .compatible = "renesas,intc-irqpin", },
+	{ .compatible = "renesas,intc-irqpin-r8a7778",
+	  .data = &intc_irqpin_irlm_r8a777x },
+	{ .compatible = "renesas,intc-irqpin-r8a7779",
+	  .data = &intc_irqpin_irlm_r8a777x },
+	{ .compatible = "renesas,intc-irqpin-r8a7740",
+	  .data = &intc_irqpin_rmobile },
+	{ .compatible = "renesas,intc-irqpin-sh73a0",
+	  .data = &intc_irqpin_rmobile },
+	{},
+};
+MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids);
+
+static int intc_irqpin_probe(struct platform_device *pdev)
+{
+	const struct intc_irqpin_config *config;
+	struct device *dev = &pdev->dev;
+	struct intc_irqpin_priv *p;
+	struct intc_irqpin_iomem *i;
+	struct resource *io[INTC_IRQPIN_REG_NR];
+	struct resource *irq;
+	struct irq_chip *irq_chip;
+	void (*enable_fn)(struct irq_data *d);
+	void (*disable_fn)(struct irq_data *d);
+	const char *name = dev_name(dev);
+	bool control_parent;
+	unsigned int nirqs;
+	int ref_irq;
+	int ret;
+	int k;
+
+	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
+	if (!p) {
+		dev_err(dev, "failed to allocate driver data\n");
+		return -ENOMEM;
+	}
+
+	/* deal with driver instance configuration */
+	of_property_read_u32(dev->of_node, "sense-bitfield-width",
+			     &p->sense_bitfield_width);
+	control_parent = of_property_read_bool(dev->of_node, "control-parent");
+	if (!p->sense_bitfield_width)
+		p->sense_bitfield_width = 4; /* default to 4 bits */
+
+	p->pdev = pdev;
+	platform_set_drvdata(pdev, p);
+
+	config = of_device_get_match_data(dev);
+
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+
+	/* get hold of register banks */
+	memset(io, 0, sizeof(io));
+	for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
+		io[k] = platform_get_resource(pdev, IORESOURCE_MEM, k);
+		if (!io[k] && k < INTC_IRQPIN_REG_NR_MANDATORY) {
+			dev_err(dev, "not enough IOMEM resources\n");
+			ret = -EINVAL;
+			goto err0;
+		}
+	}
+
+	/* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */
+	for (k = 0; k < INTC_IRQPIN_MAX; k++) {
+		irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
+		if (!irq)
+			break;
+
+		p->irq[k].p = p;
+		p->irq[k].requested_irq = irq->start;
+	}
+
+	nirqs = k;
+	if (nirqs < 1) {
+		dev_err(dev, "not enough IRQ resources\n");
+		ret = -EINVAL;
+		goto err0;
+	}
+
+	/* ioremap IOMEM and setup read/write callbacks */
+	for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
+		i = &p->iomem[k];
+
+		/* handle optional registers */
+		if (!io[k])
+			continue;
+
+		switch (resource_size(io[k])) {
+		case 1:
+			i->width = 8;
+			i->read = intc_irqpin_read8;
+			i->write = intc_irqpin_write8;
+			break;
+		case 4:
+			i->width = 32;
+			i->read = intc_irqpin_read32;
+			i->write = intc_irqpin_write32;
+			break;
+		default:
+			dev_err(dev, "IOMEM size mismatch\n");
+			ret = -EINVAL;
+			goto err0;
+		}
+
+		i->iomem = devm_ioremap_nocache(dev, io[k]->start,
+						resource_size(io[k]));
+		if (!i->iomem) {
+			dev_err(dev, "failed to remap IOMEM\n");
+			ret = -ENXIO;
+			goto err0;
+		}
+	}
+
+	/* configure "individual IRQ mode" where needed */
+	if (config && config->needs_irlm) {
+		if (io[INTC_IRQPIN_REG_IRLM])
+			intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_IRLM,
+						      config->irlm_bit, 1, 1);
+		else
+			dev_warn(dev, "unable to select IRLM mode\n");
+	}
+
+	/* mask all interrupts using priority */
+	for (k = 0; k < nirqs; k++)
+		intc_irqpin_mask_unmask_prio(p, k, 1);
+
+	/* clear all pending interrupts */
+	intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, 0x0);
+
+	/* scan for shared interrupt lines */
+	ref_irq = p->irq[0].requested_irq;
+	p->shared_irqs = 1;
+	for (k = 1; k < nirqs; k++) {
+		if (ref_irq != p->irq[k].requested_irq) {
+			p->shared_irqs = 0;
+			break;
+		}
+	}
+
+	/* use more severe masking method if requested */
+	if (control_parent) {
+		enable_fn = intc_irqpin_irq_enable_force;
+		disable_fn = intc_irqpin_irq_disable_force;
+	} else if (!p->shared_irqs) {
+		enable_fn = intc_irqpin_irq_enable;
+		disable_fn = intc_irqpin_irq_disable;
+	} else {
+		enable_fn = intc_irqpin_shared_irq_enable;
+		disable_fn = intc_irqpin_shared_irq_disable;
+	}
+
+	irq_chip = &p->irq_chip;
+	irq_chip->name = name;
+	irq_chip->irq_mask = disable_fn;
+	irq_chip->irq_unmask = enable_fn;
+	irq_chip->irq_set_type = intc_irqpin_irq_set_type;
+	irq_chip->irq_set_wake = intc_irqpin_irq_set_wake;
+	irq_chip->flags	= IRQCHIP_MASK_ON_SUSPEND;
+
+	p->irq_domain = irq_domain_add_simple(dev->of_node, nirqs, 0,
+					      &intc_irqpin_irq_domain_ops, p);
+	if (!p->irq_domain) {
+		ret = -ENXIO;
+		dev_err(dev, "cannot initialize irq domain\n");
+		goto err0;
+	}
+
+	if (p->shared_irqs) {
+		/* request one shared interrupt */
+		if (devm_request_irq(dev, p->irq[0].requested_irq,
+				intc_irqpin_shared_irq_handler,
+				IRQF_SHARED, name, p)) {
+			dev_err(dev, "failed to request low IRQ\n");
+			ret = -ENOENT;
+			goto err1;
+		}
+	} else {
+		/* request interrupts one by one */
+		for (k = 0; k < nirqs; k++) {
+			if (devm_request_irq(dev, p->irq[k].requested_irq,
+					     intc_irqpin_irq_handler, 0, name,
+					     &p->irq[k])) {
+				dev_err(dev, "failed to request low IRQ\n");
+				ret = -ENOENT;
+				goto err1;
+			}
+		}
+	}
+
+	/* unmask all interrupts on prio level */
+	for (k = 0; k < nirqs; k++)
+		intc_irqpin_mask_unmask_prio(p, k, 0);
+
+	dev_info(dev, "driving %d irqs\n", nirqs);
+
+	return 0;
+
+err1:
+	irq_domain_remove(p->irq_domain);
+err0:
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+	return ret;
+}
+
+static int intc_irqpin_remove(struct platform_device *pdev)
+{
+	struct intc_irqpin_priv *p = platform_get_drvdata(pdev);
+
+	irq_domain_remove(p->irq_domain);
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+static int __maybe_unused intc_irqpin_suspend(struct device *dev)
+{
+	struct intc_irqpin_priv *p = dev_get_drvdata(dev);
+
+	if (atomic_read(&p->wakeup_path))
+		device_set_wakeup_path(dev);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(intc_irqpin_pm_ops, intc_irqpin_suspend, NULL);
+
+static struct platform_driver intc_irqpin_device_driver = {
+	.probe		= intc_irqpin_probe,
+	.remove		= intc_irqpin_remove,
+	.driver		= {
+		.name	= "renesas_intc_irqpin",
+		.of_match_table = intc_irqpin_dt_ids,
+		.pm	= &intc_irqpin_pm_ops,
+	}
+};
+
+static int __init intc_irqpin_init(void)
+{
+	return platform_driver_register(&intc_irqpin_device_driver);
+}
+postcore_initcall(intc_irqpin_init);
+
+static void __exit intc_irqpin_exit(void)
+{
+	platform_driver_unregister(&intc_irqpin_device_driver);
+}
+module_exit(intc_irqpin_exit);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Renesas INTC External IRQ Pin Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
new file mode 100644
index 0000000..a4f1112
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -0,0 +1,310 @@
+/*
+ * Renesas IRQC Driver
+ *
+ *  Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#define IRQC_IRQ_MAX	32	/* maximum 32 interrupts per driver instance */
+
+#define IRQC_REQ_STS	0x00	/* Interrupt Request Status Register */
+#define IRQC_EN_STS	0x04	/* Interrupt Enable Status Register */
+#define IRQC_EN_SET	0x08	/* Interrupt Enable Set Register */
+#define IRQC_INT_CPU_BASE(n) (0x000 + ((n) * 0x10))
+				/* SYS-CPU vs. RT-CPU */
+#define DETECT_STATUS	0x100	/* IRQn Detect Status Register */
+#define MONITOR		0x104	/* IRQn Signal Level Monitor Register */
+#define HLVL_STS	0x108	/* IRQn High Level Detect Status Register */
+#define LLVL_STS	0x10c	/* IRQn Low Level Detect Status Register */
+#define S_R_EDGE_STS	0x110	/* IRQn Sync Rising Edge Detect Status Reg. */
+#define S_F_EDGE_STS	0x114	/* IRQn Sync Falling Edge Detect Status Reg. */
+#define A_R_EDGE_STS	0x118	/* IRQn Async Rising Edge Detect Status Reg. */
+#define A_F_EDGE_STS	0x11c	/* IRQn Async Falling Edge Detect Status Reg. */
+#define CHTEN_STS	0x120	/* Chattering Reduction Status Register */
+#define IRQC_CONFIG(n) (0x180 + ((n) * 0x04))
+				/* IRQn Configuration Register */
+
+struct irqc_irq {
+	int hw_irq;
+	int requested_irq;
+	struct irqc_priv *p;
+};
+
+struct irqc_priv {
+	void __iomem *iomem;
+	void __iomem *cpu_int_base;
+	struct irqc_irq irq[IRQC_IRQ_MAX];
+	unsigned int number_of_irqs;
+	struct platform_device *pdev;
+	struct irq_chip_generic *gc;
+	struct irq_domain *irq_domain;
+	atomic_t wakeup_path;
+};
+
+static struct irqc_priv *irq_data_to_priv(struct irq_data *data)
+{
+	return data->domain->host_data;
+}
+
+static void irqc_dbg(struct irqc_irq *i, char *str)
+{
+	dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n",
+		str, i->requested_irq, i->hw_irq);
+}
+
+static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
+	[IRQ_TYPE_LEVEL_LOW]	= 0x01,
+	[IRQ_TYPE_LEVEL_HIGH]	= 0x02,
+	[IRQ_TYPE_EDGE_FALLING]	= 0x04,	/* Synchronous */
+	[IRQ_TYPE_EDGE_RISING]	= 0x08,	/* Synchronous */
+	[IRQ_TYPE_EDGE_BOTH]	= 0x0c,	/* Synchronous */
+};
+
+static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct irqc_priv *p = irq_data_to_priv(d);
+	int hw_irq = irqd_to_hwirq(d);
+	unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK];
+	u32 tmp;
+
+	irqc_dbg(&p->irq[hw_irq], "sense");
+
+	if (!value)
+		return -EINVAL;
+
+	tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq));
+	tmp &= ~0x3f;
+	tmp |= value;
+	iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq));
+	return 0;
+}
+
+static int irqc_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+	struct irqc_priv *p = irq_data_to_priv(d);
+	int hw_irq = irqd_to_hwirq(d);
+
+	irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
+	if (on)
+		atomic_inc(&p->wakeup_path);
+	else
+		atomic_dec(&p->wakeup_path);
+
+	return 0;
+}
+
+static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
+{
+	struct irqc_irq *i = dev_id;
+	struct irqc_priv *p = i->p;
+	u32 bit = BIT(i->hw_irq);
+
+	irqc_dbg(i, "demux1");
+
+	if (ioread32(p->iomem + DETECT_STATUS) & bit) {
+		iowrite32(bit, p->iomem + DETECT_STATUS);
+		irqc_dbg(i, "demux2");
+		generic_handle_irq(irq_find_mapping(p->irq_domain, i->hw_irq));
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
+static int irqc_probe(struct platform_device *pdev)
+{
+	struct irqc_priv *p;
+	struct resource *io;
+	struct resource *irq;
+	const char *name = dev_name(&pdev->dev);
+	int ret;
+	int k;
+
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	if (!p) {
+		dev_err(&pdev->dev, "failed to allocate driver data\n");
+		ret = -ENOMEM;
+		goto err0;
+	}
+
+	p->pdev = pdev;
+	platform_set_drvdata(pdev, p);
+
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_sync(&pdev->dev);
+
+	/* get hold of mandatory IOMEM */
+	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!io) {
+		dev_err(&pdev->dev, "not enough IOMEM resources\n");
+		ret = -EINVAL;
+		goto err1;
+	}
+
+	/* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
+	for (k = 0; k < IRQC_IRQ_MAX; k++) {
+		irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
+		if (!irq)
+			break;
+
+		p->irq[k].p = p;
+		p->irq[k].hw_irq = k;
+		p->irq[k].requested_irq = irq->start;
+	}
+
+	p->number_of_irqs = k;
+	if (p->number_of_irqs < 1) {
+		dev_err(&pdev->dev, "not enough IRQ resources\n");
+		ret = -EINVAL;
+		goto err1;
+	}
+
+	/* ioremap IOMEM and setup read/write callbacks */
+	p->iomem = ioremap_nocache(io->start, resource_size(io));
+	if (!p->iomem) {
+		dev_err(&pdev->dev, "failed to remap IOMEM\n");
+		ret = -ENXIO;
+		goto err2;
+	}
+
+	p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */
+
+	p->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
+					      p->number_of_irqs,
+					      &irq_generic_chip_ops, p);
+	if (!p->irq_domain) {
+		ret = -ENXIO;
+		dev_err(&pdev->dev, "cannot initialize irq domain\n");
+		goto err2;
+	}
+
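+	/* a single generic chip with one chip type covers every hwirq */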
+	ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs,
+					     1, name, handle_level_irq,
+					     0, 0, IRQ_GC_INIT_NESTED_LOCK);
+	if (ret) {
+		dev_err(&pdev->dev, "cannot allocate generic chip\n");
+		goto err3;
+	}
+
+	p->gc = irq_get_domain_generic_chip(p->irq_domain, 0);
+	p->gc->reg_base = p->cpu_int_base;
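+	/* writing a bit to EN_STS clears that enable bit, so it doubles
+	 * as the disable register for the generic chip
+	 */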
+	p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
+	p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
+	p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+	p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+	p->gc->chip_types[0].chip.irq_set_type	= irqc_irq_set_type;
+	p->gc->chip_types[0].chip.irq_set_wake	= irqc_irq_set_wake;
+	p->gc->chip_types[0].chip.flags	= IRQCHIP_MASK_ON_SUSPEND;
+
+	/* request interrupts one by one */
+	for (k = 0; k < p->number_of_irqs; k++) {
+		if (request_irq(p->irq[k].requested_irq, irqc_irq_handler,
+				0, name, &p->irq[k])) {
+			dev_err(&pdev->dev, "failed to request IRQ\n");
+			ret = -ENOENT;
+			goto err4;
+		}
+	}
+
+	dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs);
+
+	return 0;
+err4:
+	while (--k >= 0)
+		free_irq(p->irq[k].requested_irq, &p->irq[k]);
+
+err3:
+	irq_domain_remove(p->irq_domain);
+err2:
+	iounmap(p->iomem);
+err1:
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	kfree(p);
+err0:
+	return ret;
+}
+
+static int irqc_remove(struct platform_device *pdev)
+{
+	struct irqc_priv *p = platform_get_drvdata(pdev);
+	int k;
+
+	for (k = 0; k < p->number_of_irqs; k++)
+		free_irq(p->irq[k].requested_irq, &p->irq[k]);
+
+	irq_domain_remove(p->irq_domain);
+	iounmap(p->iomem);
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	kfree(p);
+	return 0;
+}
+
+static int __maybe_unused irqc_suspend(struct device *dev)
+{
+	struct irqc_priv *p = dev_get_drvdata(dev);
+
+	if (atomic_read(&p->wakeup_path))
+		device_set_wakeup_path(dev);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(irqc_pm_ops, irqc_suspend, NULL);
+
+static const struct of_device_id irqc_dt_ids[] = {
+	{ .compatible = "renesas,irqc", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, irqc_dt_ids);
+
+static struct platform_driver irqc_device_driver = {
+	.probe		= irqc_probe,
+	.remove		= irqc_remove,
+	.driver		= {
+		.name	= "renesas_irqc",
+		.of_match_table	= irqc_dt_ids,
+		.pm	= &irqc_pm_ops,
+	}
+};
+
+static int __init irqc_init(void)
+{
+	return platform_driver_register(&irqc_device_driver);
+}
+postcore_initcall(irqc_init);
+
+static void __exit irqc_exit(void)
+{
+	platform_driver_unregister(&irqc_device_driver);
+}
+module_exit(irqc_exit);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Renesas IRQC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
new file mode 100644
index 0000000..c19766f
--- /dev/null
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -0,0 +1,1339 @@
+/*
+ * S3C24XX IRQ handling
+ *
+ * Copyright (c) 2003-2004 Simtec Electronics
+ *	Ben Dooks <ben@simtec.co.uk>
+ * Copyright (c) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include <mach/regs-irq.h>
+#include <mach/regs-gpio.h>
+
+#include <plat/cpu.h>
+#include <plat/regs-irqtype.h>
+#include <plat/pm.h>
+
+#define S3C_IRQTYPE_NONE	0
+#define S3C_IRQTYPE_EINT	1
+#define S3C_IRQTYPE_EDGE	2
+#define S3C_IRQTYPE_LEVEL	3
+
+struct s3c_irq_data {
+	unsigned int type;
+	unsigned long offset;
+	unsigned long parent_irq;
+
+	/* data gets filled during init */
+	struct s3c_irq_intc *intc;
+	unsigned long sub_bits;
+	struct s3c_irq_intc *sub_intc;
+};
+
+/*
+ * Structure holding the controller data
+ * @reg_pending		register holding pending irqs
+ * @reg_intpnd		special register intpnd in main intc
+ * @reg_mask		mask register
+ * @domain		irq_domain of the controller
+ * @parent		parent controller for ext and sub irqs
+ * @irqs		irq-data, always s3c_irq_data[32]
+ */
+struct s3c_irq_intc {
+	void __iomem		*reg_pending;
+	void __iomem		*reg_intpnd;
+	void __iomem		*reg_mask;
+	struct irq_domain	*domain;
+	struct s3c_irq_intc	*parent;
+	struct s3c_irq_data	*irqs;
+};
+
+/*
+ * Array holding pointers to the global controller structs
+ * [0] ... main_intc
+ * [1] ... sub_intc
+ * [2] ... main_intc2 on s3c2416
+ */
+static struct s3c_irq_intc *s3c_intc[3];
+
+static void s3c_irq_mask(struct irq_data *data)
+{
+	struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data);
+	struct s3c_irq_intc *intc = irq_data->intc;
+	struct s3c_irq_intc *parent_intc = intc->parent;
+	struct s3c_irq_data *parent_data;
+	unsigned long mask;
+	unsigned int irqno;
+
+	mask = readl_relaxed(intc->reg_mask);
+	mask |= (1UL << irq_data->offset);
+	writel_relaxed(mask, intc->reg_mask);
+
+	if (parent_intc) {
+		parent_data = &parent_intc->irqs[irq_data->parent_irq];
+
+		/* check to see if we need to mask the parent IRQ
+		 * The parent_irq is always in main_intc, so the hwirq
+		 * for find_mapping does not need an offset in any case.
+		 */
+		if ((mask & parent_data->sub_bits) == parent_data->sub_bits) {
+			irqno = irq_find_mapping(parent_intc->domain,
+					 irq_data->parent_irq);
+			s3c_irq_mask(irq_get_irq_data(irqno));
+		}
+	}
+}
+
+static void s3c_irq_unmask(struct irq_data *data)
+{
+	struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data);
+	struct s3c_irq_intc *intc = irq_data->intc;
+	struct s3c_irq_intc *parent_intc = intc->parent;
+	unsigned long mask;
+	unsigned int irqno;
+
+	mask = readl_relaxed(intc->reg_mask);
+	mask &= ~(1UL << irq_data->offset);
+	writel_relaxed(mask, intc->reg_mask);
+
+	if (parent_intc) {
+		irqno = irq_find_mapping(parent_intc->domain,
+					 irq_data->parent_irq);
+		s3c_irq_unmask(irq_get_irq_data(irqno));
+	}
+}
+
+static inline void s3c_irq_ack(struct irq_data *data)
+{
+	struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data);
+	struct s3c_irq_intc *intc = irq_data->intc;
+	unsigned long bitval = 1UL << irq_data->offset;
+
+	writel_relaxed(bitval, intc->reg_pending);
+	if (intc->reg_intpnd)
+		writel_relaxed(bitval, intc->reg_intpnd);
+}
+
+static int s3c_irq_type(struct irq_data *data, unsigned int type)
+{
+	switch (type) {
+	case IRQ_TYPE_NONE:
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+	case IRQ_TYPE_EDGE_FALLING:
+	case IRQ_TYPE_EDGE_BOTH:
+		irq_set_handler(data->irq, handle_edge_irq);
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+	case IRQ_TYPE_LEVEL_HIGH:
+		irq_set_handler(data->irq, handle_level_irq);
+		break;
+	default:
+		pr_err("No such irq type %d\n", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int s3c_irqext_type_set(void __iomem *gpcon_reg,
+			       void __iomem *extint_reg,
+			       unsigned long gpcon_offset,
+			       unsigned long extint_offset,
+			       unsigned int type)
+{
+	unsigned long newvalue = 0, value;
+
+	/* Set the GPIO to external interrupt mode */
+	value = readl_relaxed(gpcon_reg);
+	value = (value & ~(3 << gpcon_offset)) | (0x02 << gpcon_offset);
+	writel_relaxed(value, gpcon_reg);
+
+	/* Set the external interrupt to the requested trigger type */
+	switch (type) {
+	case IRQ_TYPE_NONE:
+		pr_warn("No edge setting!\n");
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		newvalue = S3C2410_EXTINT_RISEEDGE;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		newvalue = S3C2410_EXTINT_FALLEDGE;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		newvalue = S3C2410_EXTINT_BOTHEDGE;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		newvalue = S3C2410_EXTINT_LOWLEV;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		newvalue = S3C2410_EXTINT_HILEV;
+		break;
+	default:
+		pr_err("No such irq type %d\n", type);
+		return -EINVAL;
+	}
+
+	value = readl_relaxed(extint_reg);
+	value = (value & ~(7 << extint_offset)) | (newvalue << extint_offset);
+	writel_relaxed(value, extint_reg);
+
+	return 0;
+}
+
+static int s3c_irqext_type(struct irq_data *data, unsigned int type)
+{
+	void __iomem *extint_reg;
+	void __iomem *gpcon_reg;
+	unsigned long gpcon_offset, extint_offset;
+
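+	/* EINT4-7 sit on GPF4-7 and EINT8-23 on GPG0-15; pick the matching
+	 * GPIO control and EXTINT registers for this hwirq
+	 */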
+	if ((data->hwirq >= 4) && (data->hwirq <= 7)) {
+		gpcon_reg = S3C2410_GPFCON;
+		extint_reg = S3C24XX_EXTINT0;
+		gpcon_offset = (data->hwirq) * 2;
+		extint_offset = (data->hwirq) * 4;
+	} else if ((data->hwirq >= 8) && (data->hwirq <= 15)) {
+		gpcon_reg = S3C2410_GPGCON;
+		extint_reg = S3C24XX_EXTINT1;
+		gpcon_offset = (data->hwirq - 8) * 2;
+		extint_offset = (data->hwirq - 8) * 4;
+	} else if ((data->hwirq >= 16) && (data->hwirq <= 23)) {
+		gpcon_reg = S3C2410_GPGCON;
+		extint_reg = S3C24XX_EXTINT2;
+		gpcon_offset = (data->hwirq - 8) * 2;
+		extint_offset = (data->hwirq - 16) * 4;
+	} else {
+		return -EINVAL;
+	}
+
+	return s3c_irqext_type_set(gpcon_reg, extint_reg, gpcon_offset,
+				   extint_offset, type);
+}
+
+static int s3c_irqext0_type(struct irq_data *data, unsigned int type)
+{
+	void __iomem *extint_reg;
+	void __iomem *gpcon_reg;
+	unsigned long gpcon_offset, extint_offset;
+
+	if (data->hwirq <= 3) {
+		gpcon_reg = S3C2410_GPFCON;
+		extint_reg = S3C24XX_EXTINT0;
+		gpcon_offset = (data->hwirq) * 2;
+		extint_offset = (data->hwirq) * 4;
+	} else {
+		return -EINVAL;
+	}
+
+	return s3c_irqext_type_set(gpcon_reg, extint_reg, gpcon_offset,
+				   extint_offset, type);
+}
+
+static struct irq_chip s3c_irq_chip = {
+	.name		= "s3c",
+	.irq_ack	= s3c_irq_ack,
+	.irq_mask	= s3c_irq_mask,
+	.irq_unmask	= s3c_irq_unmask,
+	.irq_set_type	= s3c_irq_type,
+	.irq_set_wake	= s3c_irq_wake,
+};
+
+static struct irq_chip s3c_irq_level_chip = {
+	.name		= "s3c-level",
+	.irq_mask	= s3c_irq_mask,
+	.irq_unmask	= s3c_irq_unmask,
+	.irq_ack	= s3c_irq_ack,
+	.irq_set_type	= s3c_irq_type,
+};
+
+static struct irq_chip s3c_irqext_chip = {
+	.name		= "s3c-ext",
+	.irq_mask	= s3c_irq_mask,
+	.irq_unmask	= s3c_irq_unmask,
+	.irq_ack	= s3c_irq_ack,
+	.irq_set_type	= s3c_irqext_type,
+	.irq_set_wake	= s3c_irqext_wake,
+};
+
+static struct irq_chip s3c_irq_eint0t4 = {
+	.name		= "s3c-ext0",
+	.irq_ack	= s3c_irq_ack,
+	.irq_mask	= s3c_irq_mask,
+	.irq_unmask	= s3c_irq_unmask,
+	.irq_set_wake	= s3c_irq_wake,
+	.irq_set_type	= s3c_irqext0_type,
+};
+
+static void s3c_irq_demux(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc);
+	struct s3c_irq_intc *intc = irq_data->intc;
+	struct s3c_irq_intc *sub_intc = irq_data->sub_intc;
+	unsigned int n, offset, irq;
+	unsigned long src, msk;
+
+	/* we're using individual domains for the non-dt case
+	 * and one big domain for the dt case where the subintc
+	 * starts at hwirq number 32.
+	 */
+	offset = irq_domain_get_of_node(intc->domain) ? 32 : 0;
+
+	chained_irq_enter(chip, desc);
+
+	src = readl_relaxed(sub_intc->reg_pending);
+	msk = readl_relaxed(sub_intc->reg_mask);
+
+	src &= ~msk;
+	src &= irq_data->sub_bits;
+
+	while (src) {
+		n = __ffs(src);
+		src &= ~(1 << n);
+		irq = irq_find_mapping(sub_intc->domain, offset + n);
+		generic_handle_irq(irq);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc,
+				      struct pt_regs *regs, int intc_offset)
+{
+	int pnd;
+	int offset;
+
+	pnd = readl_relaxed(intc->reg_intpnd);
+	if (!pnd)
+		return false;
+
+	/* non-dt machines use individual domains */
+	if (!irq_domain_get_of_node(intc->domain))
+		intc_offset = 0;
+
+	/* We have a problem that the INTOFFSET register does not always
+	 * show one interrupt. Occasionally we get two interrupts through
+	 * the prioritiser, and this causes the INTOFFSET register to show
+	 * what looks like the logical-or of the two interrupt numbers.
+	 *
+	 * Thanks to Klaus, Shannon, et al for helping to debug this problem
+	 */
+	offset = readl_relaxed(intc->reg_intpnd + 4);
+
+	/* Find the bit manually, when the offset is wrong.
+	 * The pending register only ever contains the one bit of the next
+	 * interrupt to handle.
+	 */
+	if (!(pnd & (1 << offset)))
+		offset = __ffs(pnd);
+
+	handle_domain_irq(intc->domain, intc_offset + offset, regs);
+	return true;
+}
+
+asmlinkage void __exception_irq_entry s3c24xx_handle_irq(struct pt_regs *regs)
+{
+	do {
+		if (likely(s3c_intc[0]))
+			if (s3c24xx_handle_intc(s3c_intc[0], regs, 0))
+				continue;
+
+		if (s3c_intc[2])
+			if (s3c24xx_handle_intc(s3c_intc[2], regs, 64))
+				continue;
+
+		break;
+	} while (1);
+}
+
+#ifdef CONFIG_FIQ
+/**
+ * s3c24xx_set_fiq - set the FIQ routing
+ * @irq: IRQ number to route to FIQ on processor.
+ * @on: Whether to route @irq to the FIQ, or to remove the FIQ routing.
+ *
+ * Change the state of the IRQ to FIQ routing depending on @irq and @on. If
+ * @on is true, the @irq is checked to see if it can be routed and the
+ * interrupt controller updated to route the IRQ. If @on is false, the FIQ
+ * routing is cleared, regardless of which @irq is specified.
+ */
+int s3c24xx_set_fiq(unsigned int irq, bool on)
+{
+	u32 intmod;
+	unsigned offs;
+
+	if (on) {
+		offs = irq - FIQ_START;
+		if (offs > 31)
+			return -EINVAL;
+
+		intmod = 1 << offs;
+	} else {
+		intmod = 0;
+	}
+
+	writel_relaxed(intmod, S3C2410_INTMOD);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(s3c24xx_set_fiq);
+#endif
+
+static int s3c24xx_irq_map(struct irq_domain *h, unsigned int virq,
+							irq_hw_number_t hw)
+{
+	struct s3c_irq_intc *intc = h->host_data;
+	struct s3c_irq_data *irq_data = &intc->irqs[hw];
+	struct s3c_irq_intc *parent_intc;
+	struct s3c_irq_data *parent_irq_data;
+	unsigned int irqno;
+
+	/* attach controller pointer to irq_data */
+	irq_data->intc = intc;
+	irq_data->offset = hw;
+
+	parent_intc = intc->parent;
+
+	/* set handler and flags */
+	switch (irq_data->type) {
+	case S3C_IRQTYPE_NONE:
+		return 0;
+	case S3C_IRQTYPE_EINT:
+		/* On the S3C2412, the EINT0to3 have a parent irq
+		 * but need the s3c_irq_eint0t4 chip
+		 */
+		if (parent_intc && (!soc_is_s3c2412() || hw >= 4))
+			irq_set_chip_and_handler(virq, &s3c_irqext_chip,
+						 handle_edge_irq);
+		else
+			irq_set_chip_and_handler(virq, &s3c_irq_eint0t4,
+						 handle_edge_irq);
+		break;
+	case S3C_IRQTYPE_EDGE:
+		if (parent_intc || intc->reg_pending == S3C2416_SRCPND2)
+			irq_set_chip_and_handler(virq, &s3c_irq_level_chip,
+						 handle_edge_irq);
+		else
+			irq_set_chip_and_handler(virq, &s3c_irq_chip,
+						 handle_edge_irq);
+		break;
+	case S3C_IRQTYPE_LEVEL:
+		if (parent_intc)
+			irq_set_chip_and_handler(virq, &s3c_irq_level_chip,
+						 handle_level_irq);
+		else
+			irq_set_chip_and_handler(virq, &s3c_irq_chip,
+						 handle_level_irq);
+		break;
+	default:
+		pr_err("irq-s3c24xx: unsupported irqtype %d\n", irq_data->type);
+		return -EINVAL;
+	}
+
+	irq_set_chip_data(virq, irq_data);
+
+	if (parent_intc && irq_data->type != S3C_IRQTYPE_NONE) {
+		if (irq_data->parent_irq > 31) {
+			pr_err("irq-s3c24xx: parent irq %lu is out of range\n",
+			       irq_data->parent_irq);
+			return -EINVAL;
+		}
+
+		parent_irq_data = &parent_intc->irqs[irq_data->parent_irq];
+		parent_irq_data->sub_intc = intc;
+		parent_irq_data->sub_bits |= (1UL << hw);
+
+		/* attach the demuxer to the parent irq */
+		irqno = irq_find_mapping(parent_intc->domain,
+					 irq_data->parent_irq);
+		if (!irqno) {
+			pr_err("irq-s3c24xx: could not find mapping for parent irq %lu\n",
+			       irq_data->parent_irq);
+			return -EINVAL;
+		}
+		irq_set_chained_handler(irqno, s3c_irq_demux);
+	}
+
+	return 0;
+}
+
+static const struct irq_domain_ops s3c24xx_irq_ops = {
+	.map = s3c24xx_irq_map,
+	.xlate = irq_domain_xlate_twocell,
+};
+
+static void s3c24xx_clear_intc(struct s3c_irq_intc *intc)
+{
+	void __iomem *reg_source;
+	unsigned long pend;
+	unsigned long last;
+	int i;
+
+	/* if intpnd is set, read the next pending irq from there */
+	reg_source = intc->reg_intpnd ? intc->reg_intpnd : intc->reg_pending;
+
+	last = 0;
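+	/* ack at most four times, stopping once nothing new turns up */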
+	for (i = 0; i < 4; i++) {
+		pend = readl_relaxed(reg_source);
+
+		if (pend == 0 || pend == last)
+			break;
+
+		writel_relaxed(pend, intc->reg_pending);
+		if (intc->reg_intpnd)
+			writel_relaxed(pend, intc->reg_intpnd);
+
+		pr_info("irq: clearing pending status %08x\n", (int)pend);
+		last = pend;
+	}
+}
+
+static struct s3c_irq_intc * __init s3c24xx_init_intc(struct device_node *np,
+				       struct s3c_irq_data *irq_data,
+				       struct s3c_irq_intc *parent,
+				       unsigned long address)
+{
+	struct s3c_irq_intc *intc;
+	void __iomem *base = (void __iomem *)0xf6000000; /* static mapping */
+	int irq_num;
+	int irq_start;
+	int ret;
+
+	intc = kzalloc(sizeof(struct s3c_irq_intc), GFP_KERNEL);
+	if (!intc)
+		return ERR_PTR(-ENOMEM);
+
+	intc->irqs = irq_data;
+
+	if (parent)
+		intc->parent = parent;
+
+	/* select the correct data for the controller.
+	 * Need to hard code the irq num start and offset
+	 * to preserve the static mapping for now
+	 */
+	switch (address) {
+	case 0x4a000000:
+		pr_debug("irq: found main intc\n");
+		intc->reg_pending = base;
+		intc->reg_mask = base + 0x08;
+		intc->reg_intpnd = base + 0x10;
+		irq_num = 32;
+		irq_start = S3C2410_IRQ(0);
+		break;
+	case 0x4a000018:
+		pr_debug("irq: found subintc\n");
+		intc->reg_pending = base + 0x18;
+		intc->reg_mask = base + 0x1c;
+		irq_num = 29;
+		irq_start = S3C2410_IRQSUB(0);
+		break;
+	case 0x4a000040:
+		pr_debug("irq: found intc2\n");
+		intc->reg_pending = base + 0x40;
+		intc->reg_mask = base + 0x48;
+		intc->reg_intpnd = base + 0x50;
+		irq_num = 8;
+		irq_start = S3C2416_IRQ(0);
+		break;
+	case 0x560000a4:
+		pr_debug("irq: found eintc\n");
+		base = (void __iomem *)0xfd000000;
+
+		intc->reg_mask = base + 0xa4;
+		intc->reg_pending = base + 0xa8;
+		irq_num = 24;
+		irq_start = S3C2410_IRQ(32);
+		break;
+	default:
+		pr_err("irq: unsupported controller address\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* now that all the data is complete, init the irq-domain */
+	s3c24xx_clear_intc(intc);
+	intc->domain = irq_domain_add_legacy(np, irq_num, irq_start,
+					     0, &s3c24xx_irq_ops,
+					     intc);
+	if (!intc->domain) {
+		pr_err("irq: could not create irq-domain\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	set_handle_irq(s3c24xx_handle_irq);
+
+	return intc;
+
+err:
+	kfree(intc);
+	return ERR_PTR(ret);
+}
+
+static struct s3c_irq_data __maybe_unused init_eint[32] = {
+	{ .type = S3C_IRQTYPE_NONE, }, /* reserved */
+	{ .type = S3C_IRQTYPE_NONE, }, /* reserved */
+	{ .type = S3C_IRQTYPE_NONE, }, /* reserved */
+	{ .type = S3C_IRQTYPE_NONE, }, /* reserved */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT4 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT5 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT6 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT7 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT8 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT9 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT10 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT11 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT12 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT13 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT14 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT15 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT16 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT17 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT18 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT19 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT20 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT21 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT22 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT23 */
+};
+
+#ifdef CONFIG_CPU_S3C2410
+static struct s3c_irq_data init_s3c2410base[32] = {
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT0 */
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT1 */
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT2 */
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT3 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
+	{ .type = S3C_IRQTYPE_NONE, }, /* reserved */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TICK */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* WDT */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* LCD */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SDI */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
+	{ .type = S3C_IRQTYPE_NONE, }, /* reserved */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* USBD */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* USBH */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* IIC */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* RTC */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
+};
+
+static struct s3c_irq_data init_s3c2410subint[32] = {
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
+	{ .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
+	{ .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
+};
+
+void __init s3c2410_init_irq(void)
+{
+#ifdef CONFIG_FIQ
+	init_FIQ(FIQ_START);
+#endif
+
+	s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2410base[0], NULL,
+					0x4a000000);
+	if (IS_ERR(s3c_intc[0])) {
+		pr_err("irq: could not create main interrupt controller\n");
+		return;
+	}
+
+	s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2410subint[0],
+					s3c_intc[0], 0x4a000018);
+	s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4);
+}
+#endif
+
+#ifdef CONFIG_CPU_S3C2412
+static struct s3c_irq_data init_s3c2412base[32] = {
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT0 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT1 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT2 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT3 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
+	{ .type = S3C_IRQTYPE_NONE, }, /* reserved */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TICK */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* WDT */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* LCD */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* SDI/CF */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
+	{ .type = S3C_IRQTYPE_NONE, }, /* reserved */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* USBD */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* USBH */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* IIC */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* RTC */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
+};
+
+static struct s3c_irq_data init_s3c2412eint[32] = {
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 0 }, /* EINT0 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 1 }, /* EINT1 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 2 }, /* EINT2 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 3 }, /* EINT3 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT4 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT5 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT6 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT7 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT8 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT9 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT10 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT11 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT12 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT13 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT14 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT15 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT16 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT17 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT18 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT19 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT20 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT21 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT22 */
+	{ .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT23 */
+};
+
+static struct s3c_irq_data init_s3c2412subint[32] = {
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
+	{ .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
+	{ .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
+	{ .type = S3C_IRQTYPE_NONE, },
+	{ .type = S3C_IRQTYPE_NONE, },
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 21 }, /* SDI */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 21 }, /* CF */
+};
+
+void __init s3c2412_init_irq(void)
+{
+	pr_info("S3C2412: IRQ Support\n");
+
+#ifdef CONFIG_FIQ
+	init_FIQ(FIQ_START);
+#endif
+
+	s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2412base[0], NULL,
+					0x4a000000);
+	if (IS_ERR(s3c_intc[0])) {
+		pr_err("irq: could not create main interrupt controller\n");
+		return;
+	}
+
+	s3c24xx_init_intc(NULL, &init_s3c2412eint[0], s3c_intc[0], 0x560000a4);
+	s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2412subint[0],
+					s3c_intc[0], 0x4a000018);
+}
+#endif
+
+#ifdef CONFIG_CPU_S3C2416
+static struct s3c_irq_data init_s3c2416base[32] = {
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT0 */
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT1 */
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT2 */
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT3 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
+	{ .type = S3C_IRQTYPE_NONE, }, /* reserved */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TICK */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* LCD */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* DMA */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART3 */
+	{ .type = S3C_IRQTYPE_NONE, }, /* reserved */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SDI1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SDI0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* NAND */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* USBD */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* USBH */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* IIC */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
+	{ .type = S3C_IRQTYPE_NONE, },
+	{ .type = S3C_IRQTYPE_EDGE, }, /* RTC */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
+};
+
+static struct s3c_irq_data init_s3c2416subint[32] = {
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
+	{ .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
+	{ .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
+	{ .type = S3C_IRQTYPE_NONE }, /* reserved */
+	{ .type = S3C_IRQTYPE_NONE }, /* reserved */
+	{ .type = S3C_IRQTYPE_NONE }, /* reserved */
+	{ .type = S3C_IRQTYPE_NONE }, /* reserved */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD2 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD3 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD4 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA0 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA1 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA2 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA3 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA4 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA5 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-ERR */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */
+};
+
+static struct s3c_irq_data init_s3c2416_second[32] = {
+	{ .type = S3C_IRQTYPE_EDGE }, /* 2D */
+	{ .type = S3C_IRQTYPE_NONE }, /* reserved */
+	{ .type = S3C_IRQTYPE_NONE }, /* reserved */
+	{ .type = S3C_IRQTYPE_NONE }, /* reserved */
+	{ .type = S3C_IRQTYPE_EDGE }, /* PCM0 */
+	{ .type = S3C_IRQTYPE_NONE }, /* reserved */
+	{ .type = S3C_IRQTYPE_EDGE }, /* I2S0 */
+};
+
+void __init s3c2416_init_irq(void)
+{
+	pr_info("S3C2416: IRQ Support\n");
+
+#ifdef CONFIG_FIQ
+	init_FIQ(FIQ_START);
+#endif
+
+	s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2416base[0], NULL,
+					0x4a000000);
+	if (IS_ERR(s3c_intc[0])) {
+		pr_err("irq: could not create main interrupt controller\n");
+		return;
+	}
+
+	s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4);
+	s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2416subint[0],
+					s3c_intc[0], 0x4a000018);
+
+	s3c_intc[2] = s3c24xx_init_intc(NULL, &init_s3c2416_second[0],
+					NULL, 0x4a000040);
+}
+
+#endif
+
+#ifdef CONFIG_CPU_S3C2440
+static struct s3c_irq_data init_s3c2440base[32] = {
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT0 */
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT1 */
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT2 */
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT3 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* CAM */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TICK */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* LCD */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SDI */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* NFCON */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* USBD */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* USBH */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* IIC */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* RTC */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
+};
+
+static struct s3c_irq_data init_s3c2440subint[32] = {
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
+	{ .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
+	{ .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */
+};
+
+void __init s3c2440_init_irq(void)
+{
+	pr_info("S3C2440: IRQ Support\n");
+
+#ifdef CONFIG_FIQ
+	init_FIQ(FIQ_START);
+#endif
+
+	s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2440base[0], NULL,
+					0x4a000000);
+	if (IS_ERR(s3c_intc[0])) {
+		pr_err("irq: could not create main interrupt controller\n");
+		return;
+	}
+
+	s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4);
+	s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2440subint[0],
+					s3c_intc[0], 0x4a000018);
+}
+#endif
+
+#ifdef CONFIG_CPU_S3C2442
+static struct s3c_irq_data init_s3c2442base[32] = {
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT0 */
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT1 */
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT2 */
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT3 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* CAM */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TICK */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* WDT */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* LCD */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SDI */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* NFCON */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* USBD */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* USBH */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* IIC */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* RTC */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
+};
+
+static struct s3c_irq_data init_s3c2442subint[32] = {
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
+	{ .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
+	{ .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */
+};
+
+void __init s3c2442_init_irq(void)
+{
+	pr_info("S3C2442: IRQ Support\n");
+
+#ifdef CONFIG_FIQ
+	init_FIQ(FIQ_START);
+#endif
+
+	s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2442base[0], NULL,
+					0x4a000000);
+	if (IS_ERR(s3c_intc[0])) {
+		pr_err("irq: could not create main interrupt controller\n");
+		return;
+	}
+
+	s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4);
+	s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2442subint[0],
+					s3c_intc[0], 0x4a000018);
+}
+#endif
+
+#ifdef CONFIG_CPU_S3C2443
+static struct s3c_irq_data init_s3c2443base[32] = {
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT0 */
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT1 */
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT2 */
+	{ .type = S3C_IRQTYPE_EINT, }, /* EINT3 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* CAM */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TICK */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* LCD */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* DMA */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART3 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* CFON */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SDI1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SDI0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* NAND */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* USBD */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* USBH */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* IIC */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */
+	{ .type = S3C_IRQTYPE_EDGE, }, /* RTC */
+	{ .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
+};
+
+static struct s3c_irq_data init_s3c2443subint[32] = {
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
+	{ .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
+	{ .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */
+	{ .type = S3C_IRQTYPE_NONE }, /* reserved */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD1 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD2 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD3 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD4 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA0 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA1 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA2 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA3 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA4 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA5 */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-RX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-TX */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-ERR */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */
+	{ .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */
+};
+
+void __init s3c2443_init_irq(void)
+{
+	pr_info("S3C2443: IRQ Support\n");
+
+#ifdef CONFIG_FIQ
+	init_FIQ(FIQ_START);
+#endif
+
+	s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2443base[0], NULL,
+					0x4a000000);
+	if (IS_ERR(s3c_intc[0])) {
+		pr_err("irq: could not create main interrupt controller\n");
+		return;
+	}
+
+	s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4);
+	s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2443subint[0],
+					s3c_intc[0], 0x4a000018);
+}
+#endif
+
+#ifdef CONFIG_OF
+static int s3c24xx_irq_map_of(struct irq_domain *h, unsigned int virq,
+							irq_hw_number_t hw)
+{
+	unsigned int ctrl_num = hw / 32;
+	unsigned int intc_hw = hw % 32;
+	struct s3c_irq_intc *intc = s3c_intc[ctrl_num];
+	struct s3c_irq_intc *parent_intc = intc->parent;
+	struct s3c_irq_data *irq_data = &intc->irqs[intc_hw];
+
+	/* attach controller pointer to irq_data */
+	irq_data->intc = intc;
+	irq_data->offset = intc_hw;
+
+	if (!parent_intc)
+		irq_set_chip_and_handler(virq, &s3c_irq_chip, handle_edge_irq);
+	else
+		irq_set_chip_and_handler(virq, &s3c_irq_level_chip,
+					 handle_edge_irq);
+
+	irq_set_chip_data(virq, irq_data);
+
+	return 0;
+}
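+
+/*
+ * Worked example of the linear hwirq encoding decoded above: each
+ * controller owns a 32-entry slice, so hwirq 40 selects
+ * s3c_intc[40 / 32] = s3c_intc[1] (the sub-controller) and bit
+ * 40 % 32 = 8 within it (UART2-ERR in the s3c2416 sub-controller
+ * table earlier in this file).
+ */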
+
+/*
+ * Translate our OF irq notation.
+ * format: <ctrl_num parent_irq ctrl_irq type>
+ * (intspec[1] is the parent irq and intspec[2] the bit within the
+ * controller, as consumed below)
+ */
+static int s3c24xx_irq_xlate_of(struct irq_domain *d, struct device_node *n,
+			const u32 *intspec, unsigned int intsize,
+			irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+	struct s3c_irq_intc *intc;
+	struct s3c_irq_intc *parent_intc;
+	struct s3c_irq_data *irq_data;
+	struct s3c_irq_data *parent_irq_data;
+	int irqno;
+
+	if (WARN_ON(intsize < 4))
+		return -EINVAL;
+
+	if (intspec[0] > 2 || !s3c_intc[intspec[0]]) {
+		pr_err("controller number %d invalid\n", intspec[0]);
+		return -EINVAL;
+	}
+	intc = s3c_intc[intspec[0]];
+
+	*out_hwirq = intspec[0] * 32 + intspec[2];
+	*out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;
+
+	parent_intc = intc->parent;
+	if (parent_intc) {
+		irq_data = &intc->irqs[intspec[2]];
+		irq_data->parent_irq = intspec[1];
+		parent_irq_data = &parent_intc->irqs[irq_data->parent_irq];
+		parent_irq_data->sub_intc = intc;
+		parent_irq_data->sub_bits |= (1UL << intspec[2]);
+
+		/* parent_intc is always s3c_intc[0], so no offset */
+		irqno = irq_create_mapping(parent_intc->domain, intspec[1]);
+		if (irqno < 0) {
+			pr_err("irq: could not map parent interrupt\n");
+			return irqno;
+		}
+
+		irq_set_chained_handler(irqno, s3c_irq_demux);
+	}
+
+	return 0;
+}
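+
+/*
+ * For illustration, a hypothetical consumer of this binding: the
+ * sub-controller's UART0-RX line (controller 1, bit 0, chained to
+ * main-controller bit 28) would be described as
+ *
+ *	interrupts = <1 28 0 IRQ_TYPE_LEVEL_HIGH>;
+ *
+ * which maps to hwirq 1 * 32 + 0 = 32 in the linear domain.
+ */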
+
+static const struct irq_domain_ops s3c24xx_irq_ops_of = {
+	.map = s3c24xx_irq_map_of,
+	.xlate = s3c24xx_irq_xlate_of,
+};
+
+struct s3c24xx_irq_of_ctrl {
+	char			*name;
+	unsigned long		offset;
+	struct s3c_irq_intc	**handle;
+	struct s3c_irq_intc	**parent;
+	struct irq_domain_ops	*ops;
+};
+
+static int __init s3c_init_intc_of(struct device_node *np,
+			struct device_node *interrupt_parent,
+			struct s3c24xx_irq_of_ctrl *s3c_ctrl, int num_ctrl)
+{
+	struct s3c_irq_intc *intc;
+	struct s3c24xx_irq_of_ctrl *ctrl;
+	struct irq_domain *domain;
+	void __iomem *reg_base;
+	int i;
+
+	reg_base = of_iomap(np, 0);
+	if (!reg_base) {
+		pr_err("irq-s3c24xx: could not map irq registers\n");
+		return -EINVAL;
+	}
+
+	domain = irq_domain_add_linear(np, num_ctrl * 32,
+						     &s3c24xx_irq_ops_of, NULL);
+	if (!domain) {
+		pr_err("irq: could not create irq-domain\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_ctrl; i++) {
+		ctrl = &s3c_ctrl[i];
+
+		pr_debug("irq: found controller %s\n", ctrl->name);
+
+		intc = kzalloc(sizeof(struct s3c_irq_intc), GFP_KERNEL);
+		if (!intc)
+			return -ENOMEM;
+
+		intc->domain = domain;
+		intc->irqs = kcalloc(32, sizeof(struct s3c_irq_data),
+				     GFP_KERNEL);
+		if (!intc->irqs) {
+			kfree(intc);
+			return -ENOMEM;
+		}
+
+		if (ctrl->parent) {
+			intc->reg_pending = reg_base + ctrl->offset;
+			intc->reg_mask = reg_base + ctrl->offset + 0x4;
+
+			if (*(ctrl->parent)) {
+				intc->parent = *(ctrl->parent);
+			} else {
+				pr_warn("irq: parent of %s missing\n",
+					ctrl->name);
+				kfree(intc->irqs);
+				kfree(intc);
+				continue;
+			}
+		} else {
+			intc->reg_pending = reg_base + ctrl->offset;
+			intc->reg_mask = reg_base + ctrl->offset + 0x08;
+			intc->reg_intpnd = reg_base + ctrl->offset + 0x10;
+		}
+
+		s3c24xx_clear_intc(intc);
+		s3c_intc[i] = intc;
+	}
+
+	set_handle_irq(s3c24xx_handle_irq);
+
+	return 0;
+}
+
+static struct s3c24xx_irq_of_ctrl s3c2410_ctrl[] = {
+	{
+		.name = "intc",
+		.offset = 0,
+	}, {
+		.name = "subintc",
+		.offset = 0x18,
+		.parent = &s3c_intc[0],
+	}
+};
+
+int __init s3c2410_init_intc_of(struct device_node *np,
+			struct device_node *interrupt_parent)
+{
+	return s3c_init_intc_of(np, interrupt_parent,
+				s3c2410_ctrl, ARRAY_SIZE(s3c2410_ctrl));
+}
+IRQCHIP_DECLARE(s3c2410_irq, "samsung,s3c2410-irq", s3c2410_init_intc_of);
+
+static struct s3c24xx_irq_of_ctrl s3c2416_ctrl[] = {
+	{
+		.name = "intc",
+		.offset = 0,
+	}, {
+		.name = "subintc",
+		.offset = 0x18,
+		.parent = &s3c_intc[0],
+	}, {
+		.name = "intc2",
+		.offset = 0x40,
+	}
+};
+
+int __init s3c2416_init_intc_of(struct device_node *np,
+			struct device_node *interrupt_parent)
+{
+	return s3c_init_intc_of(np, interrupt_parent,
+				s3c2416_ctrl, ARRAY_SIZE(s3c2416_ctrl));
+}
+IRQCHIP_DECLARE(s3c2416_irq, "samsung,s3c2416-irq", s3c2416_init_intc_of);
+#endif
diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c
new file mode 100644
index 0000000..61bb28d
--- /dev/null
+++ b/drivers/irqchip/irq-sa11x0.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2015 Dmitry Eremin-Solenikov
+ * Copyright (C) 1999-2001 Nicolas Pitre
+ *
+ * Generic IRQ handling for the SA11x0.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/syscore_ops.h>
+#include <linux/irqchip/irq-sa11x0.h>
+
+#include <soc/sa1100/pwer.h>
+
+#include <asm/exception.h>
+
+#define ICIP	0x00  /* IC IRQ Pending reg. */
+#define ICMR	0x04  /* IC Mask Reg.        */
+#define ICLR	0x08  /* IC Level Reg.       */
+#define ICCR	0x0C  /* IC Control Reg.     */
+#define ICFP	0x10  /* IC FIQ Pending reg. */
+#define ICPR	0x20  /* IC Pending Reg.     */
+
+static void __iomem *iobase;
+
+/*
+ * We don't need to ACK IRQs on the SA1100 unless they're GPIOs;
+ * this handler covers the internal IRQs, i.e. from IRQ LCD to RTCAlrm.
+ */
+static void sa1100_mask_irq(struct irq_data *d)
+{
+	u32 reg;
+
+	reg = readl_relaxed(iobase + ICMR);
+	reg &= ~BIT(d->hwirq);
+	writel_relaxed(reg, iobase + ICMR);
+}
+
+static void sa1100_unmask_irq(struct irq_data *d)
+{
+	u32 reg;
+
+	reg = readl_relaxed(iobase + ICMR);
+	reg |= BIT(d->hwirq);
+	writel_relaxed(reg, iobase + ICMR);
+}
+
+static int sa1100_set_wake(struct irq_data *d, unsigned int on)
+{
+	return sa11x0_sc_set_wake(d->hwirq, on);
+}
+
+static struct irq_chip sa1100_normal_chip = {
+	.name		= "SC",
+	.irq_ack	= sa1100_mask_irq,
+	.irq_mask	= sa1100_mask_irq,
+	.irq_unmask	= sa1100_unmask_irq,
+	.irq_set_wake	= sa1100_set_wake,
+};
+
+static int sa1100_normal_irqdomain_map(struct irq_domain *d,
+		unsigned int irq, irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &sa1100_normal_chip,
+				 handle_level_irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops sa1100_normal_irqdomain_ops = {
+	.map = sa1100_normal_irqdomain_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
+static struct irq_domain *sa1100_normal_irqdomain;
+
+static struct sa1100irq_state {
+	unsigned int	saved;
+	unsigned int	icmr;
+	unsigned int	iclr;
+	unsigned int	iccr;
+} sa1100irq_state;
+
+static int sa1100irq_suspend(void)
+{
+	struct sa1100irq_state *st = &sa1100irq_state;
+
+	st->saved = 1;
+	st->icmr = readl_relaxed(iobase + ICMR);
+	st->iclr = readl_relaxed(iobase + ICLR);
+	st->iccr = readl_relaxed(iobase + ICCR);
+
+	/*
+	 * Disable all GPIO-based interrupts.
+	 */
+	writel_relaxed(st->icmr & 0xfffff000, iobase + ICMR);
+
+	return 0;
+}
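+
+/*
+ * The 0xfffff000 mask above clears ICMR bits 0-11, which on the
+ * SA1100 are the GPIO edge sources (GPIO 0-10 plus the grouped
+ * GPIO 11-27 line), leaving only non-GPIO wake sources enabled
+ * across suspend.
+ */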
+
+static void sa1100irq_resume(void)
+{
+	struct sa1100irq_state *st = &sa1100irq_state;
+
+	if (st->saved) {
+		writel_relaxed(st->iccr, iobase + ICCR);
+		writel_relaxed(st->iclr, iobase + ICLR);
+
+		writel_relaxed(st->icmr, iobase + ICMR);
+	}
+}
+
+static struct syscore_ops sa1100irq_syscore_ops = {
+	.suspend	= sa1100irq_suspend,
+	.resume		= sa1100irq_resume,
+};
+
+static int __init sa1100irq_init_devicefs(void)
+{
+	register_syscore_ops(&sa1100irq_syscore_ops);
+	return 0;
+}
+
+device_initcall(sa1100irq_init_devicefs);
+
+static asmlinkage void __exception_irq_entry
+sa1100_handle_irq(struct pt_regs *regs)
+{
+	uint32_t icip, icmr, mask;
+
+	do {
+		icip = readl_relaxed(iobase + ICIP);
+		icmr = readl_relaxed(iobase + ICMR);
+		mask = icip & icmr;
+
+		if (mask == 0)
+			break;
+
+		handle_domain_irq(sa1100_normal_irqdomain,
+				ffs(mask) - 1, regs);
+	} while (1);
+}
+
+void __init sa11x0_init_irq_nodt(int irq_start, resource_size_t io_start)
+{
+	iobase = ioremap(io_start, SZ_64K);
+	if (WARN_ON(!iobase))
+		return;
+
+	/* disable all IRQs */
+	writel_relaxed(0, iobase + ICMR);
+
+	/* all IRQs are IRQ, not FIQ */
+	writel_relaxed(0, iobase + ICLR);
+
+	/*
+	 * Whatever the doc says, this has to be set for the wait-on-irq
+	 * instruction to work... on a SA1100 rev 9 at least.
+	 */
+	writel_relaxed(1, iobase + ICCR);
+
+	sa1100_normal_irqdomain = irq_domain_add_simple(NULL,
+			32, irq_start,
+			&sa1100_normal_irqdomain_ops, NULL);
+
+	set_handle_irq(sa1100_handle_irq);
+}
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
new file mode 100644
index 0000000..532e9d6
--- /dev/null
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 SiFive
+ * Copyright (C) 2018 Christoph Hellwig
+ */
+#define pr_fmt(fmt) "plic: " fmt
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+/*
+ * This driver implements a version of the RISC-V PLIC with the actual layout
+ * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
+ *
+ *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
+ *
+ * The largest number of devices supported by controllers marked as
+ * 'sifive,plic-1.0.0' is 1024, of which device 0 is defined as
+ * non-existent by the RISC-V Privileged Spec.
+ */
+
+#define MAX_DEVICES			1024
+#define MAX_CONTEXTS			15872
+
+/*
+ * Each interrupt source has a priority register associated with it.
+ * We always hardwire it to one in Linux.
+ */
+#define PRIORITY_BASE			0
+#define     PRIORITY_PER_ID		4
+
+/*
+ * Each hart context has a vector of interrupt enable bits associated with it.
+ * There's one bit for each interrupt source.
+ */
+#define ENABLE_BASE			0x2000
+#define     ENABLE_PER_HART		0x80
+
+/*
+ * Each hart context has a set of control registers associated with it.  Right
+ * now there are only two: a source priority threshold over which the hart will
+ * take an interrupt, and a register to claim interrupts.
+ */
+#define CONTEXT_BASE			0x200000
+#define     CONTEXT_PER_HART		0x1000
+#define     CONTEXT_THRESHOLD		0x00
+#define     CONTEXT_CLAIM		0x04
+
+static void __iomem *plic_regs;
+
+struct plic_handler {
+	bool			present;
+	int			ctxid;
+};
+static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
+
+static inline void __iomem *plic_hart_offset(int ctxid)
+{
+	return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
+}
+
+static inline u32 __iomem *plic_enable_base(int ctxid)
+{
+	return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
+}
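+
+/*
+ * Address arithmetic example for the two helpers above: for context 2
+ * and hwirq 45, the enable bit is bit 45 % 32 = 13 of the word at
+ * plic_regs + 0x2000 + 2 * 0x80 + (45 / 32) * 4 = plic_regs + 0x2104,
+ * and the claim register sits at plic_regs + 0x200000 + 2 * 0x1000 +
+ * CONTEXT_CLAIM = plic_regs + 0x202004.
+ */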
+
+/*
+ * Protect mask operations on the registers given that we can't assume that
+ * atomic memory operations work on them.
+ */
+static DEFINE_RAW_SPINLOCK(plic_toggle_lock);
+
+static inline void plic_toggle(int ctxid, int hwirq, int enable)
+{
+	u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
+	u32 hwirq_mask = 1 << (hwirq % 32);
+
+	raw_spin_lock(&plic_toggle_lock);
+	if (enable)
+		writel(readl(reg) | hwirq_mask, reg);
+	else
+		writel(readl(reg) & ~hwirq_mask, reg);
+	raw_spin_unlock(&plic_toggle_lock);
+}
+
+static inline void plic_irq_toggle(struct irq_data *d, int enable)
+{
+	int cpu;
+
+	writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+	for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
+		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+		if (handler->present)
+			plic_toggle(handler->ctxid, d->hwirq, enable);
+	}
+}
+
+static void plic_irq_enable(struct irq_data *d)
+{
+	plic_irq_toggle(d, 1);
+}
+
+static void plic_irq_disable(struct irq_data *d)
+{
+	plic_irq_toggle(d, 0);
+}
+
+static struct irq_chip plic_chip = {
+	.name		= "SiFive PLIC",
+	/*
+	 * There is no need to mask/unmask PLIC interrupts.  They are "masked"
+	 * by reading claim and "unmasked" when writing it back.
+	 */
+	.irq_enable	= plic_irq_enable,
+	.irq_disable	= plic_irq_disable,
+};
+
+static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+			      irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
+	irq_set_chip_data(irq, NULL);
+	irq_set_noprobe(irq);
+	return 0;
+}
+
+static const struct irq_domain_ops plic_irqdomain_ops = {
+	.map		= plic_irqdomain_map,
+	.xlate		= irq_domain_xlate_onecell,
+};
+
+static struct irq_domain *plic_irqdomain;
+
+/*
+ * Handling an interrupt is a two-step process: first you claim the interrupt
+ * by reading the claim register, then you complete the interrupt by writing
+ * that source ID back to the same claim register.  This automatically enables
+ * and disables the interrupt, so there's nothing else to do.
+ */
+static void plic_handle_irq(struct pt_regs *regs)
+{
+	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+	void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+	irq_hw_number_t hwirq;
+
+	WARN_ON_ONCE(!handler->present);
+
+	csr_clear(sie, SIE_SEIE);
+	while ((hwirq = readl(claim))) {
+		int irq = irq_find_mapping(plic_irqdomain, hwirq);
+
+		if (unlikely(irq <= 0))
+			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
+					hwirq);
+		else
+			generic_handle_irq(irq);
+		writel(hwirq, claim);
+	}
+	csr_set(sie, SIE_SEIE);
+}
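+
+/*
+ * The loop above is the claim/complete protocol in miniature:
+ *
+ *	hwirq = readl(claim);	  <- claim: highest-priority pending ID
+ *	generic_handle_irq(irq);  <- run the mapped Linux handler
+ *	writel(hwirq, claim);	  <- complete: re-arm the source
+ *
+ * A claim read of 0 means nothing is pending and terminates the loop;
+ * SIE_SEIE is cleared for the duration so the hart is not re-entered.
+ */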
+
+/*
+ * Walk up the DT tree until we find an active RISC-V core (HART) node and
+ * extract the cpuid from it.
+ */
+static int plic_find_hart_id(struct device_node *node)
+{
+	for (; node; node = node->parent) {
+		if (of_device_is_compatible(node, "riscv"))
+			return riscv_of_processor_hart(node);
+	}
+
+	return -1;
+}
+
+static int __init plic_init(struct device_node *node,
+		struct device_node *parent)
+{
+	int error = 0, nr_handlers, nr_mapped = 0, i;
+	u32 nr_irqs;
+
+	if (plic_regs) {
+		pr_warn("PLIC already present.\n");
+		return -ENXIO;
+	}
+
+	plic_regs = of_iomap(node, 0);
+	if (WARN_ON(!plic_regs))
+		return -EIO;
+
+	error = -EINVAL;
+	of_property_read_u32(node, "riscv,ndev", &nr_irqs);
+	if (WARN_ON(!nr_irqs))
+		goto out_iounmap;
+
+	nr_handlers = of_irq_count(node);
+	if (WARN_ON(!nr_handlers))
+		goto out_iounmap;
+	if (WARN_ON(nr_handlers < num_possible_cpus()))
+		goto out_iounmap;
+
+	error = -ENOMEM;
+	plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
+			&plic_irqdomain_ops, NULL);
+	if (WARN_ON(!plic_irqdomain))
+		goto out_iounmap;
+
+	for (i = 0; i < nr_handlers; i++) {
+		struct of_phandle_args parent;
+		struct plic_handler *handler;
+		irq_hw_number_t hwirq;
+		int cpu;
+
+		if (of_irq_parse_one(node, i, &parent)) {
+			pr_err("failed to parse parent for context %d.\n", i);
+			continue;
+		}
+
+		/* skip context holes */
+		if (parent.args[0] == -1)
+			continue;
+
+		cpu = plic_find_hart_id(parent.np);
+		if (cpu < 0) {
+			pr_warn("failed to parse hart ID for context %d.\n", i);
+			continue;
+		}
+
+		handler = per_cpu_ptr(&plic_handlers, cpu);
+		handler->present = true;
+		handler->ctxid = i;
+
+		/* priority must be > threshold to trigger an interrupt */
+		writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
+		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+			plic_toggle(i, hwirq, 0);
+		nr_mapped++;
+	}
+
+	pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
+		nr_irqs, nr_mapped, nr_handlers);
+	set_handle_irq(plic_handle_irq);
+	return 0;
+
+out_iounmap:
+	iounmap(plic_regs);
+	return error;
+}
+
+IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
+IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c
new file mode 100644
index 0000000..e133684
--- /dev/null
+++ b/drivers/irqchip/irq-sirfsoc.c
@@ -0,0 +1,135 @@
+/*
+ * interrupt controller support for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/syscore_ops.h>
+#include <asm/mach/irq.h>
+#include <asm/exception.h>
+
+#define SIRFSOC_INT_RISC_MASK0		0x0018
+#define SIRFSOC_INT_RISC_MASK1		0x001C
+#define SIRFSOC_INT_RISC_LEVEL0		0x0020
+#define SIRFSOC_INT_RISC_LEVEL1		0x0024
+#define SIRFSOC_INIT_IRQ_ID		0x0038
+#define SIRFSOC_INT_BASE_OFFSET		0x0004
+
+#define SIRFSOC_NUM_IRQS		64
+#define SIRFSOC_NUM_BANKS		(SIRFSOC_NUM_IRQS / 32)
+
+static struct irq_domain *sirfsoc_irqdomain;
+
+static void __iomem *sirfsoc_irq_get_regbase(void)
+{
+	return (void __iomem __force *)sirfsoc_irqdomain->host_data;
+}
+
+static __init void sirfsoc_alloc_gc(void __iomem *base)
+{
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+	unsigned int set = IRQ_LEVEL;
+	struct irq_chip_generic *gc;
+	struct irq_chip_type *ct;
+	int i;
+
+	irq_alloc_domain_generic_chips(sirfsoc_irqdomain, 32, 1, "irq_sirfsoc",
+				       handle_level_irq, clr, set,
+				       IRQ_GC_INIT_MASK_CACHE);
+
+	for (i = 0; i < SIRFSOC_NUM_BANKS; i++) {
+		gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, i * 32);
+		gc->reg_base = base + i * SIRFSOC_INT_BASE_OFFSET;
+		ct = gc->chip_types;
+		ct->chip.irq_mask = irq_gc_mask_clr_bit;
+		ct->chip.irq_unmask = irq_gc_mask_set_bit;
+		ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
+	}
+}
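+
+/*
+ * Note the addressing trick above: both generic chips share
+ * ct->regs.mask = SIRFSOC_INT_RISC_MASK0 (0x18), but bank 1's reg_base
+ * is advanced by SIRFSOC_INT_BASE_OFFSET (0x04), so its mask accesses
+ * land on SIRFSOC_INT_RISC_MASK1 (0x1C) without needing a second
+ * chip type.
+ */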
+
+static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
+{
+	void __iomem *base = sirfsoc_irq_get_regbase();
+	u32 irqstat;
+
+	irqstat = readl_relaxed(base + SIRFSOC_INIT_IRQ_ID);
+	handle_domain_irq(sirfsoc_irqdomain, irqstat & 0xff, regs);
+}
+
+static int __init sirfsoc_irq_init(struct device_node *np,
+	struct device_node *parent)
+{
+	void __iomem *base = of_iomap(np, 0);
+	if (!base)
+		panic("unable to map intc cpu registers\n");
+
+	sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS,
+						  &irq_generic_chip_ops, base);
+	sirfsoc_alloc_gc(base);
+
+	writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL0);
+	writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL1);
+
+	writel_relaxed(0, base + SIRFSOC_INT_RISC_MASK0);
+	writel_relaxed(0, base + SIRFSOC_INT_RISC_MASK1);
+
+	set_handle_irq(sirfsoc_handle_irq);
+
+	return 0;
+}
+IRQCHIP_DECLARE(sirfsoc_intc, "sirf,prima2-intc", sirfsoc_irq_init);
+
+struct sirfsoc_irq_status {
+	u32 mask0;
+	u32 mask1;
+	u32 level0;
+	u32 level1;
+};
+
+static struct sirfsoc_irq_status sirfsoc_irq_st;
+
+static int sirfsoc_irq_suspend(void)
+{
+	void __iomem *base = sirfsoc_irq_get_regbase();
+
+	sirfsoc_irq_st.mask0 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK0);
+	sirfsoc_irq_st.mask1 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK1);
+	sirfsoc_irq_st.level0 = readl_relaxed(base + SIRFSOC_INT_RISC_LEVEL0);
+	sirfsoc_irq_st.level1 = readl_relaxed(base + SIRFSOC_INT_RISC_LEVEL1);
+
+	return 0;
+}
+
+static void sirfsoc_irq_resume(void)
+{
+	void __iomem *base = sirfsoc_irq_get_regbase();
+
+	writel_relaxed(sirfsoc_irq_st.mask0, base + SIRFSOC_INT_RISC_MASK0);
+	writel_relaxed(sirfsoc_irq_st.mask1, base + SIRFSOC_INT_RISC_MASK1);
+	writel_relaxed(sirfsoc_irq_st.level0, base + SIRFSOC_INT_RISC_LEVEL0);
+	writel_relaxed(sirfsoc_irq_st.level1, base + SIRFSOC_INT_RISC_LEVEL1);
+}
+
+static struct syscore_ops sirfsoc_irq_syscore_ops = {
+	.suspend	= sirfsoc_irq_suspend,
+	.resume		= sirfsoc_irq_resume,
+};
+
+static int __init sirfsoc_irq_pm_init(void)
+{
+	if (!sirfsoc_irqdomain)
+		return 0;
+
+	register_syscore_ops(&sirfsoc_irq_syscore_ops);
+	return 0;
+}
+device_initcall(sirfsoc_irq_pm_init);
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
new file mode 100644
index 0000000..1927b2f
--- /dev/null
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -0,0 +1,227 @@
+/*
+ * Driver for Socionext External Interrupt Unit (EXIU)
+ *
+ * Copyright (c) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * Based on irq-tegra.c:
+ *   Copyright (C) 2011 Google, Inc.
+ *   Copyright (C) 2010,2013, NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define NUM_IRQS	32
+
+#define EIMASK		0x00
+#define EISRCSEL	0x04
+#define EIREQSTA	0x08
+#define EIRAWREQSTA	0x0C
+#define EIREQCLR	0x10
+#define EILVL		0x14
+#define EIEDG		0x18
+#define EISIR		0x1C
+
+struct exiu_irq_data {
+	void __iomem	*base;
+	u32		spi_base;
+};
+
+static void exiu_irq_eoi(struct irq_data *d)
+{
+	struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+
+	writel(BIT(d->hwirq), data->base + EIREQCLR);
+	irq_chip_eoi_parent(d);
+}
+
+static void exiu_irq_mask(struct irq_data *d)
+{
+	struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+	u32 val;
+
+	val = readl_relaxed(data->base + EIMASK) | BIT(d->hwirq);
+	writel_relaxed(val, data->base + EIMASK);
+	irq_chip_mask_parent(d);
+}
+
+static void exiu_irq_unmask(struct irq_data *d)
+{
+	struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+	u32 val;
+
+	val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq);
+	writel_relaxed(val, data->base + EIMASK);
+	irq_chip_unmask_parent(d);
+}
+
+static void exiu_irq_enable(struct irq_data *d)
+{
+	struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+	u32 val;
+
+	/* clear interrupts that were latched while disabled */
+	writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
+
+	val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq);
+	writel_relaxed(val, data->base + EIMASK);
+	irq_chip_enable_parent(d);
+}
+
+static int exiu_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+	u32 val;
+
+	val = readl_relaxed(data->base + EILVL);
+	if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
+		val |= BIT(d->hwirq);
+	else
+		val &= ~BIT(d->hwirq);
+	writel_relaxed(val, data->base + EILVL);
+
+	val = readl_relaxed(data->base + EIEDG);
+	if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
+		val &= ~BIT(d->hwirq);
+	else
+		val |= BIT(d->hwirq);
+	writel_relaxed(val, data->base + EIEDG);
+
+	writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
+
+	return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
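+
+/*
+ * Trigger encoding produced by the helper above (one bit per line):
+ *
+ *	type			EILVL	EIEDG
+ *	IRQ_TYPE_LEVEL_LOW	0	0
+ *	IRQ_TYPE_LEVEL_HIGH	1	0
+ *	IRQ_TYPE_EDGE_FALLING	0	1
+ *	IRQ_TYPE_EDGE_RISING	1	1
+ *
+ * The latched request is cleared afterwards, and the parent GIC is
+ * always programmed as IRQ_TYPE_LEVEL_HIGH.
+ */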
+
+static struct irq_chip exiu_irq_chip = {
+	.name			= "EXIU",
+	.irq_eoi		= exiu_irq_eoi,
+	.irq_enable		= exiu_irq_enable,
+	.irq_mask		= exiu_irq_mask,
+	.irq_unmask		= exiu_irq_unmask,
+	.irq_set_type		= exiu_irq_set_type,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+	.flags			= IRQCHIP_SET_TYPE_MASKED |
+				  IRQCHIP_SKIP_SET_WAKE |
+				  IRQCHIP_EOI_THREADED |
+				  IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int exiu_domain_translate(struct irq_domain *domain,
+				 struct irq_fwspec *fwspec,
+				 unsigned long *hwirq,
+				 unsigned int *type)
+{
+	struct exiu_irq_data *info = domain->host_data;
+
+	if (is_of_node(fwspec->fwnode)) {
+		if (fwspec->param_count != 3)
+			return -EINVAL;
+
+		if (fwspec->param[0] != GIC_SPI)
+			return -EINVAL; /* No PPI should point to this domain */
+
+		*hwirq = fwspec->param[1] - info->spi_base;
+		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int exiu_domain_alloc(struct irq_domain *dom, unsigned int virq,
+			     unsigned int nr_irqs, void *data)
+{
+	struct irq_fwspec *fwspec = data;
+	struct irq_fwspec parent_fwspec;
+	struct exiu_irq_data *info = dom->host_data;
+	irq_hw_number_t hwirq;
+
+	if (fwspec->param_count != 3)
+		return -EINVAL;	/* Not GIC compliant */
+	if (fwspec->param[0] != GIC_SPI)
+		return -EINVAL;	/* No PPI should point to this domain */
+
+	WARN_ON(nr_irqs != 1);
+	hwirq = fwspec->param[1] - info->spi_base;
+	irq_domain_set_hwirq_and_chip(dom, virq, hwirq, &exiu_irq_chip, info);
+
+	parent_fwspec = *fwspec;
+	parent_fwspec.fwnode = dom->parent->fwnode;
+	return irq_domain_alloc_irqs_parent(dom, virq, nr_irqs, &parent_fwspec);
+}
+
+static const struct irq_domain_ops exiu_domain_ops = {
+	.translate	= exiu_domain_translate,
+	.alloc		= exiu_domain_alloc,
+	.free		= irq_domain_free_irqs_common,
+};
+
+static int __init exiu_init(struct device_node *node,
+			    struct device_node *parent)
+{
+	struct irq_domain *parent_domain, *domain;
+	struct exiu_irq_data *data;
+	int err;
+
+	if (!parent) {
+		pr_err("%pOF: no parent, giving up\n", node);
+		return -ENODEV;
+	}
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain) {
+		pr_err("%pOF: unable to obtain parent domain\n", node);
+		return -ENXIO;
+	}
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	if (of_property_read_u32(node, "socionext,spi-base", &data->spi_base)) {
+		pr_err("%pOF: failed to parse 'spi-base' property\n", node);
+		err = -ENODEV;
+		goto out_free;
+	}
+
+	data->base = of_iomap(node, 0);
+	if (!data->base) {
+		err = -ENODEV;
+		goto out_free;
+	}
+
+	/* clear and mask all interrupts */
+	writel_relaxed(0xFFFFFFFF, data->base + EIREQCLR);
+	writel_relaxed(0xFFFFFFFF, data->base + EIMASK);
+
+	domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_IRQS, node,
+					  &exiu_domain_ops, data);
+	if (!domain) {
+		pr_err("%pOF: failed to allocate domain\n", node);
+		err = -ENOMEM;
+		goto out_unmap;
+	}
+
+	pr_info("%pOF: %d interrupts forwarded to %pOF\n", node, NUM_IRQS,
+		parent);
+
+	return 0;
+
+out_unmap:
+	iounmap(data->base);
+out_free:
+	kfree(data);
+	return err;
+}
+IRQCHIP_DECLARE(exiu, "socionext,synquacer-exiu", exiu_init);
diff --git a/drivers/irqchip/irq-st.c b/drivers/irqchip/irq-st.c
new file mode 100644
index 0000000..5e0e250
--- /dev/null
+++ b/drivers/irqchip/irq-st.c
@@ -0,0 +1,206 @@
+/*
+ *  Copyright (C) 2014 STMicroelectronics – All Rights Reserved
+ *
+ *  Author: Lee Jones <lee.jones@linaro.org>
+ *
+ *  This is a re-write of Christophe Kerello's PMU driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <dt-bindings/interrupt-controller/irq-st.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define STIH415_SYSCFG_642		0x0a8
+#define STIH416_SYSCFG_7543		0x87c
+#define STIH407_SYSCFG_5102		0x198
+#define STID127_SYSCFG_734		0x088
+
+#define ST_A9_IRQ_MASK			0x001FFFFF
+#define ST_A9_IRQ_MAX_CHANS		2
+
+#define ST_A9_IRQ_EN_CTI_0		BIT(0)
+#define ST_A9_IRQ_EN_CTI_1		BIT(1)
+#define ST_A9_IRQ_EN_PMU_0		BIT(2)
+#define ST_A9_IRQ_EN_PMU_1		BIT(3)
+#define ST_A9_IRQ_EN_PL310_L2		BIT(4)
+#define ST_A9_IRQ_EN_EXT_0		BIT(5)
+#define ST_A9_IRQ_EN_EXT_1		BIT(6)
+#define ST_A9_IRQ_EN_EXT_2		BIT(7)
+
+#define ST_A9_FIQ_N_SEL(dev, chan)	(dev << (8  + (chan * 3)))
+#define ST_A9_IRQ_N_SEL(dev, chan)	(dev << (14 + (chan * 3)))
+#define ST_A9_EXTIRQ_INV_SEL(dev)	(dev << 20)
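+
+/*
+ * Resulting layout of the syscfg word assembled from these macros:
+ * bits 0-7 hold the per-device enables, bits 8-13 two 3-bit FIQ
+ * channel selectors, bits 14-19 the matching IRQ selectors, and
+ * bit 20 the external-IRQ inversion flag; together they span
+ * ST_A9_IRQ_MASK (0x001FFFFF).
+ */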
+
+struct st_irq_syscfg {
+	struct regmap *regmap;
+	unsigned int syscfg;
+	unsigned int config;
+	bool ext_inverted;
+};
+
+static const struct of_device_id st_irq_syscfg_match[] = {
+	{
+		.compatible = "st,stih415-irq-syscfg",
+		.data = (void *)STIH415_SYSCFG_642,
+	},
+	{
+		.compatible = "st,stih416-irq-syscfg",
+		.data = (void *)STIH416_SYSCFG_7543,
+	},
+	{
+		.compatible = "st,stih407-irq-syscfg",
+		.data = (void *)STIH407_SYSCFG_5102,
+	},
+	{
+		.compatible = "st,stid127-irq-syscfg",
+		.data = (void *)STID127_SYSCFG_734,
+	},
+	{}
+};
+
+static int st_irq_xlate(struct platform_device *pdev,
+			int device, int channel, bool irq)
+{
+	struct st_irq_syscfg *ddata = dev_get_drvdata(&pdev->dev);
+
+	/* Set the device enable bit. */
+	switch (device) {
+	case ST_IRQ_SYSCFG_EXT_0:
+		ddata->config |= ST_A9_IRQ_EN_EXT_0;
+		break;
+	case ST_IRQ_SYSCFG_EXT_1:
+		ddata->config |= ST_A9_IRQ_EN_EXT_1;
+		break;
+	case ST_IRQ_SYSCFG_EXT_2:
+		ddata->config |= ST_A9_IRQ_EN_EXT_2;
+		break;
+	case ST_IRQ_SYSCFG_CTI_0:
+		ddata->config |= ST_A9_IRQ_EN_CTI_0;
+		break;
+	case ST_IRQ_SYSCFG_CTI_1:
+		ddata->config |= ST_A9_IRQ_EN_CTI_1;
+		break;
+	case ST_IRQ_SYSCFG_PMU_0:
+		ddata->config |= ST_A9_IRQ_EN_PMU_0;
+		break;
+	case ST_IRQ_SYSCFG_PMU_1:
+		ddata->config |= ST_A9_IRQ_EN_PMU_1;
+		break;
+	case ST_IRQ_SYSCFG_pl310_L2:
+		ddata->config |= ST_A9_IRQ_EN_PL310_L2;
+		break;
+	case ST_IRQ_SYSCFG_DISABLED:
+		return 0;
+	default:
+		dev_err(&pdev->dev, "Unrecognised device %d\n", device);
+		return -EINVAL;
+	}
+
+	/* Select IRQ/FIQ channel for device. */
+	ddata->config |= irq ?
+		ST_A9_IRQ_N_SEL(device, channel) :
+		ST_A9_FIQ_N_SEL(device, channel);
+
+	return 0;
+}
+
+static int st_irq_syscfg_enable(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct st_irq_syscfg *ddata = dev_get_drvdata(&pdev->dev);
+	int channels, ret, i;
+	u32 device, invert;
+
+	channels = of_property_count_u32_elems(np, "st,irq-device");
+	if (channels != ST_A9_IRQ_MAX_CHANS) {
+		dev_err(&pdev->dev, "st,enable-irq-device must have 2 elems\n");
+		return -EINVAL;
+	}
+
+	channels = of_property_count_u32_elems(np, "st,fiq-device");
+	if (channels != ST_A9_IRQ_MAX_CHANS) {
+		dev_err(&pdev->dev, "st,enable-fiq-device must have 2 elems\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ST_A9_IRQ_MAX_CHANS; i++) {
+		of_property_read_u32_index(np, "st,irq-device", i, &device);
+
+		ret = st_irq_xlate(pdev, device, i, true);
+		if (ret)
+			return ret;
+
+		of_property_read_u32_index(np, "st,fiq-device", i, &device);
+
+		ret = st_irq_xlate(pdev, device, i, false);
+		if (ret)
+			return ret;
+	}
+
+	/* External IRQs may be inverted. */
+	of_property_read_u32(np, "st,invert-ext", &invert);
+	ddata->config |= ST_A9_EXTIRQ_INV_SEL(invert);
+
+	return regmap_update_bits(ddata->regmap, ddata->syscfg,
+				  ST_A9_IRQ_MASK, ddata->config);
+}
+
+static int st_irq_syscfg_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	const struct of_device_id *match;
+	struct st_irq_syscfg *ddata;
+
+	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+	if (!ddata)
+		return -ENOMEM;
+
+	match = of_match_device(st_irq_syscfg_match, &pdev->dev);
+	if (!match)
+		return -ENODEV;
+
+	ddata->syscfg = (unsigned int)match->data;
+
+	ddata->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+	if (IS_ERR(ddata->regmap)) {
+		dev_err(&pdev->dev, "syscfg phandle missing\n");
+		return PTR_ERR(ddata->regmap);
+	}
+
+	dev_set_drvdata(&pdev->dev, ddata);
+
+	return st_irq_syscfg_enable(pdev);
+}
+
+static int __maybe_unused st_irq_syscfg_resume(struct device *dev)
+{
+	struct st_irq_syscfg *ddata = dev_get_drvdata(dev);
+
+	return regmap_update_bits(ddata->regmap, ddata->syscfg,
+				  ST_A9_IRQ_MASK, ddata->config);
+}
+
+static SIMPLE_DEV_PM_OPS(st_irq_syscfg_pm_ops, NULL, st_irq_syscfg_resume);
+
+static struct platform_driver st_irq_syscfg_driver = {
+	.driver = {
+		.name = "st_irq_syscfg",
+		.pm = &st_irq_syscfg_pm_ops,
+		.of_match_table = st_irq_syscfg_match,
+	},
+	.probe = st_irq_syscfg_probe,
+};
+
+static int __init st_irq_syscfg_init(void)
+{
+	return platform_driver_register(&st_irq_syscfg_driver);
+}
+core_initcall(st_irq_syscfg_init);
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
new file mode 100644
index 0000000..0a2088e
--- /dev/null
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -0,0 +1,809 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Copyright (C) STMicroelectronics 2017
+ * Author:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define IRQS_PER_BANK 32
+
+struct stm32_exti_bank {
+	u32 imr_ofst;
+	u32 emr_ofst;
+	u32 rtsr_ofst;
+	u32 ftsr_ofst;
+	u32 swier_ofst;
+	u32 rpr_ofst;
+	u32 fpr_ofst;
+};
+
+#define UNDEF_REG ~0
+
+struct stm32_desc_irq {
+	u32 exti;
+	u32 irq_parent;
+};
+
+struct stm32_exti_drv_data {
+	const struct stm32_exti_bank **exti_banks;
+	const struct stm32_desc_irq *desc_irqs;
+	u32 bank_nr;
+	u32 irq_nr;
+};
+
+struct stm32_exti_chip_data {
+	struct stm32_exti_host_data *host_data;
+	const struct stm32_exti_bank *reg_bank;
+	struct raw_spinlock rlock;
+	u32 wake_active;
+	u32 mask_cache;
+	u32 rtsr_cache;
+	u32 ftsr_cache;
+};
+
+struct stm32_exti_host_data {
+	void __iomem *base;
+	struct stm32_exti_chip_data *chips_data;
+	const struct stm32_exti_drv_data *drv_data;
+};
+
+static struct stm32_exti_host_data *stm32_host_data;
+
+static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
+	.imr_ofst	= 0x00,
+	.emr_ofst	= 0x04,
+	.rtsr_ofst	= 0x08,
+	.ftsr_ofst	= 0x0C,
+	.swier_ofst	= 0x10,
+	.rpr_ofst	= 0x14,
+	.fpr_ofst	= UNDEF_REG,
+};
+
+static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = {
+	&stm32f4xx_exti_b1,
+};
+
+static const struct stm32_exti_drv_data stm32f4xx_drv_data = {
+	.exti_banks = stm32f4xx_exti_banks,
+	.bank_nr = ARRAY_SIZE(stm32f4xx_exti_banks),
+};
+
+static const struct stm32_exti_bank stm32h7xx_exti_b1 = {
+	.imr_ofst	= 0x80,
+	.emr_ofst	= 0x84,
+	.rtsr_ofst	= 0x00,
+	.ftsr_ofst	= 0x04,
+	.swier_ofst	= 0x08,
+	.rpr_ofst	= 0x88,
+	.fpr_ofst	= UNDEF_REG,
+};
+
+static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
+	.imr_ofst	= 0x90,
+	.emr_ofst	= 0x94,
+	.rtsr_ofst	= 0x20,
+	.ftsr_ofst	= 0x24,
+	.swier_ofst	= 0x28,
+	.rpr_ofst	= 0x98,
+	.fpr_ofst	= UNDEF_REG,
+};
+
+static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
+	.imr_ofst	= 0xA0,
+	.emr_ofst	= 0xA4,
+	.rtsr_ofst	= 0x40,
+	.ftsr_ofst	= 0x44,
+	.swier_ofst	= 0x48,
+	.rpr_ofst	= 0xA8,
+	.fpr_ofst	= UNDEF_REG,
+};
+
+static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = {
+	&stm32h7xx_exti_b1,
+	&stm32h7xx_exti_b2,
+	&stm32h7xx_exti_b3,
+};
+
+static const struct stm32_exti_drv_data stm32h7xx_drv_data = {
+	.exti_banks = stm32h7xx_exti_banks,
+	.bank_nr = ARRAY_SIZE(stm32h7xx_exti_banks),
+};
+
+static const struct stm32_exti_bank stm32mp1_exti_b1 = {
+	.imr_ofst	= 0x80,
+	.emr_ofst	= 0x84,
+	.rtsr_ofst	= 0x00,
+	.ftsr_ofst	= 0x04,
+	.swier_ofst	= 0x08,
+	.rpr_ofst	= 0x0C,
+	.fpr_ofst	= 0x10,
+};
+
+static const struct stm32_exti_bank stm32mp1_exti_b2 = {
+	.imr_ofst	= 0x90,
+	.emr_ofst	= 0x94,
+	.rtsr_ofst	= 0x20,
+	.ftsr_ofst	= 0x24,
+	.swier_ofst	= 0x28,
+	.rpr_ofst	= 0x2C,
+	.fpr_ofst	= 0x30,
+};
+
+static const struct stm32_exti_bank stm32mp1_exti_b3 = {
+	.imr_ofst	= 0xA0,
+	.emr_ofst	= 0xA4,
+	.rtsr_ofst	= 0x40,
+	.ftsr_ofst	= 0x44,
+	.swier_ofst	= 0x48,
+	.rpr_ofst	= 0x4C,
+	.fpr_ofst	= 0x50,
+};
+
+static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
+	&stm32mp1_exti_b1,
+	&stm32mp1_exti_b2,
+	&stm32mp1_exti_b3,
+};
+
+static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
+	{ .exti = 0, .irq_parent = 6 },
+	{ .exti = 1, .irq_parent = 7 },
+	{ .exti = 2, .irq_parent = 8 },
+	{ .exti = 3, .irq_parent = 9 },
+	{ .exti = 4, .irq_parent = 10 },
+	{ .exti = 5, .irq_parent = 23 },
+	{ .exti = 6, .irq_parent = 64 },
+	{ .exti = 7, .irq_parent = 65 },
+	{ .exti = 8, .irq_parent = 66 },
+	{ .exti = 9, .irq_parent = 67 },
+	{ .exti = 10, .irq_parent = 40 },
+	{ .exti = 11, .irq_parent = 42 },
+	{ .exti = 12, .irq_parent = 76 },
+	{ .exti = 13, .irq_parent = 77 },
+	{ .exti = 14, .irq_parent = 121 },
+	{ .exti = 15, .irq_parent = 127 },
+	{ .exti = 16, .irq_parent = 1 },
+	{ .exti = 65, .irq_parent = 144 },
+	{ .exti = 68, .irq_parent = 143 },
+	{ .exti = 73, .irq_parent = 129 },
+};
+
+static const struct stm32_exti_drv_data stm32mp1_drv_data = {
+	.exti_banks = stm32mp1_exti_banks,
+	.bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
+	.desc_irqs = stm32mp1_desc_irq,
+	.irq_nr = ARRAY_SIZE(stm32mp1_desc_irq),
+};
+
+static int stm32_exti_to_irq(const struct stm32_exti_drv_data *drv_data,
+			     irq_hw_number_t hwirq)
+{
+	const struct stm32_desc_irq *desc_irq;
+	int i;
+
+	if (!drv_data->desc_irqs)
+		return -EINVAL;
+
+	for (i = 0; i < drv_data->irq_nr; i++) {
+		desc_irq = &drv_data->desc_irqs[i];
+		if (desc_irq->exti == hwirq)
+			return desc_irq->irq_parent;
+	}
+
+	return -EINVAL;
+}
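+
+/*
+ * For example, with the stm32mp1 table above,
+ * stm32_exti_to_irq(&stm32mp1_drv_data, 5) returns GIC SPI 23, while
+ * an EXTI line without a table entry (such as 17) yields -EINVAL and
+ * is therefore not chained to the parent domain.
+ */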
+
+static unsigned long stm32_exti_pending(struct irq_chip_generic *gc)
+{
+	struct stm32_exti_chip_data *chip_data = gc->private;
+	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+	unsigned long pending;
+
+	pending = irq_reg_readl(gc, stm32_bank->rpr_ofst);
+	if (stm32_bank->fpr_ofst != UNDEF_REG)
+		pending |= irq_reg_readl(gc, stm32_bank->fpr_ofst);
+
+	return pending;
+}
+
+static void stm32_irq_handler(struct irq_desc *desc)
+{
+	struct irq_domain *domain = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int virq, nbanks = domain->gc->num_chips;
+	struct irq_chip_generic *gc;
+	unsigned long pending;
+	int n, i, irq_base = 0;
+
+	chained_irq_enter(chip, desc);
+
+	for (i = 0; i < nbanks; i++, irq_base += IRQS_PER_BANK) {
+		gc = irq_get_domain_generic_chip(domain, irq_base);
+
+		while ((pending = stm32_exti_pending(gc))) {
+			for_each_set_bit(n, &pending, IRQS_PER_BANK) {
+				virq = irq_find_mapping(domain, irq_base + n);
+				generic_handle_irq(virq);
+			}
+		}
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static int stm32_exti_set_type(struct irq_data *d,
+			       unsigned int type, u32 *rtsr, u32 *ftsr)
+{
+	u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
+
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		*rtsr |= mask;
+		*ftsr &= ~mask;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		*rtsr &= ~mask;
+		*ftsr |= mask;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		*rtsr |= mask;
+		*ftsr |= mask;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
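+
+/*
+ * Trigger selection written back by the callers of this helper:
+ *
+ *	type			RTSR	FTSR
+ *	IRQ_TYPE_EDGE_RISING	1	0
+ *	IRQ_TYPE_EDGE_FALLING	0	1
+ *	IRQ_TYPE_EDGE_BOTH	1	1
+ *
+ * Level types are rejected with -EINVAL.
+ */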
+
+static int stm32_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct stm32_exti_chip_data *chip_data = gc->private;
+	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+	u32 rtsr, ftsr;
+	int err;
+
+	irq_gc_lock(gc);
+
+	rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
+	ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);
+
+	err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
+	if (err) {
+		irq_gc_unlock(gc);
+		return err;
+	}
+
+	irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
+	irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);
+
+	irq_gc_unlock(gc);
+
+	return 0;
+}
+
+static void stm32_chip_suspend(struct stm32_exti_chip_data *chip_data,
+			       u32 wake_active)
+{
+	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+	void __iomem *base = chip_data->host_data->base;
+
+	/* save rtsr, ftsr registers */
+	chip_data->rtsr_cache = readl_relaxed(base + stm32_bank->rtsr_ofst);
+	chip_data->ftsr_cache = readl_relaxed(base + stm32_bank->ftsr_ofst);
+
+	writel_relaxed(wake_active, base + stm32_bank->imr_ofst);
+}
+
+static void stm32_chip_resume(struct stm32_exti_chip_data *chip_data,
+			      u32 mask_cache)
+{
+	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+	void __iomem *base = chip_data->host_data->base;
+
+	/* restore rtsr, ftsr, registers */
+	writel_relaxed(chip_data->rtsr_cache, base + stm32_bank->rtsr_ofst);
+	writel_relaxed(chip_data->ftsr_cache, base + stm32_bank->ftsr_ofst);
+
+	writel_relaxed(mask_cache, base + stm32_bank->imr_ofst);
+}
+
+static void stm32_irq_suspend(struct irq_chip_generic *gc)
+{
+	struct stm32_exti_chip_data *chip_data = gc->private;
+
+	irq_gc_lock(gc);
+	stm32_chip_suspend(chip_data, gc->wake_active);
+	irq_gc_unlock(gc);
+}
+
+static void stm32_irq_resume(struct irq_chip_generic *gc)
+{
+	struct stm32_exti_chip_data *chip_data = gc->private;
+
+	irq_gc_lock(gc);
+	stm32_chip_resume(chip_data, gc->mask_cache);
+	irq_gc_unlock(gc);
+}
+
+static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq,
+			    unsigned int nr_irqs, void *data)
+{
+	struct irq_fwspec *fwspec = data;
+	irq_hw_number_t hwirq;
+
+	hwirq = fwspec->param[0];
+
+	irq_map_generic_chip(d, virq, hwirq);
+
+	return 0;
+}
+
+static void stm32_exti_free(struct irq_domain *d, unsigned int virq,
+			    unsigned int nr_irqs)
+{
+	struct irq_data *data = irq_domain_get_irq_data(d, virq);
+
+	irq_domain_reset_irq_data(data);
+}
+
+static const struct irq_domain_ops irq_exti_domain_ops = {
+	.map	= irq_map_generic_chip,
+	.alloc  = stm32_exti_alloc,
+	.free	= stm32_exti_free,
+};
+
+static void stm32_irq_ack(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct stm32_exti_chip_data *chip_data = gc->private;
+	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+
+	irq_gc_lock(gc);
+
+	irq_reg_writel(gc, d->mask, stm32_bank->rpr_ofst);
+	if (stm32_bank->fpr_ofst != UNDEF_REG)
+		irq_reg_writel(gc, d->mask, stm32_bank->fpr_ofst);
+
+	irq_gc_unlock(gc);
+}
+
+static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg)
+{
+	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+	void __iomem *base = chip_data->host_data->base;
+	u32 val;
+
+	val = readl_relaxed(base + reg);
+	val |= BIT(d->hwirq % IRQS_PER_BANK);
+	writel_relaxed(val, base + reg);
+
+	return val;
+}
+
+static inline u32 stm32_exti_clr_bit(struct irq_data *d, u32 reg)
+{
+	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+	void __iomem *base = chip_data->host_data->base;
+	u32 val;
+
+	val = readl_relaxed(base + reg);
+	val &= ~BIT(d->hwirq % IRQS_PER_BANK);
+	writel_relaxed(val, base + reg);
+
+	return val;
+}
+
+static void stm32_exti_h_eoi(struct irq_data *d)
+{
+	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+
+	raw_spin_lock(&chip_data->rlock);
+
+	stm32_exti_set_bit(d, stm32_bank->rpr_ofst);
+	if (stm32_bank->fpr_ofst != UNDEF_REG)
+		stm32_exti_set_bit(d, stm32_bank->fpr_ofst);
+
+	raw_spin_unlock(&chip_data->rlock);
+
+	if (d->parent_data->chip)
+		irq_chip_eoi_parent(d);
+}
+
+static void stm32_exti_h_mask(struct irq_data *d)
+{
+	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+
+	raw_spin_lock(&chip_data->rlock);
+	chip_data->mask_cache = stm32_exti_clr_bit(d, stm32_bank->imr_ofst);
+	raw_spin_unlock(&chip_data->rlock);
+
+	if (d->parent_data->chip)
+		irq_chip_mask_parent(d);
+}
+
+static void stm32_exti_h_unmask(struct irq_data *d)
+{
+	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+
+	raw_spin_lock(&chip_data->rlock);
+	chip_data->mask_cache = stm32_exti_set_bit(d, stm32_bank->imr_ofst);
+	raw_spin_unlock(&chip_data->rlock);
+
+	if (d->parent_data->chip)
+		irq_chip_unmask_parent(d);
+}
+
+static int stm32_exti_h_set_type(struct irq_data *d, unsigned int type)
+{
+	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+	void __iomem *base = chip_data->host_data->base;
+	u32 rtsr, ftsr;
+	int err;
+
+	raw_spin_lock(&chip_data->rlock);
+	rtsr = readl_relaxed(base + stm32_bank->rtsr_ofst);
+	ftsr = readl_relaxed(base + stm32_bank->ftsr_ofst);
+
+	err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
+	if (err) {
+		raw_spin_unlock(&chip_data->rlock);
+		return err;
+	}
+
+	writel_relaxed(rtsr, base + stm32_bank->rtsr_ofst);
+	writel_relaxed(ftsr, base + stm32_bank->ftsr_ofst);
+	raw_spin_unlock(&chip_data->rlock);
+
+	return 0;
+}
+
+static int stm32_exti_h_set_wake(struct irq_data *d, unsigned int on)
+{
+	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+	u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
+
+	raw_spin_lock(&chip_data->rlock);
+
+	if (on)
+		chip_data->wake_active |= mask;
+	else
+		chip_data->wake_active &= ~mask;
+
+	raw_spin_unlock(&chip_data->rlock);
+
+	return 0;
+}
+
+static int stm32_exti_h_set_affinity(struct irq_data *d,
+				     const struct cpumask *dest, bool force)
+{
+	if (d->parent_data->chip)
+		return irq_chip_set_affinity_parent(d, dest, force);
+
+	return -EINVAL;
+}
+
+#ifdef CONFIG_PM
+static int stm32_exti_h_suspend(void)
+{
+	struct stm32_exti_chip_data *chip_data;
+	int i;
+
+	for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
+		chip_data = &stm32_host_data->chips_data[i];
+		raw_spin_lock(&chip_data->rlock);
+		stm32_chip_suspend(chip_data, chip_data->wake_active);
+		raw_spin_unlock(&chip_data->rlock);
+	}
+
+	return 0;
+}
+
+static void stm32_exti_h_resume(void)
+{
+	struct stm32_exti_chip_data *chip_data;
+	int i;
+
+	for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
+		chip_data = &stm32_host_data->chips_data[i];
+		raw_spin_lock(&chip_data->rlock);
+		stm32_chip_resume(chip_data, chip_data->mask_cache);
+		raw_spin_unlock(&chip_data->rlock);
+	}
+}
+
+static struct syscore_ops stm32_exti_h_syscore_ops = {
+	.suspend	= stm32_exti_h_suspend,
+	.resume		= stm32_exti_h_resume,
+};
+
+static void stm32_exti_h_syscore_init(void)
+{
+	register_syscore_ops(&stm32_exti_h_syscore_ops);
+}
+#else
+static inline void stm32_exti_h_syscore_init(void) {}
+#endif
+
+static struct irq_chip stm32_exti_h_chip = {
+	.name			= "stm32-exti-h",
+	.irq_eoi		= stm32_exti_h_eoi,
+	.irq_mask		= stm32_exti_h_mask,
+	.irq_unmask		= stm32_exti_h_unmask,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_set_type		= stm32_exti_h_set_type,
+	.irq_set_wake		= stm32_exti_h_set_wake,
+	.flags			= IRQCHIP_MASK_ON_SUSPEND,
+	.irq_set_affinity	= IS_ENABLED(CONFIG_SMP) ? stm32_exti_h_set_affinity : NULL,
+};
+
+static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
+				     unsigned int virq,
+				     unsigned int nr_irqs, void *data)
+{
+	struct stm32_exti_host_data *host_data = dm->host_data;
+	struct stm32_exti_chip_data *chip_data;
+	struct irq_fwspec *fwspec = data;
+	struct irq_fwspec p_fwspec;
+	irq_hw_number_t hwirq;
+	int p_irq, bank;
+
+	hwirq = fwspec->param[0];
+	bank  = hwirq / IRQS_PER_BANK;
+	chip_data = &host_data->chips_data[bank];
+
+	irq_domain_set_hwirq_and_chip(dm, virq, hwirq,
+				      &stm32_exti_h_chip, chip_data);
+
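+	/*
+	 * Lines wired to a dedicated parent interrupt get one allocated
+	 * in the parent (GIC) domain; the other lines terminate here.
+	 */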
+	p_irq = stm32_exti_to_irq(host_data->drv_data, hwirq);
+	if (p_irq >= 0) {
+		p_fwspec.fwnode = dm->parent->fwnode;
+		p_fwspec.param_count = 3;
+		p_fwspec.param[0] = GIC_SPI;
+		p_fwspec.param[1] = p_irq;
+		p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
+
+		return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
+	}
+
+	return 0;
+}
+
+static struct
+stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
+					   struct device_node *node)
+{
+	struct stm32_exti_host_data *host_data;
+
+	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
+	if (!host_data)
+		return NULL;
+
+	host_data->drv_data = dd;
+	host_data->chips_data = kcalloc(dd->bank_nr,
+					sizeof(struct stm32_exti_chip_data),
+					GFP_KERNEL);
+	if (!host_data->chips_data)
+		goto free_host_data;
+
+	host_data->base = of_iomap(node, 0);
+	if (!host_data->base) {
+		pr_err("%pOF: Unable to map registers\n", node);
+		goto free_chips_data;
+	}
+
+	stm32_host_data = host_data;
+
+	return host_data;
+
+free_chips_data:
+	kfree(host_data->chips_data);
+free_host_data:
+	kfree(host_data);
+
+	return NULL;
+}
+
+static struct
+stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
+					   u32 bank_idx,
+					   struct device_node *node)
+{
+	const struct stm32_exti_bank *stm32_bank;
+	struct stm32_exti_chip_data *chip_data;
+	void __iomem *base = h_data->base;
+	u32 irqs_mask;
+
+	stm32_bank = h_data->drv_data->exti_banks[bank_idx];
+	chip_data = &h_data->chips_data[bank_idx];
+	chip_data->host_data = h_data;
+	chip_data->reg_bank = stm32_bank;
+
+	raw_spin_lock_init(&chip_data->rlock);
+
+	/*
+	 * Determine which irqs are supported: write all ones to the
+	 * rising-edge trigger register; only the implemented lines read
+	 * back as set.
+	 */
+	writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst);
+	irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst);
+
+	/*
+	 * This IP has no reset, so after hot reboot we should
+	 * clear registers to avoid residue
+	 */
+	writel_relaxed(0, base + stm32_bank->imr_ofst);
+	writel_relaxed(0, base + stm32_bank->emr_ofst);
+	writel_relaxed(0, base + stm32_bank->rtsr_ofst);
+	writel_relaxed(0, base + stm32_bank->ftsr_ofst);
+	writel_relaxed(~0UL, base + stm32_bank->rpr_ofst);
+	if (stm32_bank->fpr_ofst != UNDEF_REG)
+		writel_relaxed(~0UL, base + stm32_bank->fpr_ofst);
+
+	pr_info("%s: bank%d, External IRQs available:%#x\n",
+		node->full_name, bank_idx, irqs_mask);
+
+	return chip_data;
+}
+
+static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
+				  struct device_node *node)
+{
+	struct stm32_exti_host_data *host_data;
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+	int nr_irqs, ret, i;
+	struct irq_chip_generic *gc;
+	struct irq_domain *domain;
+
+	host_data = stm32_exti_host_init(drv_data, node);
+	if (!host_data)
+		return -ENOMEM;
+
+	domain = irq_domain_add_linear(node, drv_data->bank_nr * IRQS_PER_BANK,
+				       &irq_exti_domain_ops, NULL);
+	if (!domain) {
+		pr_err("%s: Could not register interrupt domain.\n",
+		       node->name);
+		ret = -ENOMEM;
+		goto out_unmap;
+	}
+
+	ret = irq_alloc_domain_generic_chips(domain, IRQS_PER_BANK, 1, "exti",
+					     handle_edge_irq, clr, 0, 0);
+	if (ret) {
+		pr_err("%pOF: Could not allocate generic interrupt chip.\n",
+		       node);
+		goto out_free_domain;
+	}
+
+	for (i = 0; i < drv_data->bank_nr; i++) {
+		const struct stm32_exti_bank *stm32_bank;
+		struct stm32_exti_chip_data *chip_data;
+
+		stm32_bank = drv_data->exti_banks[i];
+		chip_data = stm32_exti_chip_init(host_data, i, node);
+
+		gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK);
+
+		gc->reg_base = host_data->base;
+		gc->chip_types->type = IRQ_TYPE_EDGE_BOTH;
+		gc->chip_types->chip.irq_ack = stm32_irq_ack;
+		gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit;
+		gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit;
+		gc->chip_types->chip.irq_set_type = stm32_irq_set_type;
+		gc->chip_types->chip.irq_set_wake = irq_gc_set_wake;
+		gc->suspend = stm32_irq_suspend;
+		gc->resume = stm32_irq_resume;
+		gc->wake_enabled = IRQ_MSK(IRQS_PER_BANK);
+
+		gc->chip_types->regs.mask = stm32_bank->imr_ofst;
+		gc->private = (void *)chip_data;
+	}
+
+	nr_irqs = of_irq_count(node);
+	for (i = 0; i < nr_irqs; i++) {
+		unsigned int irq = irq_of_parse_and_map(node, i);
+
+		irq_set_handler_data(irq, domain);
+		irq_set_chained_handler(irq, stm32_irq_handler);
+	}
+
+	return 0;
+
+out_free_domain:
+	irq_domain_remove(domain);
+out_unmap:
+	iounmap(host_data->base);
+	kfree(host_data->chips_data);
+	kfree(host_data);
+	return ret;
+}
+
+static const struct irq_domain_ops stm32_exti_h_domain_ops = {
+	.alloc	= stm32_exti_h_domain_alloc,
+	.free	= irq_domain_free_irqs_common,
+};
+
+static int
+__init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data,
+				 struct device_node *node,
+				 struct device_node *parent)
+{
+	struct irq_domain *parent_domain, *domain;
+	struct stm32_exti_host_data *host_data;
+	int ret, i;
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain) {
+		pr_err("interrupt-parent not found\n");
+		return -EINVAL;
+	}
+
+	host_data = stm32_exti_host_init(drv_data, node);
+	if (!host_data)
+		return -ENOMEM;
+
+	for (i = 0; i < drv_data->bank_nr; i++)
+		stm32_exti_chip_init(host_data, i, node);
+
+	domain = irq_domain_add_hierarchy(parent_domain, 0,
+					  drv_data->bank_nr * IRQS_PER_BANK,
+					  node, &stm32_exti_h_domain_ops,
+					  host_data);
+
+	if (!domain) {
+		pr_err("%s: Could not register exti domain.\n", node->name);
+		ret = -ENOMEM;
+		goto out_unmap;
+	}
+
+	stm32_exti_h_syscore_init();
+
+	return 0;
+
+out_unmap:
+	iounmap(host_data->base);
+	kfree(host_data->chips_data);
+	kfree(host_data);
+	return ret;
+}
+
+static int __init stm32f4_exti_of_init(struct device_node *np,
+				       struct device_node *parent)
+{
+	return stm32_exti_init(&stm32f4xx_drv_data, np);
+}
+
+IRQCHIP_DECLARE(stm32f4_exti, "st,stm32-exti", stm32f4_exti_of_init);
+
+static int __init stm32h7_exti_of_init(struct device_node *np,
+				       struct device_node *parent)
+{
+	return stm32_exti_init(&stm32h7xx_drv_data, np);
+}
+
+IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init);
+
+static int __init stm32mp1_exti_of_init(struct device_node *np,
+					struct device_node *parent)
+{
+	return stm32_exti_hierarchy_init(&stm32mp1_drv_data, np, parent);
+}
+
+IRQCHIP_DECLARE(stm32mp1_exti, "st,stm32mp1-exti", stm32mp1_exti_of_init);
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
new file mode 100644
index 0000000..e3e5b91
--- /dev/null
+++ b/drivers/irqchip/irq-sun4i.c
@@ -0,0 +1,158 @@
+/*
+ * Allwinner A1X SoCs IRQ chip driver.
+ *
+ * Copyright (C) 2012 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * Based on code from
+ * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
+ * Benn Huang <benn@allwinnertech.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/exception.h>
+
+#define SUN4I_IRQ_VECTOR_REG		0x00
+#define SUN4I_IRQ_PROTECTION_REG	0x08
+#define SUN4I_IRQ_NMI_CTRL_REG		0x0c
+#define SUN4I_IRQ_PENDING_REG(x)	(0x10 + 0x4 * x)
+#define SUN4I_IRQ_FIQ_PENDING_REG(x)	(0x20 + 0x4 * x)
+#define SUN4I_IRQ_ENABLE_REG(x)		(0x40 + 0x4 * x)
+#define SUN4I_IRQ_MASK_REG(x)		(0x50 + 0x4 * x)
+
+static void __iomem *sun4i_irq_base;
+static struct irq_domain *sun4i_irq_domain;
+
+static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs);
+
+static void sun4i_irq_ack(struct irq_data *irqd)
+{
+	unsigned int irq = irqd_to_hwirq(irqd);
+
+	if (irq != 0)
+		return; /* Only IRQ 0 / the ENMI needs to be acked */
+
+	writel(BIT(0), sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0));
+}
+
+static void sun4i_irq_mask(struct irq_data *irqd)
+{
+	unsigned int irq = irqd_to_hwirq(irqd);
+	unsigned int irq_off = irq % 32;
+	int reg = irq / 32;
+	u32 val;
+
+	val = readl(sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
+	writel(val & ~(1 << irq_off),
+	       sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
+}
+
+static void sun4i_irq_unmask(struct irq_data *irqd)
+{
+	unsigned int irq = irqd_to_hwirq(irqd);
+	unsigned int irq_off = irq % 32;
+	int reg = irq / 32;
+	u32 val;
+
+	val = readl(sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
+	writel(val | (1 << irq_off),
+	       sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
+}
+
+static struct irq_chip sun4i_irq_chip = {
+	.name		= "sun4i_irq",
+	.irq_eoi	= sun4i_irq_ack,
+	.irq_mask	= sun4i_irq_mask,
+	.irq_unmask	= sun4i_irq_unmask,
+	.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED,
+};
+
+static int sun4i_irq_map(struct irq_domain *d, unsigned int virq,
+			 irq_hw_number_t hw)
+{
+	irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_fasteoi_irq);
+	irq_set_probe(virq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops sun4i_irq_ops = {
+	.map = sun4i_irq_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+static int __init sun4i_of_init(struct device_node *node,
+				struct device_node *parent)
+{
+	sun4i_irq_base = of_iomap(node, 0);
+	if (!sun4i_irq_base)
+		panic("%pOF: unable to map IC registers\n",
+			node);
+
+	/* Disable all interrupts */
+	writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0));
+	writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(1));
+	writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(2));
+
+	/* Unmask all the interrupts, ENABLE_REG(x) is used for masking */
+	writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(0));
+	writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(1));
+	writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(2));
+
+	/* Clear all the pending interrupts */
+	writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0));
+	writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(1));
+	writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(2));
+
+	/* Enable protection mode */
+	writel(0x01, sun4i_irq_base + SUN4I_IRQ_PROTECTION_REG);
+
+	/* Configure the external interrupt source type */
+	writel(0x00, sun4i_irq_base + SUN4I_IRQ_NMI_CTRL_REG);
+
+	sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32,
+						 &sun4i_irq_ops, NULL);
+	if (!sun4i_irq_domain)
+		panic("%pOF: unable to create IRQ domain\n", node);
+
+	set_handle_irq(sun4i_handle_irq);
+
+	return 0;
+}
+IRQCHIP_DECLARE(allwinner_sun4i_ic, "allwinner,sun4i-a10-ic", sun4i_of_init);
+
+static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
+{
+	u32 hwirq;
+
+	/*
+	 * hwirq == 0 can mean one of 3 things:
+	 * 1) no more irqs pending
+	 * 2) irq 0 pending
+	 * 3) spurious irq
+	 * So if we immediately get a reading of 0, check the irq-pending reg
+	 * to differentiate between 2 and 3. We only do this once to avoid
+	 * the extra check in the common case of 1 happening after having
+	 * read the vector-reg once.
+	 */
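+	/* The vector register reports the active irq as hwirq * 4 */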
+	hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
+	if (hwirq == 0 &&
+		  !(readl(sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0)) & BIT(0)))
+		return;
+
+	do {
+		handle_domain_irq(sun4i_irq_domain, hwirq, regs);
+		hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
+	} while (hwirq != 0);
+}
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
new file mode 100644
index 0000000..a412b5d
--- /dev/null
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -0,0 +1,262 @@
+/*
+ * Allwinner A20/A31 SoCs NMI IRQ chip driver.
+ *
+ * Carlo Caione <carlo.caione@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define DRV_NAME	"sunxi-nmi"
+#define pr_fmt(fmt)	DRV_NAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define SUNXI_NMI_SRC_TYPE_MASK	0x00000003
+
+#define SUNXI_NMI_IRQ_BIT	BIT(0)
+
+#define SUN6I_R_INTC_CTRL	0x0c
+#define SUN6I_R_INTC_PENDING	0x10
+#define SUN6I_R_INTC_ENABLE	0x40
+
+/*
+ * For deprecated sun6i-a31-sc-nmi compatible.
+ * Registers are offset by 0x0c.
+ */
+#define SUN6I_R_INTC_NMI_OFFSET	0x0c
+#define SUN6I_NMI_CTRL		(SUN6I_R_INTC_CTRL - SUN6I_R_INTC_NMI_OFFSET)
+#define SUN6I_NMI_PENDING	(SUN6I_R_INTC_PENDING - SUN6I_R_INTC_NMI_OFFSET)
+#define SUN6I_NMI_ENABLE	(SUN6I_R_INTC_ENABLE - SUN6I_R_INTC_NMI_OFFSET)
+
+#define SUN7I_NMI_CTRL		0x00
+#define SUN7I_NMI_PENDING	0x04
+#define SUN7I_NMI_ENABLE	0x08
+
+#define SUN9I_NMI_CTRL		0x00
+#define SUN9I_NMI_ENABLE	0x04
+#define SUN9I_NMI_PENDING	0x08
+
+enum {
+	SUNXI_SRC_TYPE_LEVEL_LOW = 0,
+	SUNXI_SRC_TYPE_EDGE_FALLING,
+	SUNXI_SRC_TYPE_LEVEL_HIGH,
+	SUNXI_SRC_TYPE_EDGE_RISING,
+};
+
+struct sunxi_sc_nmi_reg_offs {
+	u32 ctrl;
+	u32 pend;
+	u32 enable;
+};
+
+static const struct sunxi_sc_nmi_reg_offs sun6i_r_intc_reg_offs __initconst = {
+	.ctrl	= SUN6I_R_INTC_CTRL,
+	.pend	= SUN6I_R_INTC_PENDING,
+	.enable	= SUN6I_R_INTC_ENABLE,
+};
+
+static const struct sunxi_sc_nmi_reg_offs sun6i_reg_offs __initconst = {
+	.ctrl	= SUN6I_NMI_CTRL,
+	.pend	= SUN6I_NMI_PENDING,
+	.enable	= SUN6I_NMI_ENABLE,
+};
+
+static const struct sunxi_sc_nmi_reg_offs sun7i_reg_offs __initconst = {
+	.ctrl	= SUN7I_NMI_CTRL,
+	.pend	= SUN7I_NMI_PENDING,
+	.enable	= SUN7I_NMI_ENABLE,
+};
+
+static const struct sunxi_sc_nmi_reg_offs sun9i_reg_offs __initconst = {
+	.ctrl	= SUN9I_NMI_CTRL,
+	.pend	= SUN9I_NMI_PENDING,
+	.enable	= SUN9I_NMI_ENABLE,
+};
+
+static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
+				      u32 val)
+{
+	irq_reg_writel(gc, val, off);
+}
+
+static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
+{
+	return irq_reg_readl(gc, off);
+}
+
+static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc)
+{
+	struct irq_domain *domain = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int virq = irq_find_mapping(domain, 0);
+
+	chained_irq_enter(chip, desc);
+	generic_handle_irq(virq);
+	chained_irq_exit(chip, desc);
+}
+
+static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+	struct irq_chip_type *ct = gc->chip_types;
+	u32 src_type_reg;
+	u32 ctrl_off = ct->regs.type;
+	unsigned int src_type;
+	unsigned int i;
+
+	irq_gc_lock(gc);
+
+	switch (flow_type & IRQF_TRIGGER_MASK) {
+	case IRQ_TYPE_EDGE_FALLING:
+		src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		src_type = SUNXI_SRC_TYPE_EDGE_RISING;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
+		break;
+	case IRQ_TYPE_NONE:
+	case IRQ_TYPE_LEVEL_LOW:
+		src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
+		break;
+	default:
+		irq_gc_unlock(gc);
+		pr_err("Cannot assign multiple trigger modes to IRQ %d.\n",
+			data->irq);
+		return -EBADR;
+	}
+
+	irqd_set_trigger_type(data, flow_type);
+	irq_setup_alt_chip(data, flow_type);
+
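+	/* Find the type register of the chip type now handling this irq */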
+	for (i = 0; i < gc->num_ct; i++, ct++)
+		if (ct->type & flow_type)
+			ctrl_off = ct->regs.type;
+
+	src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
+	src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
+	src_type_reg |= src_type;
+	sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);
+
+	irq_gc_unlock(gc);
+
+	return IRQ_SET_MASK_OK;
+}
+
+static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
+					const struct sunxi_sc_nmi_reg_offs *reg_offs)
+{
+	struct irq_domain *domain;
+	struct irq_chip_generic *gc;
+	unsigned int irq;
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+	int ret;
+
+	domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
+	if (!domain) {
+		pr_err("Could not register interrupt domain.\n");
+		return -ENOMEM;
+	}
+
+	ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME,
+					     handle_fasteoi_irq, clr, 0,
+					     IRQ_GC_INIT_MASK_CACHE);
+	if (ret) {
+		pr_err("Could not allocate generic interrupt chip.\n");
+		goto fail_irqd_remove;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		pr_err("unable to parse irq\n");
+		ret = -EINVAL;
+		goto fail_irqd_remove;
+	}
+
+	gc = irq_get_domain_generic_chip(domain, 0);
+	gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
+	if (IS_ERR(gc->reg_base)) {
+		pr_err("unable to map resource\n");
+		ret = PTR_ERR(gc->reg_base);
+		goto fail_irqd_remove;
+	}
+
+	gc->chip_types[0].type			= IRQ_TYPE_LEVEL_MASK;
+	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
+	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
+	gc->chip_types[0].chip.irq_eoi		= irq_gc_ack_set_bit;
+	gc->chip_types[0].chip.irq_set_type	= sunxi_sc_nmi_set_type;
+	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
+	gc->chip_types[0].regs.ack		= reg_offs->pend;
+	gc->chip_types[0].regs.mask		= reg_offs->enable;
+	gc->chip_types[0].regs.type		= reg_offs->ctrl;
+
+	gc->chip_types[1].type			= IRQ_TYPE_EDGE_BOTH;
+	gc->chip_types[1].chip.name		= gc->chip_types[0].chip.name;
+	gc->chip_types[1].chip.irq_ack		= irq_gc_ack_set_bit;
+	gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
+	gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
+	gc->chip_types[1].chip.irq_set_type	= sunxi_sc_nmi_set_type;
+	gc->chip_types[1].regs.ack		= reg_offs->pend;
+	gc->chip_types[1].regs.mask		= reg_offs->enable;
+	gc->chip_types[1].regs.type		= reg_offs->ctrl;
+	gc->chip_types[1].handler		= handle_edge_irq;
+
+	/* Disable any active interrupts */
+	sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
+
+	/* Clear any pending NMI interrupts */
+	sunxi_sc_nmi_write(gc, reg_offs->pend, SUNXI_NMI_IRQ_BIT);
+
+	irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);
+
+	return 0;
+
+fail_irqd_remove:
+	irq_domain_remove(domain);
+
+	return ret;
+}
+
+static int __init sun6i_r_intc_irq_init(struct device_node *node,
+					struct device_node *parent)
+{
+	return sunxi_sc_nmi_irq_init(node, &sun6i_r_intc_reg_offs);
+}
+IRQCHIP_DECLARE(sun6i_r_intc, "allwinner,sun6i-a31-r-intc",
+		sun6i_r_intc_irq_init);
+
+static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
+					struct device_node *parent)
+{
+	return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
+}
+IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);
+
+static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
+					struct device_node *parent)
+{
+	return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
+}
+IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);
+
+static int __init sun9i_nmi_irq_init(struct device_node *node,
+				     struct device_node *parent)
+{
+	return sunxi_sc_nmi_irq_init(node, &sun9i_reg_offs);
+}
+IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);
diff --git a/drivers/irqchip/irq-tango.c b/drivers/irqchip/irq-tango.c
new file mode 100644
index 0000000..580e2d7
--- /dev/null
+++ b/drivers/irqchip/irq-tango.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) 2014 Mans Rullgard <mans@mansr.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#define IRQ0_CTL_BASE		0x0000
+#define IRQ1_CTL_BASE		0x0100
+#define EDGE_CTL_BASE		0x0200
+#define IRQ2_CTL_BASE		0x0300
+
+#define IRQ_CTL_HI		0x18
+#define EDGE_CTL_HI		0x20
+
+#define IRQ_STATUS		0x00
+#define IRQ_RAWSTAT		0x04
+#define IRQ_EN_SET		0x08
+#define IRQ_EN_CLR		0x0c
+#define IRQ_SOFT_SET		0x10
+#define IRQ_SOFT_CLR		0x14
+
+#define EDGE_STATUS		0x00
+#define EDGE_RAWSTAT		0x04
+#define EDGE_CFG_RISE		0x08
+#define EDGE_CFG_FALL		0x0c
+#define EDGE_CFG_RISE_SET	0x10
+#define EDGE_CFG_RISE_CLR	0x14
+#define EDGE_CFG_FALL_SET	0x18
+#define EDGE_CFG_FALL_CLR	0x1c
+
+struct tangox_irq_chip {
+	void __iomem *base;
+	unsigned long ctl;
+};
+
+static inline u32 intc_readl(struct tangox_irq_chip *chip, int reg)
+{
+	return readl_relaxed(chip->base + reg);
+}
+
+static inline void intc_writel(struct tangox_irq_chip *chip, int reg, u32 val)
+{
+	writel_relaxed(val, chip->base + reg);
+}
+
+static void tangox_dispatch_irqs(struct irq_domain *dom, unsigned int status,
+				 int base)
+{
+	unsigned int hwirq;
+	unsigned int virq;
+
+	while (status) {
+		hwirq = __ffs(status);
+		virq = irq_find_mapping(dom, base + hwirq);
+		if (virq)
+			generic_handle_irq(virq);
+		status &= ~BIT(hwirq);
+	}
+}
+
+static void tangox_irq_handler(struct irq_desc *desc)
+{
+	struct irq_domain *dom = irq_desc_get_handler_data(desc);
+	struct irq_chip *host_chip = irq_desc_get_chip(desc);
+	struct tangox_irq_chip *chip = dom->host_data;
+	unsigned int status_lo, status_hi;
+
+	chained_irq_enter(host_chip, desc);
+
+	status_lo = intc_readl(chip, chip->ctl + IRQ_STATUS);
+	status_hi = intc_readl(chip, chip->ctl + IRQ_CTL_HI + IRQ_STATUS);
+
+	tangox_dispatch_irqs(dom, status_lo, 0);
+	tangox_dispatch_irqs(dom, status_hi, 32);
+
+	chained_irq_exit(host_chip, desc);
+}
+
+static int tangox_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct tangox_irq_chip *chip = gc->domain->host_data;
+	struct irq_chip_regs *regs = &gc->chip_types[0].regs;
+
+	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
+	case IRQ_TYPE_EDGE_RISING:
+		intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask);
+		intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask);
+		break;
+
+	case IRQ_TYPE_EDGE_FALLING:
+		intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, d->mask);
+		intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask);
+		break;
+
+	case IRQ_TYPE_LEVEL_HIGH:
+		intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, d->mask);
+		intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask);
+		break;
+
+	case IRQ_TYPE_LEVEL_LOW:
+		intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask);
+		intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask);
+		break;
+
+	default:
+		pr_err("Invalid trigger mode %x for IRQ %d\n",
+		       flow_type, d->irq);
+		return -EINVAL;
+	}
+
+	return irq_setup_alt_chip(d, flow_type);
+}
+
+static void __init tangox_irq_init_chip(struct irq_chip_generic *gc,
+					unsigned long ctl_offs,
+					unsigned long edge_offs)
+{
+	struct tangox_irq_chip *chip = gc->domain->host_data;
+	struct irq_chip_type *ct = gc->chip_types;
+	unsigned long ctl_base = chip->ctl + ctl_offs;
+	unsigned long edge_base = EDGE_CTL_BASE + edge_offs;
+	int i;
+
+	gc->reg_base = chip->base;
+	gc->unused = 0;
+
+	for (i = 0; i < 2; i++) {
+		ct[i].chip.irq_ack = irq_gc_ack_set_bit;
+		ct[i].chip.irq_mask = irq_gc_mask_disable_reg;
+		ct[i].chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
+		ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg;
+		ct[i].chip.irq_set_type = tangox_irq_set_type;
+		ct[i].chip.name = gc->domain->name;
+
+		ct[i].regs.enable = ctl_base + IRQ_EN_SET;
+		ct[i].regs.disable = ctl_base + IRQ_EN_CLR;
+		ct[i].regs.ack = edge_base + EDGE_RAWSTAT;
+		ct[i].regs.type = edge_base;
+	}
+
+	ct[0].type = IRQ_TYPE_LEVEL_MASK;
+	ct[0].handler = handle_level_irq;
+
+	ct[1].type = IRQ_TYPE_EDGE_BOTH;
+	ct[1].handler = handle_edge_irq;
+
+	intc_writel(chip, ct->regs.disable, 0xffffffff);
+	intc_writel(chip, ct->regs.ack, 0xffffffff);
+}
+
+static void __init tangox_irq_domain_init(struct irq_domain *dom)
+{
+	struct irq_chip_generic *gc;
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		gc = irq_get_domain_generic_chip(dom, i * 32);
+		tangox_irq_init_chip(gc, i * IRQ_CTL_HI, i * EDGE_CTL_HI);
+	}
+}
+
+static int __init tangox_irq_init(void __iomem *base, struct resource *baseres,
+				  struct device_node *node)
+{
+	struct tangox_irq_chip *chip;
+	struct irq_domain *dom;
+	struct resource res;
+	int irq;
+	int err;
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq)
+		panic("%s: failed to get IRQ", node->name);
+
+	err = of_address_to_resource(node, 0, &res);
+	if (err)
+		panic("%s: failed to get address", node->name);
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
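+	/* Register offset of this child controller within the shared block */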
+	chip->ctl = res.start - baseres->start;
+	chip->base = base;
+
+	dom = irq_domain_add_linear(node, 64, &irq_generic_chip_ops, chip);
+	if (!dom)
+		panic("%s: failed to create irqdomain", node->name);
+
+	err = irq_alloc_domain_generic_chips(dom, 32, 2, node->name,
+					     handle_level_irq, 0, 0, 0);
+	if (err)
+		panic("%s: failed to allocate irqchip", node->name);
+
+	tangox_irq_domain_init(dom);
+
+	irq_set_chained_handler_and_data(irq, tangox_irq_handler, dom);
+
+	return 0;
+}
+
+static int __init tangox_of_irq_init(struct device_node *node,
+				     struct device_node *parent)
+{
+	struct device_node *c;
+	struct resource res;
+	void __iomem *base;
+
+	base = of_iomap(node, 0);
+	if (!base)
+		panic("%s: of_iomap failed", node->name);
+
+	of_address_to_resource(node, 0, &res);
+
+	for_each_child_of_node(node, c)
+		tangox_irq_init(base, &res, c);
+
+	return 0;
+}
+IRQCHIP_DECLARE(tangox_intc, "sigma,smp8642-intc", tangox_of_irq_init);
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
new file mode 100644
index 0000000..848d782
--- /dev/null
+++ b/drivers/irqchip/irq-tb10x.c
@@ -0,0 +1,196 @@
+/*
+ * Abilis Systems interrupt controller driver
+ *
+ * Copyright (C) Abilis Systems 2012
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+
+#define AB_IRQCTL_INT_ENABLE   0x00
+#define AB_IRQCTL_INT_STATUS   0x04
+#define AB_IRQCTL_SRC_MODE     0x08
+#define AB_IRQCTL_SRC_POLARITY 0x0C
+#define AB_IRQCTL_INT_MODE     0x10
+#define AB_IRQCTL_INT_POLARITY 0x14
+#define AB_IRQCTL_INT_FORCE    0x18
+
+#define AB_IRQCTL_MAXIRQ       32
+
+static inline void ab_irqctl_writereg(struct irq_chip_generic *gc, u32 reg,
+	u32 val)
+{
+	irq_reg_writel(gc, val, reg);
+}
+
+static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg)
+{
+	return irq_reg_readl(gc, reg);
+}
+
+static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+	uint32_t im, mod, pol;
+
+	im = data->mask;
+
+	irq_gc_lock(gc);
+
+	mod = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_MODE) | im;
+	pol = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_POLARITY) | im;
+
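+	/*
+	 * Both bits start set (edge/rising); the XORs below clear them
+	 * per trigger type: mode bit clear = level, polarity bit clear =
+	 * falling/low.
+	 */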
+	switch (flow_type & IRQF_TRIGGER_MASK) {
+	case IRQ_TYPE_EDGE_FALLING:
+		pol ^= im;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		mod ^= im;
+		break;
+	case IRQ_TYPE_NONE:
+		flow_type = IRQ_TYPE_LEVEL_LOW;
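+		/* fall through */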
+	case IRQ_TYPE_LEVEL_LOW:
+		mod ^= im;
+		pol ^= im;
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		break;
+	default:
+		irq_gc_unlock(gc);
+		pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n",
+			__func__, data->irq);
+		return -EBADR;
+	}
+
+	irqd_set_trigger_type(data, flow_type);
+	irq_setup_alt_chip(data, flow_type);
+
+	ab_irqctl_writereg(gc, AB_IRQCTL_SRC_MODE, mod);
+	ab_irqctl_writereg(gc, AB_IRQCTL_SRC_POLARITY, pol);
+	ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, im);
+
+	irq_gc_unlock(gc);
+
+	return IRQ_SET_MASK_OK;
+}
+
+static void tb10x_irq_cascade(struct irq_desc *desc)
+{
+	struct irq_domain *domain = irq_desc_get_handler_data(desc);
+	unsigned int irq = irq_desc_get_irq(desc);
+
+	generic_handle_irq(irq_find_mapping(domain, irq));
+}
+
+static int __init of_tb10x_init_irq(struct device_node *ictl,
+					struct device_node *parent)
+{
+	int i, ret, nrirqs = of_irq_count(ictl);
+	struct resource mem;
+	struct irq_chip_generic *gc;
+	struct irq_domain *domain;
+	void __iomem *reg_base;
+
+	if (of_address_to_resource(ictl, 0, &mem)) {
+		pr_err("%s: No registers declared in DeviceTree.\n",
+			ictl->name);
+		return -EINVAL;
+	}
+
+	if (!request_mem_region(mem.start, resource_size(&mem),
+		ictl->name)) {
+		pr_err("%s: Request mem region failed.\n", ictl->name);
+		return -EBUSY;
+	}
+
+	reg_base = ioremap(mem.start, resource_size(&mem));
+	if (!reg_base) {
+		ret = -EBUSY;
+		pr_err("%s: ioremap failed.\n", ictl->name);
+		goto ioremap_fail;
+	}
+
+	domain = irq_domain_add_linear(ictl, AB_IRQCTL_MAXIRQ,
+					&irq_generic_chip_ops, NULL);
+	if (!domain) {
+		ret = -ENOMEM;
+		pr_err("%s: Could not register interrupt domain.\n",
+			ictl->name);
+		goto irq_domain_add_fail;
+	}
+
+	ret = irq_alloc_domain_generic_chips(domain, AB_IRQCTL_MAXIRQ,
+				2, ictl->name, handle_level_irq,
+				IRQ_NOREQUEST, IRQ_NOPROBE,
+				IRQ_GC_INIT_MASK_CACHE);
+	if (ret) {
+		pr_err("%s: Could not allocate generic interrupt chip.\n",
+			ictl->name);
+		goto gc_alloc_fail;
+	}
+
+	gc = domain->gc->gc[0];
+	gc->reg_base                         = reg_base;
+
+	gc->chip_types[0].type               = IRQ_TYPE_LEVEL_MASK;
+	gc->chip_types[0].chip.irq_mask      = irq_gc_mask_clr_bit;
+	gc->chip_types[0].chip.irq_unmask    = irq_gc_mask_set_bit;
+	gc->chip_types[0].chip.irq_set_type  = tb10x_irq_set_type;
+	gc->chip_types[0].regs.mask          = AB_IRQCTL_INT_ENABLE;
+
+	gc->chip_types[1].type               = IRQ_TYPE_EDGE_BOTH;
+	gc->chip_types[1].chip.name          = gc->chip_types[0].chip.name;
+	gc->chip_types[1].chip.irq_ack       = irq_gc_ack_set_bit;
+	gc->chip_types[1].chip.irq_mask      = irq_gc_mask_clr_bit;
+	gc->chip_types[1].chip.irq_unmask    = irq_gc_mask_set_bit;
+	gc->chip_types[1].chip.irq_set_type  = tb10x_irq_set_type;
+	gc->chip_types[1].regs.ack           = AB_IRQCTL_INT_STATUS;
+	gc->chip_types[1].regs.mask          = AB_IRQCTL_INT_ENABLE;
+	gc->chip_types[1].handler            = handle_edge_irq;
+
+	for (i = 0; i < nrirqs; i++) {
+		unsigned int irq = irq_of_parse_and_map(ictl, i);
+
+		irq_set_chained_handler_and_data(irq, tb10x_irq_cascade,
+						 domain);
+	}
+
+	ab_irqctl_writereg(gc, AB_IRQCTL_INT_ENABLE, 0);
+	ab_irqctl_writereg(gc, AB_IRQCTL_INT_MODE, 0);
+	ab_irqctl_writereg(gc, AB_IRQCTL_INT_POLARITY, 0);
+	ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, ~0UL);
+
+	return 0;
+
+gc_alloc_fail:
+	irq_domain_remove(domain);
+irq_domain_add_fail:
+	iounmap(reg_base);
+ioremap_fail:
+	release_mem_region(mem.start, resource_size(&mem));
+	return ret;
+}
+IRQCHIP_DECLARE(tb10x_intc, "abilis,tb10x-ictl", of_tb10x_init_irq);
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
new file mode 100644
index 0000000..0abc0cd
--- /dev/null
+++ b/drivers/irqchip/irq-tegra.c
@@ -0,0 +1,368 @@
+/*
+ * Driver code for Tegra's Legacy Interrupt Controller
+ *
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Heavily based on the original arch/arm/mach-tegra/irq.c code:
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ *	Colin Cross <ccross@android.com>
+ *
+ * Copyright (C) 2010,2013, NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define ICTLR_CPU_IEP_VFIQ	0x08
+#define ICTLR_CPU_IEP_FIR	0x14
+#define ICTLR_CPU_IEP_FIR_SET	0x18
+#define ICTLR_CPU_IEP_FIR_CLR	0x1c
+
+#define ICTLR_CPU_IER		0x20
+#define ICTLR_CPU_IER_SET	0x24
+#define ICTLR_CPU_IER_CLR	0x28
+#define ICTLR_CPU_IEP_CLASS	0x2C
+
+#define ICTLR_COP_IER		0x30
+#define ICTLR_COP_IER_SET	0x34
+#define ICTLR_COP_IER_CLR	0x38
+#define ICTLR_COP_IEP_CLASS	0x3c
+
+#define TEGRA_MAX_NUM_ICTLRS	6
+
+static unsigned int num_ictlrs;
+
+struct tegra_ictlr_soc {
+	unsigned int num_ictlrs;
+};
+
+static const struct tegra_ictlr_soc tegra20_ictlr_soc = {
+	.num_ictlrs = 4,
+};
+
+static const struct tegra_ictlr_soc tegra30_ictlr_soc = {
+	.num_ictlrs = 5,
+};
+
+static const struct tegra_ictlr_soc tegra210_ictlr_soc = {
+	.num_ictlrs = 6,
+};
+
+static const struct of_device_id ictlr_matches[] = {
+	{ .compatible = "nvidia,tegra210-ictlr", .data = &tegra210_ictlr_soc },
+	{ .compatible = "nvidia,tegra30-ictlr", .data = &tegra30_ictlr_soc },
+	{ .compatible = "nvidia,tegra20-ictlr", .data = &tegra20_ictlr_soc },
+	{ }
+};
+
+struct tegra_ictlr_info {
+	void __iomem *base[TEGRA_MAX_NUM_ICTLRS];
+#ifdef CONFIG_PM_SLEEP
+	u32 cop_ier[TEGRA_MAX_NUM_ICTLRS];
+	u32 cop_iep[TEGRA_MAX_NUM_ICTLRS];
+	u32 cpu_ier[TEGRA_MAX_NUM_ICTLRS];
+	u32 cpu_iep[TEGRA_MAX_NUM_ICTLRS];
+
+	u32 ictlr_wake_mask[TEGRA_MAX_NUM_ICTLRS];
+#endif
+};
+
+static struct tegra_ictlr_info *lic;
+
+static inline void tegra_ictlr_write_mask(struct irq_data *d, unsigned long reg)
+{
+	void __iomem *base = (void __iomem __force *)d->chip_data;
+	u32 mask;
+
+	mask = BIT(d->hwirq % 32);
+	writel_relaxed(mask, base + reg);
+}
+
+static void tegra_mask(struct irq_data *d)
+{
+	tegra_ictlr_write_mask(d, ICTLR_CPU_IER_CLR);
+	irq_chip_mask_parent(d);
+}
+
+static void tegra_unmask(struct irq_data *d)
+{
+	tegra_ictlr_write_mask(d, ICTLR_CPU_IER_SET);
+	irq_chip_unmask_parent(d);
+}
+
+static void tegra_eoi(struct irq_data *d)
+{
+	tegra_ictlr_write_mask(d, ICTLR_CPU_IEP_FIR_CLR);
+	irq_chip_eoi_parent(d);
+}
+
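+/* Retrigger by forcing the interrupt through the FIR set register */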
+static int tegra_retrigger(struct irq_data *d)
+{
+	tegra_ictlr_write_mask(d, ICTLR_CPU_IEP_FIR_SET);
+	return irq_chip_retrigger_hierarchy(d);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_set_wake(struct irq_data *d, unsigned int enable)
+{
+	u32 irq = d->hwirq;
+	u32 index, mask;
+
+	index = (irq / 32);
+	mask = BIT(irq % 32);
+	if (enable)
+		lic->ictlr_wake_mask[index] |= mask;
+	else
+		lic->ictlr_wake_mask[index] &= ~mask;
+
+	/*
+	 * Do *not* call into the parent, as the GIC doesn't have any
+	 * wake-up facility...
+	 */
+	return 0;
+}
+
+static int tegra_ictlr_suspend(void)
+{
+	unsigned long flags;
+	unsigned int i;
+
+	local_irq_save(flags);
+	for (i = 0; i < num_ictlrs; i++) {
+		void __iomem *ictlr = lic->base[i];
+
+		/* Save interrupt state */
+		lic->cpu_ier[i] = readl_relaxed(ictlr + ICTLR_CPU_IER);
+		lic->cpu_iep[i] = readl_relaxed(ictlr + ICTLR_CPU_IEP_CLASS);
+		lic->cop_ier[i] = readl_relaxed(ictlr + ICTLR_COP_IER);
+		lic->cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS);
+
+		/* Disable COP interrupts */
+		writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
+
+		/* Disable CPU interrupts */
+		writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+
+		/* Enable the wakeup sources of ictlr */
+		writel_relaxed(lic->ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET);
+	}
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+static void tegra_ictlr_resume(void)
+{
+	unsigned long flags;
+	unsigned int i;
+
+	local_irq_save(flags);
+	for (i = 0; i < num_ictlrs; i++) {
+		void __iomem *ictlr = lic->base[i];
+
+		writel_relaxed(lic->cpu_iep[i],
+			       ictlr + ICTLR_CPU_IEP_CLASS);
+		writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+		writel_relaxed(lic->cpu_ier[i],
+			       ictlr + ICTLR_CPU_IER_SET);
+		writel_relaxed(lic->cop_iep[i],
+			       ictlr + ICTLR_COP_IEP_CLASS);
+		writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
+		writel_relaxed(lic->cop_ier[i],
+			       ictlr + ICTLR_COP_IER_SET);
+	}
+	local_irq_restore(flags);
+}
+
+static struct syscore_ops tegra_ictlr_syscore_ops = {
+	.suspend	= tegra_ictlr_suspend,
+	.resume		= tegra_ictlr_resume,
+};
+
+static void tegra_ictlr_syscore_init(void)
+{
+	register_syscore_ops(&tegra_ictlr_syscore_ops);
+}
+#else
+#define tegra_set_wake	NULL
+static inline void tegra_ictlr_syscore_init(void) {}
+#endif
+
+static struct irq_chip tegra_ictlr_chip = {
+	.name			= "LIC",
+	.irq_eoi		= tegra_eoi,
+	.irq_mask		= tegra_mask,
+	.irq_unmask		= tegra_unmask,
+	.irq_retrigger		= tegra_retrigger,
+	.irq_set_wake		= tegra_set_wake,
+	.irq_set_type		= irq_chip_set_type_parent,
+	.flags			= IRQCHIP_MASK_ON_SUSPEND,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+#endif
+};
+
+static int tegra_ictlr_domain_translate(struct irq_domain *d,
+					struct irq_fwspec *fwspec,
+					unsigned long *hwirq,
+					unsigned int *type)
+{
+	if (is_of_node(fwspec->fwnode)) {
+		if (fwspec->param_count != 3)
+			return -EINVAL;
+
+		/* No PPI should point to this domain */
+		if (fwspec->param[0] != 0)
+			return -EINVAL;
+
+		*hwirq = fwspec->param[1];
+		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int tegra_ictlr_domain_alloc(struct irq_domain *domain,
+				    unsigned int virq,
+				    unsigned int nr_irqs, void *data)
+{
+	struct irq_fwspec *fwspec = data;
+	struct irq_fwspec parent_fwspec;
+	struct tegra_ictlr_info *info = domain->host_data;
+	irq_hw_number_t hwirq;
+	unsigned int i;
+
+	if (fwspec->param_count != 3)
+		return -EINVAL;	/* Not GIC compliant */
+	if (fwspec->param[0] != GIC_SPI)
+		return -EINVAL;	/* No PPI should point to this domain */
+
+	hwirq = fwspec->param[1];
+	if (hwirq >= (num_ictlrs * 32))
+		return -EINVAL;
+
+	for (i = 0; i < nr_irqs; i++) {
+		int ictlr = (hwirq + i) / 32;
+
+		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+					      &tegra_ictlr_chip,
+					      (void __force *)info->base[ictlr]);
+	}
+
+	parent_fwspec = *fwspec;
+	parent_fwspec.fwnode = domain->parent->fwnode;
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+					    &parent_fwspec);
+}
+
+static const struct irq_domain_ops tegra_ictlr_domain_ops = {
+	.translate	= tegra_ictlr_domain_translate,
+	.alloc		= tegra_ictlr_domain_alloc,
+	.free		= irq_domain_free_irqs_common,
+};
+
+static int __init tegra_ictlr_init(struct device_node *node,
+				   struct device_node *parent)
+{
+	struct irq_domain *parent_domain, *domain;
+	const struct of_device_id *match;
+	const struct tegra_ictlr_soc *soc;
+	unsigned int i;
+	int err;
+
+	if (!parent) {
+		pr_err("%pOF: no parent, giving up\n", node);
+		return -ENODEV;
+	}
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain) {
+		pr_err("%pOF: unable to obtain parent domain\n", node);
+		return -ENXIO;
+	}
+
+	match = of_match_node(ictlr_matches, node);
+	if (!match)		/* Should never happen... */
+		return -ENODEV;
+
+	soc = match->data;
+
+	lic = kzalloc(sizeof(*lic), GFP_KERNEL);
+	if (!lic)
+		return -ENOMEM;
+
+	for (i = 0; i < TEGRA_MAX_NUM_ICTLRS; i++) {
+		void __iomem *base;
+
+		base = of_iomap(node, i);
+		if (!base)
+			break;
+
+		lic->base[i] = base;
+
+		/* Disable all interrupts */
+		writel_relaxed(~0UL, base + ICTLR_CPU_IER_CLR);
+		/* All interrupts target IRQ */
+		writel_relaxed(0, base + ICTLR_CPU_IEP_CLASS);
+
+		num_ictlrs++;
+	}
+
+	if (!num_ictlrs) {
+		pr_err("%pOF: no valid regions, giving up\n", node);
+		err = -ENOMEM;
+		goto out_free;
+	}
+
+	WARN(num_ictlrs != soc->num_ictlrs,
+	     "%pOF: Found %u interrupt controllers in DT; expected %u.\n",
+	     node, num_ictlrs, soc->num_ictlrs);
+
+	domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32,
+					  node, &tegra_ictlr_domain_ops,
+					  lic);
+	if (!domain) {
+		pr_err("%pOF: failed to allocate domain\n", node);
+		err = -ENOMEM;
+		goto out_unmap;
+	}
+
+	tegra_ictlr_syscore_init();
+
+	pr_info("%pOF: %d interrupts forwarded to %pOF\n",
+		node, num_ictlrs * 32, parent);
+
+	return 0;
+
+out_unmap:
+	for (i = 0; i < num_ictlrs; i++)
+		iounmap(lic->base[i]);
+out_free:
+	kfree(lic);
+	return err;
+}
+
+IRQCHIP_DECLARE(tegra20_ictlr, "nvidia,tegra20-ictlr", tegra_ictlr_init);
+IRQCHIP_DECLARE(tegra30_ictlr, "nvidia,tegra30-ictlr", tegra_ictlr_init);
+IRQCHIP_DECLARE(tegra210_ictlr, "nvidia,tegra210-ictlr", tegra_ictlr_init);
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
new file mode 100644
index 0000000..2325fb3
--- /dev/null
+++ b/drivers/irqchip/irq-ts4800.c
@@ -0,0 +1,163 @@
+/*
+ * Multiplexed-IRQs driver for TS-4800's FPGA
+ *
+ * Copyright (c) 2015 - Savoir-faire Linux
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#define IRQ_MASK        0x4
+#define IRQ_STATUS      0x8
+
+struct ts4800_irq_data {
+	void __iomem            *base;
+	struct irq_domain       *domain;
+	struct irq_chip         irq_chip;
+};
+
+static void ts4800_irq_mask(struct irq_data *d)
+{
+	struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d);
+	u16 reg = readw(data->base + IRQ_MASK);
+	u16 mask = 1 << d->hwirq;
+
+	writew(reg | mask, data->base + IRQ_MASK);
+}
+
+static void ts4800_irq_unmask(struct irq_data *d)
+{
+	struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d);
+	u16 reg = readw(data->base + IRQ_MASK);
+	u16 mask = 1 << d->hwirq;
+
+	writew(reg & ~mask, data->base + IRQ_MASK);
+}
+
+static int ts4800_irqdomain_map(struct irq_domain *d, unsigned int irq,
+				irq_hw_number_t hwirq)
+{
+	struct ts4800_irq_data *data = d->host_data;
+
+	irq_set_chip_and_handler(irq, &data->irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, data);
+	irq_set_noprobe(irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops ts4800_ic_ops = {
+	.map = ts4800_irqdomain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+static void ts4800_ic_chained_handle_irq(struct irq_desc *desc)
+{
+	struct ts4800_irq_data *data = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	u16 status = readw(data->base + IRQ_STATUS);
+
+	chained_irq_enter(chip, desc);
+
+	if (unlikely(status == 0)) {
+		handle_bad_irq(desc);
+		goto out;
+	}
+
+	do {
+		unsigned int bit = __ffs(status);
+		int irq = irq_find_mapping(data->domain, bit);
+
+		status &= ~(1 << bit);
+		generic_handle_irq(irq);
+	} while (status);
+
+out:
+	chained_irq_exit(chip, desc);
+}
+
+static int ts4800_ic_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct ts4800_irq_data *data;
+	struct irq_chip *irq_chip;
+	struct resource *res;
+	int parent_irq;
+
+	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	data->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(data->base))
+		return PTR_ERR(data->base);
+
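+	/* Mask everything until the domain and chained handler are set up */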
+	writew(0xFFFF, data->base + IRQ_MASK);
+
+	parent_irq = irq_of_parse_and_map(node, 0);
+	if (!parent_irq) {
+		dev_err(&pdev->dev, "failed to get parent IRQ\n");
+		return -EINVAL;
+	}
+
+	irq_chip = &data->irq_chip;
+	irq_chip->name = dev_name(&pdev->dev);
+	irq_chip->irq_mask = ts4800_irq_mask;
+	irq_chip->irq_unmask = ts4800_irq_unmask;
+
+	data->domain = irq_domain_add_linear(node, 8, &ts4800_ic_ops, data);
+	if (!data->domain) {
+		dev_err(&pdev->dev, "cannot add IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	irq_set_chained_handler_and_data(parent_irq,
+					 ts4800_ic_chained_handle_irq, data);
+
+	platform_set_drvdata(pdev, data);
+
+	return 0;
+}
+
+static int ts4800_ic_remove(struct platform_device *pdev)
+{
+	struct ts4800_irq_data *data = platform_get_drvdata(pdev);
+
+	irq_domain_remove(data->domain);
+
+	return 0;
+}
+
+static const struct of_device_id ts4800_ic_of_match[] = {
+	{ .compatible = "technologic,ts4800-irqc", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ts4800_ic_of_match);
+
+static struct platform_driver ts4800_ic_driver = {
+	.probe  = ts4800_ic_probe,
+	.remove = ts4800_ic_remove,
+	.driver = {
+		.name = "ts4800-irqc",
+		.of_match_table = ts4800_ic_of_match,
+	},
+};
+module_platform_driver(ts4800_ic_driver);
+
+MODULE_AUTHOR("Damien Riegel <damien.riegel@savoirfairelinux.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ts4800_irqc");
diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c
new file mode 100644
index 0000000..7ba7f25
--- /dev/null
+++ b/drivers/irqchip/irq-uniphier-aidet.c
@@ -0,0 +1,261 @@
+/*
+ * Driver for UniPhier AIDET (ARM Interrupt Detector)
+ *
+ * Copyright (C) 2017 Socionext Inc.
+ *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define UNIPHIER_AIDET_NR_IRQS		256
+
+#define UNIPHIER_AIDET_DETCONF		0x04	/* inverter register base */
+
+struct uniphier_aidet_priv {
+	struct irq_domain *domain;
+	void __iomem *reg_base;
+	spinlock_t lock;
+	u32 saved_vals[UNIPHIER_AIDET_NR_IRQS / 32];
+};
+
+static void uniphier_aidet_reg_update(struct uniphier_aidet_priv *priv,
+				      unsigned int reg, u32 mask, u32 val)
+{
+	unsigned long flags;
+	u32 tmp;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	tmp = readl_relaxed(priv->reg_base + reg);
+	tmp &= ~mask;
+	tmp |= mask & val;
+	writel_relaxed(tmp, priv->reg_base + reg);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void uniphier_aidet_detconf_update(struct uniphier_aidet_priv *priv,
+					  unsigned long index, unsigned int val)
+{
+	unsigned int reg;
+	u32 mask;
+
+	reg = UNIPHIER_AIDET_DETCONF + index / 32 * 4;
+	mask = BIT(index % 32);
+
+	uniphier_aidet_reg_update(priv, reg, mask, val ? mask : 0);
+}
+
+static int uniphier_aidet_irq_set_type(struct irq_data *data, unsigned int type)
+{
+	struct uniphier_aidet_priv *priv = data->chip_data;
+	unsigned int val;
+
+	/* enable inverter for active low triggers */
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+	case IRQ_TYPE_LEVEL_HIGH:
+		val = 0;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		val = 1;
+		type = IRQ_TYPE_EDGE_RISING;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		val = 1;
+		type = IRQ_TYPE_LEVEL_HIGH;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	uniphier_aidet_detconf_update(priv, data->hwirq, val);
+
+	return irq_chip_set_type_parent(data, type);
+}
+
+static struct irq_chip uniphier_aidet_irq_chip = {
+	.name = "AIDET",
+	.irq_mask = irq_chip_mask_parent,
+	.irq_unmask = irq_chip_unmask_parent,
+	.irq_eoi = irq_chip_eoi_parent,
+	.irq_set_affinity = irq_chip_set_affinity_parent,
+	.irq_set_type = uniphier_aidet_irq_set_type,
+};
+
+static int uniphier_aidet_domain_translate(struct irq_domain *domain,
+					   struct irq_fwspec *fwspec,
+					   unsigned long *out_hwirq,
+					   unsigned int *out_type)
+{
+	if (WARN_ON(fwspec->param_count < 2))
+		return -EINVAL;
+
+	*out_hwirq = fwspec->param[0];
+	*out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+
+	return 0;
+}
+
+static int uniphier_aidet_domain_alloc(struct irq_domain *domain,
+				       unsigned int virq, unsigned int nr_irqs,
+				       void *arg)
+{
+	struct irq_fwspec parent_fwspec;
+	irq_hw_number_t hwirq;
+	unsigned int type;
+	int ret;
+
+	if (nr_irqs != 1)
+		return -EINVAL;
+
+	ret = uniphier_aidet_domain_translate(domain, arg, &hwirq, &type);
+	if (ret)
+		return ret;
+
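+	/*
+	 * Ask the parent GIC for the positive-sense equivalent; the
+	 * inverter programmed in uniphier_aidet_irq_set_type() handles
+	 * the polarity flip for falling/low triggers.
+	 */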
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+	case IRQ_TYPE_LEVEL_HIGH:
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		type = IRQ_TYPE_EDGE_RISING;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		type = IRQ_TYPE_LEVEL_HIGH;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (hwirq >= UNIPHIER_AIDET_NR_IRQS)
+		return -ENXIO;
+
+	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+					    &uniphier_aidet_irq_chip,
+					    domain->host_data);
+	if (ret)
+		return ret;
+
+	/* parent is GIC */
+	parent_fwspec.fwnode = domain->parent->fwnode;
+	parent_fwspec.param_count = 3;
+	parent_fwspec.param[0] = 0;		/* SPI */
+	parent_fwspec.param[1] = hwirq;
+	parent_fwspec.param[2] = type;
+
+	return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+}
+
+static const struct irq_domain_ops uniphier_aidet_domain_ops = {
+	.alloc = uniphier_aidet_domain_alloc,
+	.free = irq_domain_free_irqs_common,
+	.translate = uniphier_aidet_domain_translate,
+};
+
+static int uniphier_aidet_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *parent_np;
+	struct irq_domain *parent_domain;
+	struct uniphier_aidet_priv *priv;
+	struct resource *res;
+
+	parent_np = of_irq_find_parent(dev->of_node);
+	if (!parent_np)
+		return -ENXIO;
+
+	parent_domain = irq_find_host(parent_np);
+	of_node_put(parent_np);
+	if (!parent_domain)
+		return -EPROBE_DEFER;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->reg_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->reg_base))
+		return PTR_ERR(priv->reg_base);
+
+	spin_lock_init(&priv->lock);
+
+	priv->domain = irq_domain_create_hierarchy(
+					parent_domain, 0,
+					UNIPHIER_AIDET_NR_IRQS,
+					of_node_to_fwnode(dev->of_node),
+					&uniphier_aidet_domain_ops, priv);
+	if (!priv->domain)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, priv);
+
+	return 0;
+}
+
+static int __maybe_unused uniphier_aidet_suspend(struct device *dev)
+{
+	struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
+		priv->saved_vals[i] = readl_relaxed(
+			priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);
+
+	return 0;
+}
+
+static int __maybe_unused uniphier_aidet_resume(struct device *dev)
+{
+	struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
+		writel_relaxed(priv->saved_vals[i],
+			       priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);
+
+	return 0;
+}
+
+static const struct dev_pm_ops uniphier_aidet_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(uniphier_aidet_suspend,
+				      uniphier_aidet_resume)
+};
+
+static const struct of_device_id uniphier_aidet_match[] = {
+	{ .compatible = "socionext,uniphier-ld4-aidet" },
+	{ .compatible = "socionext,uniphier-pro4-aidet" },
+	{ .compatible = "socionext,uniphier-sld8-aidet" },
+	{ .compatible = "socionext,uniphier-pro5-aidet" },
+	{ .compatible = "socionext,uniphier-pxs2-aidet" },
+	{ .compatible = "socionext,uniphier-ld11-aidet" },
+	{ .compatible = "socionext,uniphier-ld20-aidet" },
+	{ .compatible = "socionext,uniphier-pxs3-aidet" },
+	{ /* sentinel */ }
+};
+
+static struct platform_driver uniphier_aidet_driver = {
+	.probe = uniphier_aidet_probe,
+	.driver = {
+		.name = "uniphier-aidet",
+		.of_match_table = uniphier_aidet_match,
+		.pm = &uniphier_aidet_pm_ops,
+	},
+};
+builtin_platform_driver(uniphier_aidet_driver);
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
new file mode 100644
index 0000000..928858d
--- /dev/null
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Support for Versatile FPGA-based IRQ controllers
+ */
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/versatile-fpga.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define IRQ_STATUS		0x00
+#define IRQ_RAW_STATUS		0x04
+#define IRQ_ENABLE_SET		0x08
+#define IRQ_ENABLE_CLEAR	0x0c
+#define INT_SOFT_SET		0x10
+#define INT_SOFT_CLEAR		0x14
+#define FIQ_STATUS		0x20
+#define FIQ_RAW_STATUS		0x24
+#define FIQ_ENABLE		0x28
+#define FIQ_ENABLE_SET		0x28
+#define FIQ_ENABLE_CLEAR	0x2C
+
+#define PIC_ENABLES             0x20	/* set interrupt pass through bits */
+
+/**
+ * struct fpga_irq_data - irq data container for the FPGA IRQ controller
+ * @base: register base address in virtual memory
+ * @chip: chip container for this instance
+ * @domain: IRQ domain for this instance
+ * @valid: mask for valid IRQs on this controller
+ * @used_irqs: number of active IRQs on this controller
+ */
+struct fpga_irq_data {
+	void __iomem *base;
+	struct irq_chip chip;
+	u32 valid;
+	struct irq_domain *domain;
+	u8 used_irqs;
+};
+
+/* we cannot allocate memory when the controllers are initially registered */
+static struct fpga_irq_data fpga_irq_devices[CONFIG_VERSATILE_FPGA_IRQ_NR];
+static int fpga_irq_id;
+
+static void fpga_irq_mask(struct irq_data *d)
+{
+	struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
+	u32 mask = 1 << d->hwirq;
+
+	writel(mask, f->base + IRQ_ENABLE_CLEAR);
+}
+
+static void fpga_irq_unmask(struct irq_data *d)
+{
+	struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
+	u32 mask = 1 << d->hwirq;
+
+	writel(mask, f->base + IRQ_ENABLE_SET);
+}
+
+static void fpga_irq_handle(struct irq_desc *desc)
+{
+	struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
+	u32 status = readl(f->base + IRQ_STATUS);
+
+	if (status == 0) {
+		do_bad_IRQ(desc);
+		return;
+	}
+
+	do {
+		unsigned int irq = ffs(status) - 1;
+
+		status &= ~(1 << irq);
+		generic_handle_irq(irq_find_mapping(f->domain, irq));
+	} while (status);
+}
+
+/*
+ * Handle each interrupt in a single FPGA IRQ controller.  Returns non-zero
+ * if we've handled at least one interrupt.  This does a single read of the
+ * status register and handles all interrupts in order from LSB first.
+ */
+static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs)
+{
+	int handled = 0;
+	int irq;
+	u32 status;
+
+	while ((status  = readl(f->base + IRQ_STATUS))) {
+		irq = ffs(status) - 1;
+		handle_domain_irq(f->domain, irq, regs);
+		handled = 1;
+	}
+
+	return handled;
+}
+
+/*
+ * Keep iterating over all registered FPGA IRQ controllers until there are
+ * no pending interrupts.
+ */
+asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
+{
+	int i, handled;
+
+	do {
+		for (i = 0, handled = 0; i < fpga_irq_id; ++i)
+			handled |= handle_one_fpga(&fpga_irq_devices[i], regs);
+	} while (handled);
+}
+
+static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
+		irq_hw_number_t hwirq)
+{
+	struct fpga_irq_data *f = d->host_data;
+
+	/* Skip invalid IRQs, only register handlers for the real ones */
+	if (!(f->valid & BIT(hwirq)))
+		return -EPERM;
+	irq_set_chip_data(irq, f);
+	irq_set_chip_and_handler(irq, &f->chip,
+				handle_level_irq);
+	irq_set_probe(irq);
+	return 0;
+}
+
+static const struct irq_domain_ops fpga_irqdomain_ops = {
+	.map = fpga_irqdomain_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
+void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
+			  int parent_irq, u32 valid, struct device_node *node)
+{
+	struct fpga_irq_data *f;
+	int i;
+
+	if (fpga_irq_id >= ARRAY_SIZE(fpga_irq_devices)) {
+		pr_err("%s: too few FPGA IRQ controllers, increase CONFIG_VERSATILE_FPGA_IRQ_NR\n", __func__);
+		return;
+	}
+	f = &fpga_irq_devices[fpga_irq_id];
+	f->base = base;
+	f->chip.name = name;
+	f->chip.irq_ack = fpga_irq_mask;
+	f->chip.irq_mask = fpga_irq_mask;
+	f->chip.irq_unmask = fpga_irq_unmask;
+	f->valid = valid;
+
+	if (parent_irq != -1) {
+		irq_set_chained_handler_and_data(parent_irq, fpga_irq_handle,
+						 f);
+	}
+
+	/* This will also allocate irq descriptors */
+	f->domain = irq_domain_add_simple(node, fls(valid), irq_start,
+					  &fpga_irqdomain_ops, f);
+
+	/* This will allocate all valid descriptors in the linear case */
+	for (i = 0; i < fls(valid); i++)
+		if (valid & BIT(i)) {
+			if (!irq_start)
+				irq_create_mapping(f->domain, i);
+			f->used_irqs++;
+		}
+
+	pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs",
+		fpga_irq_id, name, base, f->used_irqs);
+	if (parent_irq != -1)
+		pr_cont(", parent IRQ: %d\n", parent_irq);
+	else
+		pr_cont("\n");
+
+	fpga_irq_id++;
+}
+
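+/*
+ * Illustrative sketch of a device-tree node consumed by fpga_irq_of_init()
+ * below (the address and mask values are made up for illustration, not
+ * taken from a real board file):
+ *
+ *	pic: interrupt-controller@14000000 {
+ *		compatible = "arm,versatile-fpga-irq";
+ *		reg = <0x14000000 0x1000>;
+ *		interrupt-controller;
+ *		#interrupt-cells = <1>;
+ *		clear-mask = <0xffffffff>;
+ *		valid-mask = <0x003fffff>;
+ *	};
+ */
+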
+#ifdef CONFIG_OF
+int __init fpga_irq_of_init(struct device_node *node,
+			    struct device_node *parent)
+{
+	void __iomem *base;
+	u32 clear_mask;
+	u32 valid_mask;
+	int parent_irq;
+
+	if (WARN_ON(!node))
+		return -ENODEV;
+
+	base = of_iomap(node, 0);
+	WARN(!base, "unable to map fpga irq registers\n");
+
+	if (of_property_read_u32(node, "clear-mask", &clear_mask))
+		clear_mask = 0;
+
+	if (of_property_read_u32(node, "valid-mask", &valid_mask))
+		valid_mask = 0;
+
+	/* Some chips are cascaded from a parent IRQ */
+	parent_irq = irq_of_parse_and_map(node, 0);
+	if (!parent_irq) {
+		set_handle_irq(fpga_handle_irq);
+		parent_irq = -1;
+	}
+
+	fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
+
+	writel(clear_mask, base + IRQ_ENABLE_CLEAR);
+	writel(clear_mask, base + FIQ_ENABLE_CLEAR);
+
+	/*
+	 * On Versatile AB/PB, some secondary interrupts have a direct
+	 * pass-through to the primary controller for IRQs 20 and 22-31,
+	 * which need to be enabled. See section 3.10 of the Versatile AB
+	 * user guide.
+	 */
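+	/*
+	 * Worked example: bits 22..31 are 0xffc00000 and bit 20 is
+	 * 0x00100000, so the pass-through mask written below is
+	 * 0xffc00000 | 0x00100000 == 0xffd00000.
+	 */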
+	if (of_device_is_compatible(node, "arm,versatile-sic"))
+		writel(0xffd00000, base + PIC_ENABLES);
+
+	return 0;
+}
+IRQCHIP_DECLARE(arm_fpga, "arm,versatile-fpga-irq", fpga_irq_of_init);
+IRQCHIP_DECLARE(arm_fpga_sic, "arm,versatile-sic", fpga_irq_of_init);
+IRQCHIP_DECLARE(ox810se_rps, "oxsemi,ox810se-rps-irq", fpga_irq_of_init);
+#endif
diff --git a/drivers/irqchip/irq-vf610-mscm-ir.c b/drivers/irqchip/irq-vf610-mscm-ir.c
new file mode 100644
index 0000000..56b5e3c
--- /dev/null
+++ b/drivers/irqchip/irq-vf610-mscm-ir.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2014-2015 Toradex AG
+ * Author: Stefan Agner <stefan@agner.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * IRQ chip driver for the MSCM interrupt router available on Vybrid SoCs.
+ * The interrupt router sits between the CPU's interrupt controller and
+ * the peripherals. It allows routing each peripheral interrupt to either
+ * of the two CPUs available on Vybrid VF6xx SoCs (Cortex-A5 or
+ * Cortex-M4). The router is configured transparently on an IRQ request.
+ *
+ * o All peripheral interrupts of the Vybrid SoC can be routed to
+ *   CPU 0, CPU 1 or both. The routing is useful for dual-core
+ *   variants of Vybrid SoC such as VF6xx. This driver routes the
+ *   requested interrupt to the CPU it is currently running on.
+ *
+ * o The interrupt router must be set up even on single-core
+ *   variants of Vybrid.
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+
+#define MSCM_CPxNUM		0x4
+
+#define MSCM_IRSPRC(n)		(0x80 + 2 * (n))
+#define MSCM_IRSPRC_CPEN_MASK	0x3
+
+#define MSCM_IRSPRC_NUM		112
+
+struct vf610_mscm_ir_chip_data {
+	void __iomem *mscm_ir_base;
+	u16 cpu_mask;
+	u16 saved_irsprc[MSCM_IRSPRC_NUM];
+	bool is_nvic;
+};
+
+static struct vf610_mscm_ir_chip_data *mscm_ir_data;
+
+static inline void vf610_mscm_ir_save(struct vf610_mscm_ir_chip_data *data)
+{
+	int i;
+
+	for (i = 0; i < MSCM_IRSPRC_NUM; i++)
+		data->saved_irsprc[i] = readw_relaxed(data->mscm_ir_base + MSCM_IRSPRC(i));
+}
+
+static inline void vf610_mscm_ir_restore(struct vf610_mscm_ir_chip_data *data)
+{
+	int i;
+
+	for (i = 0; i < MSCM_IRSPRC_NUM; i++)
+		writew_relaxed(data->saved_irsprc[i], data->mscm_ir_base + MSCM_IRSPRC(i));
+}
+
+static int vf610_mscm_ir_notifier(struct notifier_block *self,
+				  unsigned long cmd, void *v)
+{
+	switch (cmd) {
+	case CPU_CLUSTER_PM_ENTER:
+		vf610_mscm_ir_save(mscm_ir_data);
+		break;
+	case CPU_CLUSTER_PM_ENTER_FAILED:
+	case CPU_CLUSTER_PM_EXIT:
+		vf610_mscm_ir_restore(mscm_ir_data);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block mscm_ir_notifier_block = {
+	.notifier_call = vf610_mscm_ir_notifier,
+};
+
+static void vf610_mscm_ir_enable(struct irq_data *data)
+{
+	irq_hw_number_t hwirq = data->hwirq;
+	struct vf610_mscm_ir_chip_data *chip_data = data->chip_data;
+	u16 irsprc;
+
+	irsprc = readw_relaxed(chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq));
+	irsprc &= MSCM_IRSPRC_CPEN_MASK;
+
+	WARN_ON(irsprc & ~chip_data->cpu_mask);
+
+	writew_relaxed(chip_data->cpu_mask,
+		       chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq));
+
+	irq_chip_enable_parent(data);
+}
+
+static void vf610_mscm_ir_disable(struct irq_data *data)
+{
+	irq_hw_number_t hwirq = data->hwirq;
+	struct vf610_mscm_ir_chip_data *chip_data = data->chip_data;
+
+	writew_relaxed(0x0, chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq));
+
+	irq_chip_disable_parent(data);
+}
+
+static struct irq_chip vf610_mscm_ir_irq_chip = {
+	.name			= "mscm-ir",
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_enable		= vf610_mscm_ir_enable,
+	.irq_disable		= vf610_mscm_ir_disable,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+};
+
+static int vf610_mscm_ir_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				      unsigned int nr_irqs, void *arg)
+{
+	int i;
+	irq_hw_number_t hwirq;
+	struct irq_fwspec *fwspec = arg;
+	struct irq_fwspec parent_fwspec;
+
+	if (!irq_domain_get_of_node(domain->parent))
+		return -EINVAL;
+
+	if (fwspec->param_count != 2)
+		return -EINVAL;
+
+	hwirq = fwspec->param[0];
+	for (i = 0; i < nr_irqs; i++)
+		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+					      &vf610_mscm_ir_irq_chip,
+					      domain->host_data);
+
+	parent_fwspec.fwnode = domain->parent->fwnode;
+
+	if (mscm_ir_data->is_nvic) {
+		parent_fwspec.param_count = 1;
+		parent_fwspec.param[0] = fwspec->param[0];
+	} else {
+		parent_fwspec.param_count = 3;
+		parent_fwspec.param[0] = GIC_SPI;
+		parent_fwspec.param[1] = fwspec->param[0];
+		parent_fwspec.param[2] = fwspec->param[1];
+	}
+
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+					    &parent_fwspec);
+}
+
+static int vf610_mscm_ir_domain_translate(struct irq_domain *d,
+					  struct irq_fwspec *fwspec,
+					  unsigned long *hwirq,
+					  unsigned int *type)
+{
+	if (WARN_ON(fwspec->param_count < 2))
+		return -EINVAL;
+	*hwirq = fwspec->param[0];
+	*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+	return 0;
+}
+
+static const struct irq_domain_ops mscm_irq_domain_ops = {
+	.translate = vf610_mscm_ir_domain_translate,
+	.alloc = vf610_mscm_ir_domain_alloc,
+	.free = irq_domain_free_irqs_common,
+};
+
+static int __init vf610_mscm_ir_of_init(struct device_node *node,
+			       struct device_node *parent)
+{
+	struct irq_domain *domain, *domain_parent;
+	struct regmap *mscm_cp_regmap;
+	int ret, cpuid;
+
+	domain_parent = irq_find_host(parent);
+	if (!domain_parent) {
+		pr_err("vf610_mscm_ir: interrupt-parent not found\n");
+		return -EINVAL;
+	}
+
+	mscm_ir_data = kzalloc(sizeof(*mscm_ir_data), GFP_KERNEL);
+	if (!mscm_ir_data)
+		return -ENOMEM;
+
+	mscm_ir_data->mscm_ir_base = of_io_request_and_map(node, 0, "mscm-ir");
+	if (IS_ERR(mscm_ir_data->mscm_ir_base)) {
+		pr_err("vf610_mscm_ir: unable to map mscm register\n");
+		ret = PTR_ERR(mscm_ir_data->mscm_ir_base);
+		goto out_free;
+	}
+
+	mscm_cp_regmap = syscon_regmap_lookup_by_phandle(node, "fsl,cpucfg");
+	if (IS_ERR(mscm_cp_regmap)) {
+		ret = PTR_ERR(mscm_cp_regmap);
+		pr_err("vf610_mscm_ir: regmap lookup for cpucfg failed\n");
+		goto out_unmap;
+	}
+
+	regmap_read(mscm_cp_regmap, MSCM_CPxNUM, &cpuid);
+	mscm_ir_data->cpu_mask = 0x1 << cpuid;
+
+	domain = irq_domain_add_hierarchy(domain_parent, 0,
+					  MSCM_IRSPRC_NUM, node,
+					  &mscm_irq_domain_ops, mscm_ir_data);
+	if (!domain) {
+		ret = -ENOMEM;
+		goto out_unmap;
+	}
+
+	if (of_device_is_compatible(irq_domain_get_of_node(domain->parent),
+				    "arm,armv7m-nvic"))
+		mscm_ir_data->is_nvic = true;
+
+	cpu_pm_register_notifier(&mscm_ir_notifier_block);
+
+	return 0;
+
+out_unmap:
+	iounmap(mscm_ir_data->mscm_ir_base);
+out_free:
+	kfree(mscm_ir_data);
+	return ret;
+}
+IRQCHIP_DECLARE(vf610_mscm_ir, "fsl,vf610-mscm-ir", vf610_mscm_ir_of_init);
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
new file mode 100644
index 0000000..74192f6
--- /dev/null
+++ b/drivers/irqchip/irq-vic.c
@@ -0,0 +1,546 @@
+/*
+ *  linux/arch/arm/common/vic.c
+ *
+ *  Copyright (C) 1999 - 2003 ARM Limited
+ *  Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/irqchip/arm-vic.h>
+
+#include <asm/exception.h>
+#include <asm/irq.h>
+
+#define VIC_IRQ_STATUS			0x00
+#define VIC_FIQ_STATUS			0x04
+#define VIC_INT_SELECT			0x0c	/* 1 = FIQ, 0 = IRQ */
+#define VIC_INT_SOFT			0x18
+#define VIC_INT_SOFT_CLEAR		0x1c
+#define VIC_PROTECT			0x20
+#define VIC_PL190_VECT_ADDR		0x30	/* PL190 only */
+#define VIC_PL190_DEF_VECT_ADDR		0x34	/* PL190 only */
+
+#define VIC_VECT_ADDR0			0x100	/* 0 to 15 (0..31 PL192) */
+#define VIC_VECT_CNTL0			0x200	/* 0 to 15 (0..31 PL192) */
+#define VIC_ITCR			0x300	/* VIC test control register */
+
+#define VIC_VECT_CNTL_ENABLE		(1 << 5)
+
+#define VIC_PL192_VECT_ADDR		0xF00
+
+/**
+ * struct vic_device - VIC PM device
+ * @irq: The IRQ number for the base of the VIC.
+ * @base: The register base for the VIC.
+ * @valid_sources: A bitmask of valid interrupts
+ * @resume_sources: A bitmask of interrupts for resume.
+ * @resume_irqs: The IRQs enabled for resume.
+ * @int_select: Save for VIC_INT_SELECT.
+ * @int_enable: Save for VIC_INT_ENABLE.
+ * @soft_int: Save for VIC_INT_SOFT.
+ * @protect: Save for VIC_PROTECT.
+ * @domain: The IRQ domain for the VIC.
+ */
+struct vic_device {
+	void __iomem	*base;
+	int		irq;
+	u32		valid_sources;
+	u32		resume_sources;
+	u32		resume_irqs;
+	u32		int_select;
+	u32		int_enable;
+	u32		soft_int;
+	u32		protect;
+	struct irq_domain *domain;
+};
+
+/* we cannot allocate memory when VICs are initially registered */
+static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
+
+static int vic_id;
+
+static void vic_handle_irq(struct pt_regs *regs);
+
+/**
+ * vic_init2 - common initialisation code
+ * @base: Base of the VIC.
+ *
+ * Common initialisation code for registration
+ * and resume.
+ */
+static void vic_init2(void __iomem *base)
+{
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
+		writel(VIC_VECT_CNTL_ENABLE | i, reg);
+	}
+
+	writel(32, base + VIC_PL190_DEF_VECT_ADDR);
+}
+
+#ifdef CONFIG_PM
+static void resume_one_vic(struct vic_device *vic)
+{
+	void __iomem *base = vic->base;
+
+	printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base);
+
+	/* re-initialise static settings */
+	vic_init2(base);
+
+	writel(vic->int_select, base + VIC_INT_SELECT);
+	writel(vic->protect, base + VIC_PROTECT);
+
+	/* set the enabled ints and then clear the non-enabled */
+	writel(vic->int_enable, base + VIC_INT_ENABLE);
+	writel(~vic->int_enable, base + VIC_INT_ENABLE_CLEAR);
+
+	/* and the same for the soft-int register */
+	writel(vic->soft_int, base + VIC_INT_SOFT);
+	writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void vic_resume(void)
+{
+	int id;
+
+	for (id = vic_id - 1; id >= 0; id--)
+		resume_one_vic(vic_devices + id);
+}
+
+static void suspend_one_vic(struct vic_device *vic)
+{
+	void __iomem *base = vic->base;
+
+	printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base);
+
+	vic->int_select = readl(base + VIC_INT_SELECT);
+	vic->int_enable = readl(base + VIC_INT_ENABLE);
+	vic->soft_int = readl(base + VIC_INT_SOFT);
+	vic->protect = readl(base + VIC_PROTECT);
+
+	/*
+	 * Set the interrupts (if any) that are used for
+	 * resuming the system.
+	 */
+
+	writel(vic->resume_irqs, base + VIC_INT_ENABLE);
+	writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR);
+}
+
+static int vic_suspend(void)
+{
+	int id;
+
+	for (id = 0; id < vic_id; id++)
+		suspend_one_vic(vic_devices + id);
+
+	return 0;
+}
+
+static struct syscore_ops vic_syscore_ops = {
+	.suspend	= vic_suspend,
+	.resume		= vic_resume,
+};
+
+/**
+ * vic_pm_init - initcall to register VIC PM
+ *
+ * This is called via late_initcall() to register
+ * the resources for the VICs due to the early
+ * nature of the VIC's registration.
+ */
+static int __init vic_pm_init(void)
+{
+	if (vic_id > 0)
+		register_syscore_ops(&vic_syscore_ops);
+
+	return 0;
+}
+late_initcall(vic_pm_init);
+#endif /* CONFIG_PM */
+
+static struct irq_chip vic_chip;
+
+static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+			     irq_hw_number_t hwirq)
+{
+	struct vic_device *v = d->host_data;
+
+	/* Skip invalid IRQs, only register handlers for the real ones */
+	if (!(v->valid_sources & (1 << hwirq)))
+		return -EPERM;
+	irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
+	irq_set_chip_data(irq, v->base);
+	irq_set_probe(irq);
+	return 0;
+}
+
+/*
+ * Handle each interrupt in a single VIC.  Returns non-zero if we've
+ * handled at least one interrupt.  This reads the status register
+ * before handling each interrupt, which is necessary given that
+ * handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
+ */
+static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
+{
+	u32 stat, irq;
+	int handled = 0;
+
+	while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
+		irq = ffs(stat) - 1;
+		handle_domain_irq(vic->domain, irq, regs);
+		handled = 1;
+	}
+
+	return handled;
+}
+
+static void vic_handle_irq_cascaded(struct irq_desc *desc)
+{
+	u32 stat, hwirq;
+	struct irq_chip *host_chip = irq_desc_get_chip(desc);
+	struct vic_device *vic = irq_desc_get_handler_data(desc);
+
+	chained_irq_enter(host_chip, desc);
+
+	while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
+		hwirq = ffs(stat) - 1;
+		generic_handle_irq(irq_find_mapping(vic->domain, hwirq));
+	}
+
+	chained_irq_exit(host_chip, desc);
+}
+
+/*
+ * Keep iterating over all registered VICs until there are no pending
+ * interrupts.
+ */
+static void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
+{
+	int i, handled;
+
+	do {
+		for (i = 0, handled = 0; i < vic_id; ++i)
+			handled |= handle_one_vic(&vic_devices[i], regs);
+	} while (handled);
+}
+
+static const struct irq_domain_ops vic_irqdomain_ops = {
+	.map = vic_irqdomain_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * vic_register() - Register a VIC.
+ * @base: The base address of the VIC.
+ * @parent_irq: The parent IRQ if cascaded, else 0.
+ * @irq: The base IRQ for the VIC.
+ * @valid_sources: bitmask of valid interrupts
+ * @resume_sources: bitmask of interrupts allowed for resume sources.
+ * @node: The device tree node associated with the VIC.
+ *
+ * Register the VIC with the system device tree so that it can be notified
+ * of suspend and resume requests and ensure that the correct actions are
+ * taken to re-instate the settings on resume.
+ *
+ * This also configures the IRQ domain for the VIC.
+ */
+static void __init vic_register(void __iomem *base, unsigned int parent_irq,
+				unsigned int irq,
+				u32 valid_sources, u32 resume_sources,
+				struct device_node *node)
+{
+	struct vic_device *v;
+	int i;
+
+	if (vic_id >= ARRAY_SIZE(vic_devices)) {
+		printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
+		return;
+	}
+
+	v = &vic_devices[vic_id];
+	v->base = base;
+	v->valid_sources = valid_sources;
+	v->resume_sources = resume_sources;
+	set_handle_irq(vic_handle_irq);
+	vic_id++;
+
+	if (parent_irq) {
+		irq_set_chained_handler_and_data(parent_irq,
+						 vic_handle_irq_cascaded, v);
+	}
+
+	v->domain = irq_domain_add_simple(node, fls(valid_sources), irq,
+					  &vic_irqdomain_ops, v);
+	/* create an IRQ mapping for each valid IRQ */
+	for (i = 0; i < fls(valid_sources); i++)
+		if (valid_sources & (1 << i))
+			irq_create_mapping(v->domain, i);
+	/* If no base IRQ was passed, figure out our allocated base */
+	if (irq)
+		v->irq = irq;
+	else
+		v->irq = irq_find_mapping(v->domain, 0);
+}
+
+static void vic_ack_irq(struct irq_data *d)
+{
+	void __iomem *base = irq_data_get_irq_chip_data(d);
+	unsigned int irq = d->hwirq;
+	writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
+	/* moreover, clear the soft-triggered, in case it was the reason */
+	writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void vic_mask_irq(struct irq_data *d)
+{
+	void __iomem *base = irq_data_get_irq_chip_data(d);
+	unsigned int irq = d->hwirq;
+	writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
+}
+
+static void vic_unmask_irq(struct irq_data *d)
+{
+	void __iomem *base = irq_data_get_irq_chip_data(d);
+	unsigned int irq = d->hwirq;
+	writel(1 << irq, base + VIC_INT_ENABLE);
+}
+
+#if defined(CONFIG_PM)
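+/*
+ * Find the VIC owning a given Linux IRQ. The "irq & ~31" truncation
+ * relies on each VIC's IRQ base being 32-aligned, which vic_init()
+ * documents as a requirement on irq_start.
+ */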
+static struct vic_device *vic_from_irq(unsigned int irq)
+{
+	struct vic_device *v = vic_devices;
+	unsigned int base_irq = irq & ~31;
+	int id;
+
+	for (id = 0; id < vic_id; id++, v++) {
+		if (v->irq == base_irq)
+			return v;
+	}
+
+	return NULL;
+}
+
+static int vic_set_wake(struct irq_data *d, unsigned int on)
+{
+	struct vic_device *v = vic_from_irq(d->irq);
+	unsigned int off = d->hwirq;
+	u32 bit = 1 << off;
+
+	if (!v)
+		return -EINVAL;
+
+	if (!(bit & v->resume_sources))
+		return -EINVAL;
+
+	if (on)
+		v->resume_irqs |= bit;
+	else
+		v->resume_irqs &= ~bit;
+
+	return 0;
+}
+#else
+#define vic_set_wake NULL
+#endif /* CONFIG_PM */
+
+static struct irq_chip vic_chip = {
+	.name		= "VIC",
+	.irq_ack	= vic_ack_irq,
+	.irq_mask	= vic_mask_irq,
+	.irq_unmask	= vic_unmask_irq,
+	.irq_set_wake	= vic_set_wake,
+};
+
+static void __init vic_disable(void __iomem *base)
+{
+	writel(0, base + VIC_INT_SELECT);
+	writel(0, base + VIC_INT_ENABLE);
+	writel(~0, base + VIC_INT_ENABLE_CLEAR);
+	writel(0, base + VIC_ITCR);
+	writel(~0, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void __init vic_clear_interrupts(void __iomem *base)
+{
+	unsigned int i;
+
+	writel(0, base + VIC_PL190_VECT_ADDR);
+	for (i = 0; i < 19; i++) {
+		unsigned int value;
+
+		value = readl(base + VIC_PL190_VECT_ADDR);
+		writel(value, base + VIC_PL190_VECT_ADDR);
+	}
+}
+
+/*
+ * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
+ * The original cell has 32 interrupts, while the modified one has 64,
+ * replicating the 0x00..0x1f register block at 0x20..0x3f. In that case
+ * the probe function is called twice, with base set to offset 0x00 and
+ * 0x20 within the page. We call the latter the "second block".
+ */
+static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
+			       u32 vic_sources, struct device_node *node)
+{
+	unsigned int i;
+	int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
+
+	/* Disable all interrupts initially. */
+	vic_disable(base);
+
+	/*
+	 * Make sure we clear all existing interrupts. The vector registers
+	 * in this cell are after the second block of general registers,
+	 * so we can address them using standard offsets, but only from
+	 * the second base address, which is 0x20 in the page
+	 */
+	if (vic_2nd_block) {
+		vic_clear_interrupts(base);
+
+		/* ST has 16 vectors as well, but we don't enable them for now */
+		for (i = 0; i < 16; i++) {
+			void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
+			writel(0, reg);
+		}
+
+		writel(32, base + VIC_PL190_DEF_VECT_ADDR);
+	}
+
+	vic_register(base, 0, irq_start, vic_sources, 0, node);
+}
+
+void __init __vic_init(void __iomem *base, int parent_irq, int irq_start,
+			      u32 vic_sources, u32 resume_sources,
+			      struct device_node *node)
+{
+	unsigned int i;
+	u32 cellid = 0;
+	enum amba_vendor vendor;
+
+	/* Identify which VIC cell this one is, by reading the ID */
+	for (i = 0; i < 4; i++) {
+		void __iomem *addr;
+		addr = (void __iomem *)((u32)base & PAGE_MASK) + 0xfe0 + (i * 4);
+		cellid |= (readl(addr) & 0xff) << (8 * i);
+	}
+	vendor = (cellid >> 12) & 0xff;
+	printk(KERN_INFO "VIC @%p: id 0x%08x, vendor 0x%02x\n",
+	       base, cellid, vendor);
+
+	switch(vendor) {
+	case AMBA_VENDOR_ST:
+		vic_init_st(base, irq_start, vic_sources, node);
+		return;
+	default:
+		printk(KERN_WARNING "VIC: unknown vendor, continuing anyway\n");
+		/* fall through */
+	case AMBA_VENDOR_ARM:
+		break;
+	}
+
+	/* Disable all interrupts initially. */
+	vic_disable(base);
+
+	/* Make sure we clear all existing interrupts */
+	vic_clear_interrupts(base);
+
+	vic_init2(base);
+
+	vic_register(base, parent_irq, irq_start, vic_sources, resume_sources, node);
+}
+
+/**
+ * vic_init() - initialise a vectored interrupt controller
+ * @base: iomem base address
+ * @irq_start: starting interrupt number, must be a multiple of 32
+ * @vic_sources: bitmask of interrupt sources to allow
+ * @resume_sources: bitmask of interrupt sources to allow for resume
+ */
+void __init vic_init(void __iomem *base, unsigned int irq_start,
+		     u32 vic_sources, u32 resume_sources)
+{
+	__vic_init(base, 0, irq_start, vic_sources, resume_sources, NULL);
+}
+
+/**
+ * vic_init_cascaded() - initialise a cascaded vectored interrupt controller
+ * @base: iomem base address
+ * @parent_irq: the parent IRQ we're cascaded off
+ * @vic_sources: bitmask of interrupt sources to allow
+ * @resume_sources: bitmask of interrupt sources to allow for resume
+ *
+ * This returns the base for the new interrupts or negative on error.
+ */
+int __init vic_init_cascaded(void __iomem *base, unsigned int parent_irq,
+			      u32 vic_sources, u32 resume_sources)
+{
+	struct vic_device *v;
+
+	v = &vic_devices[vic_id];
+	__vic_init(base, parent_irq, 0, vic_sources, resume_sources, NULL);
+	/* Return our acquired base */
+	return v->irq;
+}
+EXPORT_SYMBOL_GPL(vic_init_cascaded);
+
+#ifdef CONFIG_OF
+static int __init vic_of_init(struct device_node *node,
+			      struct device_node *parent)
+{
+	void __iomem *regs;
+	u32 interrupt_mask = ~0;
+	u32 wakeup_mask = ~0;
+
+	if (WARN(parent, "non-root VICs are not supported"))
+		return -EINVAL;
+
+	regs = of_iomap(node, 0);
+	if (WARN_ON(!regs))
+		return -EIO;
+
+	of_property_read_u32(node, "valid-mask", &interrupt_mask);
+	of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask);
+
+	/*
+	 * Passing 0 as first IRQ makes the simple domain allocate descriptors
+	 */
+	__vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node);
+
+	return 0;
+}
+IRQCHIP_DECLARE(arm_pl190_vic, "arm,pl190-vic", vic_of_init);
+IRQCHIP_DECLARE(arm_pl192_vic, "arm,pl192-vic", vic_of_init);
+IRQCHIP_DECLARE(arm_versatile_vic, "arm,versatile-vic", vic_of_init);
+#endif /* CONFIG_OF */
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c
new file mode 100644
index 0000000..f9af0af
--- /dev/null
+++ b/drivers/irqchip/irq-vt8500.c
@@ -0,0 +1,257 @@
+/*
+ *  arch/arm/mach-vt8500/irq.c
+ *
+ *  Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
+ *  Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/*
+ * This file is copied and modified from the original irq.c provided by
+ * Alexey Charkov. Minor changes have been made for Device Tree Support.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include <asm/irq.h>
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define VT8500_ICPC_IRQ		0x20
+#define VT8500_ICPC_FIQ		0x24
+#define VT8500_ICDC		0x40		/* Destination Control 64*u32 */
+#define VT8500_ICIS		0x80		/* Interrupt status, 16*u32 */
+
+/* ICPC */
+#define ICPC_MASK		0x3F
+#define ICPC_ROTATE		BIT(6)
+
+/* IC_DCTR */
+#define ICDC_IRQ		0x00
+#define ICDC_FIQ		0x01
+#define ICDC_DSS0		0x02
+#define ICDC_DSS1		0x03
+#define ICDC_DSS2		0x04
+#define ICDC_DSS3		0x05
+#define ICDC_DSS4		0x06
+#define ICDC_DSS5		0x07
+
+#define VT8500_INT_DISABLE	0
+#define VT8500_INT_ENABLE	BIT(3)
+
+#define VT8500_TRIGGER_HIGH	0
+#define VT8500_TRIGGER_RISING	BIT(5)
+#define VT8500_TRIGGER_FALLING	BIT(6)
+#define VT8500_EDGE		( VT8500_TRIGGER_RISING \
+				| VT8500_TRIGGER_FALLING)
+
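+/*
+ * Each interrupt has a byte-wide destination-control register at
+ * VT8500_ICDC + hwirq. As the definitions above suggest: bits 0-2
+ * select the destination (IRQ, FIQ or DSS0-5), bit 3 enables the
+ * source, and bits 5-6 select the trigger mode.
+ */
+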
+/* vt8500 has 1 intc, wm8505 and wm8650 have 2 */
+#define VT8500_INTC_MAX		2
+
+struct vt8500_irq_data {
+	void __iomem 		*base;		/* IO Memory base address */
+	struct irq_domain	*domain;	/* Domain for this controller */
+};
+
+/* Global variable for accessing io-mem addresses */
+static struct vt8500_irq_data intc[VT8500_INTC_MAX];
+static u32 active_cnt = 0;
+
+static void vt8500_irq_mask(struct irq_data *d)
+{
+	struct vt8500_irq_data *priv = d->domain->host_data;
+	void __iomem *base = priv->base;
+	void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 0 : 4);
+	u8 edge, dctr;
+	u32 status;
+
+	edge = readb(base + VT8500_ICDC + d->hwirq) & VT8500_EDGE;
+	if (edge) {
+		status = readl(stat_reg);
+
+		status |= (1 << (d->hwirq & 0x1f));
+		writel(status, stat_reg);
+	} else {
+		dctr = readb(base + VT8500_ICDC + d->hwirq);
+		dctr &= ~VT8500_INT_ENABLE;
+		writeb(dctr, base + VT8500_ICDC + d->hwirq);
+	}
+}
+
+static void vt8500_irq_unmask(struct irq_data *d)
+{
+	struct vt8500_irq_data *priv = d->domain->host_data;
+	void __iomem *base = priv->base;
+	u8 dctr;
+
+	dctr = readb(base + VT8500_ICDC + d->hwirq);
+	dctr |= VT8500_INT_ENABLE;
+	writeb(dctr, base + VT8500_ICDC + d->hwirq);
+}
+
+static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+	struct vt8500_irq_data *priv = d->domain->host_data;
+	void __iomem *base = priv->base;
+	u8 dctr;
+
+	dctr = readb(base + VT8500_ICDC + d->hwirq);
+	dctr &= ~VT8500_EDGE;
+
+	switch (flow_type) {
+	case IRQF_TRIGGER_LOW:
+		return -EINVAL;
+	case IRQF_TRIGGER_HIGH:
+		dctr |= VT8500_TRIGGER_HIGH;
+		irq_set_handler_locked(d, handle_level_irq);
+		break;
+	case IRQF_TRIGGER_FALLING:
+		dctr |= VT8500_TRIGGER_FALLING;
+		irq_set_handler_locked(d, handle_edge_irq);
+		break;
+	case IRQF_TRIGGER_RISING:
+		dctr |= VT8500_TRIGGER_RISING;
+		irq_set_handler_locked(d, handle_edge_irq);
+		break;
+	}
+	writeb(dctr, base + VT8500_ICDC + d->hwirq);
+
+	return 0;
+}
+
+static struct irq_chip vt8500_irq_chip = {
+	.name = "vt8500",
+	.irq_ack = vt8500_irq_mask,
+	.irq_mask = vt8500_irq_mask,
+	.irq_unmask = vt8500_irq_unmask,
+	.irq_set_type = vt8500_irq_set_type,
+};
+
+static void __init vt8500_init_irq_hw(void __iomem *base)
+{
+	u32 i;
+
+	/* Enable rotating priority for IRQ */
+	writel(ICPC_ROTATE, base + VT8500_ICPC_IRQ);
+	writel(0x00, base + VT8500_ICPC_FIQ);
+
+	/* Disable all interrupts and route them to IRQ */
+	for (i = 0; i < 64; i++)
+		writeb(VT8500_INT_DISABLE | ICDC_IRQ, base + VT8500_ICDC + i);
+}
+
+static int vt8500_irq_map(struct irq_domain *h, unsigned int virq,
+							irq_hw_number_t hw)
+{
+	irq_set_chip_and_handler(virq, &vt8500_irq_chip, handle_level_irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops vt8500_irq_domain_ops = {
+	.map = vt8500_irq_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
+{
+	u32 stat, i;
+	int irqnr;
+	void __iomem *base;
+
+	/* Loop through each active controller */
+	for (i = 0; i < active_cnt; i++) {
+		base = intc[i].base;
+		irqnr = readl_relaxed(base) & 0x3F;
+		/*
+		 * The Highest Priority register defaults to 63, so check
+		 * that this is a real interrupt by checking the status
+		 * register.
+		 */
+		if (irqnr == 63) {
+			stat = readl_relaxed(base + VT8500_ICIS + 4);
+			if (!(stat & BIT(31)))
+				continue;
+		}
+
+		handle_domain_irq(intc[i].domain, irqnr, regs);
+	}
+}
+
+static int __init vt8500_irq_init(struct device_node *node,
+				  struct device_node *parent)
+{
+	int irq, i;
+	struct device_node *np = node;
+
+	if (active_cnt == VT8500_INTC_MAX) {
+		pr_err("%s: Interrupt controllers > VT8500_INTC_MAX\n",
+								__func__);
+		goto out;
+	}
+
+	intc[active_cnt].base = of_iomap(np, 0);
+	intc[active_cnt].domain = irq_domain_add_linear(node, 64,
+			&vt8500_irq_domain_ops,	&intc[active_cnt]);
+
+	if (!intc[active_cnt].base) {
+		pr_err("%s: Unable to map IO memory\n", __func__);
+		goto out;
+	}
+
+	if (!intc[active_cnt].domain) {
+		pr_err("%s: Unable to add irq domain!\n", __func__);
+		goto out;
+	}
+
+	set_handle_irq(vt8500_handle_irq);
+
+	vt8500_init_irq_hw(intc[active_cnt].base);
+
+	pr_info("vt8500-irq: Added interrupt controller\n");
+
+	active_cnt++;
+
+	/* check if this is a slaved controller */
+	if (of_irq_count(np) != 0) {
+		/* check that we have the correct number of interrupts */
+		if (of_irq_count(np) != 8) {
+			pr_err("%s: Incorrect IRQ map for slaved controller\n",
+					__func__);
+			return -EINVAL;
+		}
+
+		for (i = 0; i < 8; i++) {
+			irq = irq_of_parse_and_map(np, i);
+			enable_irq(irq);
+		}
+
+		pr_info("vt8500-irq: Enabled slave->parent interrupts\n");
+	}
+out:
+	return 0;
+}
+
+IRQCHIP_DECLARE(vt8500_irq, "via,vt8500-intc", vt8500_irq_init);
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
new file mode 100644
index 0000000..e3043de
--- /dev/null
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2007-2013 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2012-2013 Xilinx, Inc.
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/jump_label.h>
+#include <linux/bug.h>
+#include <linux/of_irq.h>
+
+/* No one else should require these constants, so define them locally here. */
+#define ISR 0x00			/* Interrupt Status Register */
+#define IPR 0x04			/* Interrupt Pending Register */
+#define IER 0x08			/* Interrupt Enable Register */
+#define IAR 0x0c			/* Interrupt Acknowledge Register */
+#define SIE 0x10			/* Set Interrupt Enable bits */
+#define CIE 0x14			/* Clear Interrupt Enable bits */
+#define IVR 0x18			/* Interrupt Vector Register */
+#define MER 0x1c			/* Master Enable Register */
+
+#define MER_ME (1<<0)
+#define MER_HIE (1<<1)
+
+static DEFINE_STATIC_KEY_FALSE(xintc_is_be);
+
+struct xintc_irq_chip {
+	void		__iomem *base;
+	struct		irq_domain *root_domain;
+	u32		intr_mask;
+};
+
+static struct xintc_irq_chip *xintc_irqc;
+
+static void xintc_write(int reg, u32 data)
+{
+	if (static_branch_unlikely(&xintc_is_be))
+		iowrite32be(data, xintc_irqc->base + reg);
+	else
+		iowrite32(data, xintc_irqc->base + reg);
+}
+
+static unsigned int xintc_read(int reg)
+{
+	if (static_branch_unlikely(&xintc_is_be))
+		return ioread32be(xintc_irqc->base + reg);
+	else
+		return ioread32(xintc_irqc->base + reg);
+}
+
+static void intc_enable_or_unmask(struct irq_data *d)
+{
+	unsigned long mask = 1 << d->hwirq;
+
+	pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
+
+	/*
+	 * Ack level irqs here because they cannot be acked in the ack
+	 * callback: handle_level_irq() acks the irq before calling the
+	 * interrupt handler.
+	 */
+	if (irqd_is_level_type(d))
+		xintc_write(IAR, mask);
+
+	xintc_write(SIE, mask);
+}
+
+static void intc_disable_or_mask(struct irq_data *d)
+{
+	pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
+	xintc_write(CIE, 1 << d->hwirq);
+}
+
+static void intc_ack(struct irq_data *d)
+{
+	pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
+	xintc_write(IAR, 1 << d->hwirq);
+}
+
+static void intc_mask_ack(struct irq_data *d)
+{
+	unsigned long mask = 1 << d->hwirq;
+
+	pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
+	xintc_write(CIE, mask);
+	xintc_write(IAR, mask);
+}
+
+static struct irq_chip intc_dev = {
+	.name = "Xilinx INTC",
+	.irq_unmask = intc_enable_or_unmask,
+	.irq_mask = intc_disable_or_mask,
+	.irq_ack = intc_ack,
+	.irq_mask_ack = intc_mask_ack,
+};
+
+unsigned int xintc_get_irq(void)
+{
+	unsigned int hwirq, irq = -1;
+
+	hwirq = xintc_read(IVR);
+	if (hwirq != -1U)
+		irq = irq_find_mapping(xintc_irqc->root_domain, hwirq);
+
+	pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
+
+	return irq;
+}
+
+static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+	if (xintc_irqc->intr_mask & (1 << hw)) {
+		irq_set_chip_and_handler_name(irq, &intc_dev,
+						handle_edge_irq, "edge");
+		irq_clear_status_flags(irq, IRQ_LEVEL);
+	} else {
+		irq_set_chip_and_handler_name(irq, &intc_dev,
+						handle_level_irq, "level");
+		irq_set_status_flags(irq, IRQ_LEVEL);
+	}
+	return 0;
+}
+
+static const struct irq_domain_ops xintc_irq_domain_ops = {
+	.xlate = irq_domain_xlate_onetwocell,
+	.map = xintc_map,
+};
+
+static void xil_intc_irq_handler(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	u32 pending;
+
+	chained_irq_enter(chip, desc);
+	do {
+		pending = xintc_get_irq();
+		if (pending == -1U)
+			break;
+		generic_handle_irq(pending);
+	} while (true);
+	chained_irq_exit(chip, desc);
+}
+
+static int __init xilinx_intc_of_init(struct device_node *intc,
+					     struct device_node *parent)
+{
+	u32 nr_irq;
+	int ret, irq;
+	struct xintc_irq_chip *irqc;
+
+	if (xintc_irqc) {
+		pr_err("irq-xilinx: Multiple instances aren't supported\n");
+		return -EINVAL;
+	}
+
+	irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+	if (!irqc)
+		return -ENOMEM;
+
+	xintc_irqc = irqc;
+
+	irqc->base = of_iomap(intc, 0);
+	BUG_ON(!irqc->base);
+
+	ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
+	if (ret < 0) {
+		pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
+		goto err_alloc;
+	}
+
+	ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
+	if (ret < 0) {
+		pr_warn("irq-xilinx: unable to read xlnx,kind-of-intr\n");
+		irqc->intr_mask = 0;
+	}
+
+	if (irqc->intr_mask >> nr_irq)
+		pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
+
+	pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
+		intc, nr_irq, irqc->intr_mask);
+
+	/*
+	 * Disable all external interrupts until they are
+	 * explicitly requested.
+	 */
+	xintc_write(IER, 0);
+
+	/* Acknowledge any pending interrupts just in case. */
+	xintc_write(IAR, 0xffffffff);
+
+	/* Turn on the Master Enable. */
+	xintc_write(MER, MER_HIE | MER_ME);
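+	/*
+	 * Probe the endianness: if the (little-endian) write above does
+	 * not read back, assume a big-endian peripheral, flip the static
+	 * key used by xintc_read()/xintc_write() and retry the write.
+	 */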
+	if (!(xintc_read(MER) & (MER_HIE | MER_ME))) {
+		static_branch_enable(&xintc_is_be);
+		xintc_write(MER, MER_HIE | MER_ME);
+	}
+
+	irqc->root_domain = irq_domain_add_linear(intc, nr_irq,
+						  &xintc_irq_domain_ops, irqc);
+	if (!irqc->root_domain) {
+		pr_err("irq-xilinx: Unable to create IRQ domain\n");
+		goto err_alloc;
+	}
+
+	if (parent) {
+		irq = irq_of_parse_and_map(intc, 0);
+		if (irq) {
+			irq_set_chained_handler_and_data(irq,
+							 xil_intc_irq_handler,
+							 irqc);
+		} else {
+			pr_err("irq-xilinx: interrupts property not in DT\n");
+			ret = -EINVAL;
+			goto err_alloc;
+		}
+	} else {
+		irq_set_default_host(irqc->root_domain);
+	}
+
+	return 0;
+
+err_alloc:
+	xintc_irqc = NULL;
+	kfree(irqc);
+	return ret;
+}
+
+IRQCHIP_DECLARE(xilinx_intc_xps, "xlnx,xps-intc-1.00.a", xilinx_intc_of_init);
+IRQCHIP_DECLARE(xilinx_intc_opb, "xlnx,opb-intc-1.00.c", xilinx_intc_of_init);
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
new file mode 100644
index 0000000..e539500
--- /dev/null
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -0,0 +1,165 @@
+/*
+ * Xtensa MX interrupt distributor
+ *
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+
+#include <asm/mxregs.h>
+
+#define HW_IRQ_IPI_COUNT 2
+#define HW_IRQ_MX_BASE 2
+#define HW_IRQ_EXTERN_BASE 3
+
+static DEFINE_PER_CPU(unsigned int, cached_irq_mask);
+
+static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
+		irq_hw_number_t hw)
+{
+	if (hw < HW_IRQ_IPI_COUNT) {
+		struct irq_chip *irq_chip = d->host_data;
+		irq_set_chip_and_handler_name(irq, irq_chip,
+				handle_percpu_irq, "ipi");
+		irq_set_status_flags(irq, IRQ_LEVEL);
+		return 0;
+	}
+	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
+	return xtensa_irq_map(d, irq, hw);
+}
+
+/*
+ * Device Tree IRQ specifier translation function which works with one or
+ * two cell bindings. First cell value maps directly to the hwirq number.
+ * Second cell if present specifies whether hwirq number is external (1) or
+ * internal (0).
+ */
+static int xtensa_mx_irq_domain_xlate(struct irq_domain *d,
+		struct device_node *ctrlr,
+		const u32 *intspec, unsigned int intsize,
+		unsigned long *out_hwirq, unsigned int *out_type)
+{
+	return xtensa_irq_domain_xlate(intspec, intsize,
+			intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE,
+			out_hwirq, out_type);
+}
+
+static const struct irq_domain_ops xtensa_mx_irq_domain_ops = {
+	.xlate = xtensa_mx_irq_domain_xlate,
+	.map = xtensa_mx_irq_map,
+};
+
+void secondary_init_irq(void)
+{
+	__this_cpu_write(cached_irq_mask,
+			XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+			XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
+	set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+			XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
+}
+
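+/*
+ * External interrupts are masked in the MX distributor (the MIENG and
+ * MIENGSET registers), while core-internal interrupts are masked through
+ * the per-CPU INTENABLE special register, mirrored in cached_irq_mask.
+ */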
+static void xtensa_mx_irq_mask(struct irq_data *d)
+{
+	unsigned int mask = 1u << d->hwirq;
+
+	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+				XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
+		set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
+					HW_IRQ_MX_BASE), MIENG);
+	} else {
+		mask = __this_cpu_read(cached_irq_mask) & ~mask;
+		__this_cpu_write(cached_irq_mask, mask);
+		set_sr(mask, intenable);
+	}
+}
+
+static void xtensa_mx_irq_unmask(struct irq_data *d)
+{
+	unsigned int mask = 1u << d->hwirq;
+
+	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+				XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
+		set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
+					HW_IRQ_MX_BASE), MIENGSET);
+	} else {
+		mask |= __this_cpu_read(cached_irq_mask);
+		__this_cpu_write(cached_irq_mask, mask);
+		set_sr(mask, intenable);
+	}
+}
+
+static void xtensa_mx_irq_enable(struct irq_data *d)
+{
+	xtensa_mx_irq_unmask(d);
+}
+
+static void xtensa_mx_irq_disable(struct irq_data *d)
+{
+	xtensa_mx_irq_mask(d);
+}
+
+static void xtensa_mx_irq_ack(struct irq_data *d)
+{
+	set_sr(1 << d->hwirq, intclear);
+}
+
+static int xtensa_mx_irq_retrigger(struct irq_data *d)
+{
+	set_sr(1 << d->hwirq, intset);
+	return 1;
+}
+
+static int xtensa_mx_irq_set_affinity(struct irq_data *d,
+		const struct cpumask *dest, bool force)
+{
+	int cpu = cpumask_any_and(dest, cpu_online_mask);
+	unsigned mask = 1u << cpu;
+
+	set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return 0;
+}
+
+static struct irq_chip xtensa_mx_irq_chip = {
+	.name		= "xtensa-mx",
+	.irq_enable	= xtensa_mx_irq_enable,
+	.irq_disable	= xtensa_mx_irq_disable,
+	.irq_mask	= xtensa_mx_irq_mask,
+	.irq_unmask	= xtensa_mx_irq_unmask,
+	.irq_ack	= xtensa_mx_irq_ack,
+	.irq_retrigger	= xtensa_mx_irq_retrigger,
+	.irq_set_affinity = xtensa_mx_irq_set_affinity,
+};
+
+int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
+{
+	struct irq_domain *root_domain =
+		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
+				&xtensa_mx_irq_domain_ops,
+				&xtensa_mx_irq_chip);
+	irq_set_default_host(root_domain);
+	secondary_init_irq();
+	return 0;
+}
+
+static int __init xtensa_mx_init(struct device_node *np,
+		struct device_node *interrupt_parent)
+{
+	struct irq_domain *root_domain =
+		irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
+				&xtensa_mx_irq_chip);
+	irq_set_default_host(root_domain);
+	secondary_init_irq();
+	return 0;
+}
+IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
new file mode 100644
index 0000000..000cb54
--- /dev/null
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -0,0 +1,105 @@
+/*
+ * Xtensa built-in interrupt controller
+ *
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Kevin Chea
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+
+unsigned int cached_irq_mask;
+
+/*
+ * Device Tree IRQ specifier translation function which works with one or
+ * two cell bindings. First cell value maps directly to the hwirq number.
+ * Second cell if present specifies whether hwirq number is external (1) or
+ * internal (0).
+ */
+static int xtensa_pic_irq_domain_xlate(struct irq_domain *d,
+		struct device_node *ctrlr,
+		const u32 *intspec, unsigned int intsize,
+		unsigned long *out_hwirq, unsigned int *out_type)
+{
+	return xtensa_irq_domain_xlate(intspec, intsize,
+			intspec[0], intspec[0],
+			out_hwirq, out_type);
+}
+
+static const struct irq_domain_ops xtensa_irq_domain_ops = {
+	.xlate = xtensa_pic_irq_domain_xlate,
+	.map = xtensa_irq_map,
+};
+
+static void xtensa_irq_mask(struct irq_data *d)
+{
+	cached_irq_mask &= ~(1 << d->hwirq);
+	set_sr(cached_irq_mask, intenable);
+}
+
+static void xtensa_irq_unmask(struct irq_data *d)
+{
+	cached_irq_mask |= 1 << d->hwirq;
+	set_sr(cached_irq_mask, intenable);
+}
+
+static void xtensa_irq_enable(struct irq_data *d)
+{
+	xtensa_irq_unmask(d);
+}
+
+static void xtensa_irq_disable(struct irq_data *d)
+{
+	xtensa_irq_mask(d);
+}
+
+static void xtensa_irq_ack(struct irq_data *d)
+{
+	set_sr(1 << d->hwirq, intclear);
+}
+
+static int xtensa_irq_retrigger(struct irq_data *d)
+{
+	set_sr(1 << d->hwirq, intset);
+	return 1;
+}
+
+static struct irq_chip xtensa_irq_chip = {
+	.name		= "xtensa",
+	.irq_enable	= xtensa_irq_enable,
+	.irq_disable	= xtensa_irq_disable,
+	.irq_mask	= xtensa_irq_mask,
+	.irq_unmask	= xtensa_irq_unmask,
+	.irq_ack	= xtensa_irq_ack,
+	.irq_retrigger	= xtensa_irq_retrigger,
+};
+
+int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
+{
+	struct irq_domain *root_domain =
+		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
+				&xtensa_irq_domain_ops, &xtensa_irq_chip);
+	irq_set_default_host(root_domain);
+	return 0;
+}
+
+static int __init xtensa_pic_init(struct device_node *np,
+		struct device_node *interrupt_parent)
+{
+	struct irq_domain *root_domain =
+		irq_domain_add_linear(np, NR_IRQS, &xtensa_irq_domain_ops,
+				&xtensa_irq_chip);
+	irq_set_default_host(root_domain);
+	return 0;
+}
+IRQCHIP_DECLARE(xtensa_irq_chip, "cdns,xtensa-pic", xtensa_pic_init);
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
new file mode 100644
index 0000000..cb9d8ec
--- /dev/null
+++ b/drivers/irqchip/irq-zevio.c
@@ -0,0 +1,124 @@
+/*
+ *  linux/drivers/irqchip/irq-zevio.c
+ *
+ *  Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach/irq.h>
+#include <asm/exception.h>
+
+#define IO_STATUS	0x000
+#define IO_RAW_STATUS	0x004
+#define IO_ENABLE	0x008
+#define IO_DISABLE	0x00C
+#define IO_CURRENT	0x020
+#define IO_RESET	0x028
+#define IO_MAX_PRIORITY	0x02C
+
+#define IO_IRQ_BASE	0x000
+#define IO_FIQ_BASE	0x100
+
+#define IO_INVERT_SEL	0x200
+#define IO_STICKY_SEL	0x204
+#define IO_PRIORITY_SEL	0x300
+
+#define MAX_INTRS	32
+#define FIQ_START	MAX_INTRS
+
+static struct irq_domain *zevio_irq_domain;
+static void __iomem *zevio_irq_io;
+
+static void zevio_irq_ack(struct irq_data *irqd)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(irqd);
+	struct irq_chip_regs *regs = &irq_data_get_chip_type(irqd)->regs;
+
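+	/*
+	 * Acked by reading: a read of the ack register (IO_RESET, wired
+	 * up in zevio_of_init() below) clears the pending interrupt.
+	 */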
+	readl(gc->reg_base + regs->ack);
+}
+
+static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
+{
+	int irqnr;
+
+	while (readl(zevio_irq_io + IO_STATUS)) {
+		irqnr = readl(zevio_irq_io + IO_CURRENT);
+		handle_domain_irq(zevio_irq_domain, irqnr, regs);
+	}
+}
+
+static void __init zevio_init_irq_base(void __iomem *base)
+{
+	/* Disable all interrupts */
+	writel(~0, base + IO_DISABLE);
+
+	/* Accept interrupts of all priorities */
+	writel(0xF, base + IO_MAX_PRIORITY);
+
+	/* Reset existing interrupts */
+	readl(base + IO_RESET);
+}
+
+static int __init zevio_of_init(struct device_node *node,
+				struct device_node *parent)
+{
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+	struct irq_chip_generic *gc;
+	int ret;
+
+	if (WARN_ON(zevio_irq_io || zevio_irq_domain))
+		return -EBUSY;
+
+	zevio_irq_io = of_iomap(node, 0);
+	BUG_ON(!zevio_irq_io);
+
+	/* Do not invert interrupt status bits */
+	writel(~0, zevio_irq_io + IO_INVERT_SEL);
+
+	/* Disable sticky interrupts */
+	writel(0, zevio_irq_io + IO_STICKY_SEL);
+
+	/* We don't use IRQ priorities. Set each IRQ to highest priority. */
+	memset_io(zevio_irq_io + IO_PRIORITY_SEL, 0, MAX_INTRS * sizeof(u32));
+
+	/* Init IRQ and FIQ */
+	zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE);
+	zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE);
+
+	zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS,
+						 &irq_generic_chip_ops, NULL);
+	BUG_ON(!zevio_irq_domain);
+
+	ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1,
+					     "zevio_intc", handle_level_irq,
+					     clr, 0, IRQ_GC_INIT_MASK_CACHE);
+	BUG_ON(ret);
+
+	gc = irq_get_domain_generic_chip(zevio_irq_domain, 0);
+	gc->reg_base				= zevio_irq_io;
+	gc->chip_types[0].chip.irq_ack		= zevio_irq_ack;
+	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_disable_reg;
+	gc->chip_types[0].chip.irq_unmask	= irq_gc_unmask_enable_reg;
+	gc->chip_types[0].regs.mask		= IO_IRQ_BASE + IO_ENABLE;
+	gc->chip_types[0].regs.enable		= IO_IRQ_BASE + IO_ENABLE;
+	gc->chip_types[0].regs.disable		= IO_IRQ_BASE + IO_DISABLE;
+	gc->chip_types[0].regs.ack		= IO_IRQ_BASE + IO_RESET;
+
+	set_handle_irq(zevio_handle_irq);
+
+	pr_info("TI-NSPIRE classic IRQ controller\n");
+	return 0;
+}
+
+IRQCHIP_DECLARE(zevio_irq, "lsi,zevio-intc", zevio_of_init);
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
new file mode 100644
index 0000000..2b35e68
--- /dev/null
+++ b/drivers/irqchip/irqchip.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2012 Thomas Petazzoni
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip.h>
+
+/*
+ * This special of_device_id is the sentinel at the end of the
+ * of_device_id[] array of all irqchips. It is automatically placed at
+ * the end of the array by the linker, thanks to being part of a
+ * special section.
+ */
+static const struct of_device_id
+irqchip_of_match_end __used __section(__irqchip_of_table_end);
+
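+/*
+ * The table itself is filled in by the IRQCHIP_DECLARE() invocations in
+ * the individual drivers: each one emits an of_device_id into the
+ * __irqchip_of_table section, which of_irq_init() walks below.
+ */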
+extern struct of_device_id __irqchip_of_table[];
+
+void __init irqchip_init(void)
+{
+	of_irq_init(__irqchip_of_table);
+	acpi_probe_device_table(irqchip);
+}
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
new file mode 100644
index 0000000..7f0c0be
--- /dev/null
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -0,0 +1,291 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Driver for interrupt combiners in the Top-level Control and Status
+ * Registers (TCSR) hardware block in Qualcomm Technologies chips.
+ * An interrupt combiner in this block combines a set of interrupts by
+ * OR'ing the individual interrupt signals into a summary interrupt
+ * signal routed to a parent interrupt controller, and provides read-
+ * only, 32-bit registers to query the status of individual interrupts.
+ * The status bit for IRQ n is bit (n % 32) within register (n / 32)
+ * of the given combiner. Thus, each combiner can be described as a set
+ * of register offsets and the number of IRQs managed.
+ */
+
+#define pr_fmt(fmt) "QCOM80B1:" fmt
+
+#include <linux/acpi.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/platform_device.h>
+
+#define REG_SIZE 32
+
+struct combiner_reg {
+	void __iomem *addr;
+	unsigned long enabled;
+};
+
+struct combiner {
+	struct irq_domain   *domain;
+	int                 parent_irq;
+	u32                 nirqs;
+	u32                 nregs;
+	struct combiner_reg regs[0];
+};
+
+static inline int irq_nr(u32 reg, u32 bit)
+{
+	return reg * REG_SIZE + bit;
+}
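+
+/*
+ * Worked example: hwirq 40 is reported in bit 8 of register 1
+ * (40 / 32 == 1, 40 % 32 == 8), so irq_nr(1, 8) == 40.
+ */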
+
+/*
+ * Handler for the cascaded IRQ.
+ */
+static void combiner_handle_irq(struct irq_desc *desc)
+{
+	struct combiner *combiner = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	u32 reg;
+
+	chained_irq_enter(chip, desc);
+
+	for (reg = 0; reg < combiner->nregs; reg++) {
+		int virq;
+		int hwirq;
+		u32 bit;
+		u32 status;
+
+		bit = readl_relaxed(combiner->regs[reg].addr);
+		status = bit & combiner->regs[reg].enabled;
+		if (bit && !status)
+			pr_warn_ratelimited("Unexpected IRQ on CPU%d: (%08x %08lx %p)\n",
+					    smp_processor_id(), bit,
+					    combiner->regs[reg].enabled,
+					    combiner->regs[reg].addr);
+
+		while (status) {
+			bit = __ffs(status);
+			status &= ~(1 << bit);
+			hwirq = irq_nr(reg, bit);
+			virq = irq_find_mapping(combiner->domain, hwirq);
+			if (virq > 0)
+				generic_handle_irq(virq);
+
+		}
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
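+/*
+ * The combiner registers are read-only, so masking is purely a software
+ * notion: mask/unmask just toggle a bit in the per-register 'enabled'
+ * bitmap, and combiner_handle_irq() above skips status bits that are
+ * not enabled.
+ */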
+static void combiner_irq_chip_mask_irq(struct irq_data *data)
+{
+	struct combiner *combiner = irq_data_get_irq_chip_data(data);
+	struct combiner_reg *reg = combiner->regs + data->hwirq / REG_SIZE;
+
+	clear_bit(data->hwirq % REG_SIZE, &reg->enabled);
+}
+
+static void combiner_irq_chip_unmask_irq(struct irq_data *data)
+{
+	struct combiner *combiner = irq_data_get_irq_chip_data(data);
+	struct combiner_reg *reg = combiner->regs + data->hwirq / REG_SIZE;
+
+	set_bit(data->hwirq % REG_SIZE, &reg->enabled);
+}
+
+static struct irq_chip irq_chip = {
+	.irq_mask = combiner_irq_chip_mask_irq,
+	.irq_unmask = combiner_irq_chip_unmask_irq,
+	.name = "qcom-irq-combiner"
+};
+
+static int combiner_irq_map(struct irq_domain *domain, unsigned int irq,
+				   irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &irq_chip, handle_level_irq);
+	irq_set_chip_data(irq, domain->host_data);
+	irq_set_noprobe(irq);
+	return 0;
+}
+
+static void combiner_irq_unmap(struct irq_domain *domain, unsigned int irq)
+{
+	irq_domain_reset_irq_data(irq_get_irq_data(irq));
+}
+
+static int combiner_irq_translate(struct irq_domain *d, struct irq_fwspec *fws,
+				  unsigned long *hwirq, unsigned int *type)
+{
+	struct combiner *combiner = d->host_data;
+
+	if (is_acpi_node(fws->fwnode)) {
+		if (WARN_ON((fws->param_count != 2) ||
+			    (fws->param[0] >= combiner->nirqs) ||
+			    (fws->param[1] & IORESOURCE_IRQ_LOWEDGE) ||
+			    (fws->param[1] & IORESOURCE_IRQ_HIGHEDGE)))
+			return -EINVAL;
+
+		*hwirq = fws->param[0];
+		*type = fws->param[1];
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static const struct irq_domain_ops domain_ops = {
+	.map = combiner_irq_map,
+	.unmap = combiner_irq_unmap,
+	.translate = combiner_irq_translate
+};
+
+static acpi_status count_registers_cb(struct acpi_resource *ares, void *context)
+{
+	int *count = context;
+
+	if (ares->type == ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
+		++(*count);
+	return AE_OK;
+}
+
+static int count_registers(struct platform_device *pdev)
+{
+	acpi_handle ahandle = ACPI_HANDLE(&pdev->dev);
+	acpi_status status;
+	int count = 0;
+
+	if (!acpi_has_method(ahandle, METHOD_NAME__CRS))
+		return -EINVAL;
+
+	status = acpi_walk_resources(ahandle, METHOD_NAME__CRS,
+				     count_registers_cb, &count);
+	if (ACPI_FAILURE(status))
+		return -EINVAL;
+	return count;
+}
+
+struct get_registers_context {
+	struct device *dev;
+	struct combiner *combiner;
+	int err;
+};
+
+static acpi_status get_registers_cb(struct acpi_resource *ares, void *context)
+{
+	struct get_registers_context *ctx = context;
+	struct acpi_resource_generic_register *reg;
+	phys_addr_t paddr;
+	void __iomem *vaddr;
+
+	if (ares->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
+		return AE_OK;
+
+	reg = &ares->data.generic_reg;
+	paddr = reg->address;
+	if ((reg->space_id != ACPI_SPACE_MEM) ||
+	    (reg->bit_offset != 0) ||
+	    (reg->bit_width > REG_SIZE)) {
+		dev_err(ctx->dev, "Bad register resource @%pa\n", &paddr);
+		ctx->err = -EINVAL;
+		return AE_ERROR;
+	}
+
+	vaddr = devm_ioremap(ctx->dev, reg->address, REG_SIZE);
+	if (!vaddr) {
+		dev_err(ctx->dev, "Can't map register @%pa\n", &paddr);
+		ctx->err = -ENOMEM;
+		return AE_ERROR;
+	}
+
+	ctx->combiner->regs[ctx->combiner->nregs].addr = vaddr;
+	ctx->combiner->nirqs += reg->bit_width;
+	ctx->combiner->nregs++;
+	return AE_OK;
+}
+
+static int get_registers(struct platform_device *pdev, struct combiner *comb)
+{
+	acpi_handle ahandle = ACPI_HANDLE(&pdev->dev);
+	acpi_status status;
+	struct get_registers_context ctx;
+
+	if (!acpi_has_method(ahandle, METHOD_NAME__CRS))
+		return -EINVAL;
+
+	ctx.dev = &pdev->dev;
+	ctx.combiner = comb;
+	ctx.err = 0;
+
+	status = acpi_walk_resources(ahandle, METHOD_NAME__CRS,
+				     get_registers_cb, &ctx);
+	/* The walk can fail without the callback having set ctx.err. */
+	if (ACPI_FAILURE(status))
+		return ctx.err ? ctx.err : -EINVAL;
+	return 0;
+}
+
+static int __init combiner_probe(struct platform_device *pdev)
+{
+	struct combiner *combiner;
+	size_t alloc_sz;
+	int nregs;
+	int err;
+
+	nregs = count_registers(pdev);
+	if (nregs <= 0) {
+		dev_err(&pdev->dev, "Error reading register resources\n");
+		return -EINVAL;
+	}
+
+	alloc_sz = sizeof(*combiner) + sizeof(struct combiner_reg) * nregs;
+	combiner = devm_kzalloc(&pdev->dev, alloc_sz, GFP_KERNEL);
+	if (!combiner)
+		return -ENOMEM;
+
+	err = get_registers(pdev, combiner);
+	if (err < 0)
+		return err;
+
+	combiner->parent_irq = platform_get_irq(pdev, 0);
+	if (combiner->parent_irq <= 0) {
+		dev_err(&pdev->dev, "Error getting IRQ resource\n");
+		return -EPROBE_DEFER;
+	}
+
+	combiner->domain = irq_domain_create_linear(pdev->dev.fwnode, combiner->nirqs,
+						    &domain_ops, combiner);
+	if (!combiner->domain)
+		/* Errors printed by irq_domain_create_linear */
+		return -ENODEV;
+
+	irq_set_chained_handler_and_data(combiner->parent_irq,
+					 combiner_handle_irq, combiner);
+
+	dev_info(&pdev->dev, "Initialized with [p=%d,n=%d,r=%p]\n",
+		 combiner->parent_irq, combiner->nirqs, combiner->regs[0].addr);
+	return 0;
+}
+
+static const struct acpi_device_id qcom_irq_combiner_ids[] = {
+	{ "QCOM80B1", },
+	{ }
+};
+
+static struct platform_driver qcom_irq_combiner_probe = {
+	.driver = {
+		.name = "qcom-irq-combiner",
+		.acpi_match_table = ACPI_PTR(qcom_irq_combiner_ids),
+	},
+	.probe = combiner_probe,
+};
+builtin_platform_driver(qcom_irq_combiner_probe);
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
new file mode 100644
index 0000000..faa7d61
--- /dev/null
+++ b/drivers/irqchip/qcom-pdc.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define PDC_MAX_IRQS		126
+
+#define CLEAR_INTR(reg, intr)	((reg) & ~(1 << (intr)))
+#define ENABLE_INTR(reg, intr)	((reg) | (1 << (intr)))
+
+#define IRQ_ENABLE_BANK		0x10
+#define IRQ_i_CFG		0x110
+
+struct pdc_pin_region {
+	u32 pin_base;
+	u32 parent_base;
+	u32 cnt;
+};
+
+static DEFINE_RAW_SPINLOCK(pdc_lock);
+static void __iomem *pdc_base;
+static struct pdc_pin_region *pdc_region;
+static int pdc_region_cnt;
+
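+/*
+ * Both register files are arrays of 32-bit registers at a 4-byte
+ * stride: IRQ_ENABLE_BANK is indexed by bank (pin / 32), IRQ_i_CFG by
+ * pin number.
+ */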
+static void pdc_reg_write(int reg, u32 i, u32 val)
+{
+	writel_relaxed(val, pdc_base + reg + i * sizeof(u32));
+}
+
+static u32 pdc_reg_read(int reg, u32 i)
+{
+	return readl_relaxed(pdc_base + reg + i * sizeof(u32));
+}
+
+static void pdc_enable_intr(struct irq_data *d, bool on)
+{
+	int pin_out = d->hwirq;
+	u32 index, mask;
+	u32 enable;
+
+	index = pin_out / 32;
+	mask = pin_out % 32;
+
+	raw_spin_lock(&pdc_lock);
+	enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
+	enable = on ? ENABLE_INTR(enable, mask) : CLEAR_INTR(enable, mask);
+	pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
+	raw_spin_unlock(&pdc_lock);
+}
+
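+/*
+ * Mask state is mirrored at both levels: the PDC enable bit gates the
+ * always-on detection logic, while the parent GIC gates delivery to
+ * the CPU.
+ */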
+static void qcom_pdc_gic_mask(struct irq_data *d)
+{
+	pdc_enable_intr(d, false);
+	irq_chip_mask_parent(d);
+}
+
+static void qcom_pdc_gic_unmask(struct irq_data *d)
+{
+	pdc_enable_intr(d, true);
+	irq_chip_unmask_parent(d);
+}
+
+/*
+ * The GIC does not handle falling-edge or active-low interrupts. To allow
+ * them to be handled at the GIC, the PDC has an inverter that turns a
+ * falling edge into a rising edge and an active low into an active high.
+ * For the inverter to work, the polarity bits in the IRQ_i_CFG register
+ * have to be set as per the table below (one row per 3-bit config value,
+ * matching the enum that follows):
+ *
+ * 0b000  Level sensitive active low     LOW
+ * 0b001  Rising edge sensitive          NOT USED
+ * 0b010  Falling edge sensitive         LOW
+ * 0b011  Dual edge sensitive            NOT USED
+ * 0b100  Level sensitive active high    HIGH
+ * 0b101  Falling edge sensitive         NOT USED
+ * 0b110  Rising edge sensitive          HIGH
+ * 0b111  Dual edge sensitive            HIGH
+ */
+enum pdc_irq_config_bits {
+	PDC_LEVEL_LOW		= 0b000,
+	PDC_EDGE_FALLING	= 0b010,
+	PDC_LEVEL_HIGH		= 0b100,
+	PDC_EDGE_RISING		= 0b110,
+	PDC_EDGE_DUAL		= 0b111,
+};
+
+/**
+ * qcom_pdc_gic_set_type() - Configure the PDC for the interrupt
+ *
+ * @d: the interrupt data
+ * @type: the interrupt type
+ *
+ * If @type is edge triggered, forward it as rising edge, as the PDC
+ * takes care of converting a falling edge into a rising edge signal.
+ * If @type is level, forward it as level high, as the PDC takes care
+ * of converting an active low into an active high signal.
+ */
+static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
+{
+	int pin_out = d->hwirq;
+	enum pdc_irq_config_bits pdc_type;
+
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		pdc_type = PDC_EDGE_RISING;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		pdc_type = PDC_EDGE_FALLING;
+		type = IRQ_TYPE_EDGE_RISING;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		pdc_type = PDC_EDGE_DUAL;
+		type = IRQ_TYPE_EDGE_RISING;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		pdc_type = PDC_LEVEL_HIGH;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		pdc_type = PDC_LEVEL_LOW;
+		type = IRQ_TYPE_LEVEL_HIGH;
+		break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	pdc_reg_write(IRQ_i_CFG, pin_out, pdc_type);
+
+	return irq_chip_set_type_parent(d, type);
+}
+
+static struct irq_chip qcom_pdc_gic_chip = {
+	.name			= "PDC",
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_mask		= qcom_pdc_gic_mask,
+	.irq_unmask		= qcom_pdc_gic_unmask,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_set_type		= qcom_pdc_gic_set_type,
+	.flags			= IRQCHIP_MASK_ON_SUSPEND |
+				  IRQCHIP_SET_TYPE_MASKED |
+				  IRQCHIP_SKIP_SET_WAKE,
+	.irq_set_vcpu_affinity	= irq_chip_set_vcpu_affinity_parent,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+};
+
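+/*
+ * Translate a PDC pin number into the hwirq of the parent GIC using the
+ * regions parsed from "qcom,pdc-ranges"; returns ~0UL for a pin not
+ * covered by any region.
+ */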
+static irq_hw_number_t get_parent_hwirq(int pin)
+{
+	int i;
+	struct pdc_pin_region *region;
+
+	for (i = 0; i < pdc_region_cnt; i++) {
+		region = &pdc_region[i];
+		if (pin >= region->pin_base &&
+		    pin < region->pin_base + region->cnt)
+			return (region->parent_base + pin - region->pin_base);
+	}
+
+	WARN_ON(1);
+	return ~0UL;
+}
+
+static int qcom_pdc_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+			      unsigned long *hwirq, unsigned int *type)
+{
+	if (is_of_node(fwspec->fwnode)) {
+		if (fwspec->param_count != 2)
+			return -EINVAL;
+
+		*hwirq = fwspec->param[0];
+		*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
+			  unsigned int nr_irqs, void *data)
+{
+	struct irq_fwspec *fwspec = data;
+	struct irq_fwspec parent_fwspec;
+	irq_hw_number_t hwirq, parent_hwirq;
+	unsigned int type;
+	int ret;
+
+	ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return -EINVAL;
+
+	parent_hwirq = get_parent_hwirq(hwirq);
+	if (parent_hwirq == ~0UL)
+		return -EINVAL;
+
+	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+					    &qcom_pdc_gic_chip, NULL);
+	if (ret)
+		return ret;
+
+	if (type & IRQ_TYPE_EDGE_BOTH)
+		type = IRQ_TYPE_EDGE_RISING;
+
+	if (type & IRQ_TYPE_LEVEL_MASK)
+		type = IRQ_TYPE_LEVEL_HIGH;
+
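+	/*
+	 * Build a three-cell GIC specifier: cell 0 is 0 (SPI), cell 1
+	 * the parent hwirq, cell 2 the (possibly rewritten) trigger type.
+	 */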
+	parent_fwspec.fwnode      = domain->parent->fwnode;
+	parent_fwspec.param_count = 3;
+	parent_fwspec.param[0]    = 0;
+	parent_fwspec.param[1]    = parent_hwirq;
+	parent_fwspec.param[2]    = type;
+
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+					    &parent_fwspec);
+}
+
+static const struct irq_domain_ops qcom_pdc_ops = {
+	.translate	= qcom_pdc_translate,
+	.alloc		= qcom_pdc_alloc,
+	.free		= irq_domain_free_irqs_common,
+};
+
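+/*
+ * Pin regions are read from the "qcom,pdc-ranges" DT property, a list
+ * of <pin_base parent_base cnt> triplets. An illustrative (made-up)
+ * entry mapping PDC pins 0..93 onto parent SPIs starting at 512:
+ *
+ *	qcom,pdc-ranges = <0 512 94>;
+ */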
+static int pdc_setup_pin_mapping(struct device_node *np)
+{
+	int ret, n;
+
+	n = of_property_count_elems_of_size(np, "qcom,pdc-ranges", sizeof(u32));
+	if (n <= 0 || n % 3)
+		return -EINVAL;
+
+	pdc_region_cnt = n / 3;
+	pdc_region = kcalloc(pdc_region_cnt, sizeof(*pdc_region), GFP_KERNEL);
+	if (!pdc_region) {
+		pdc_region_cnt = 0;
+		return -ENOMEM;
+	}
+
+	for (n = 0; n < pdc_region_cnt; n++) {
+		ret = of_property_read_u32_index(np, "qcom,pdc-ranges",
+						 n * 3 + 0,
+						 &pdc_region[n].pin_base);
+		if (ret)
+			return ret;
+		ret = of_property_read_u32_index(np, "qcom,pdc-ranges",
+						 n * 3 + 1,
+						 &pdc_region[n].parent_base);
+		if (ret)
+			return ret;
+		ret = of_property_read_u32_index(np, "qcom,pdc-ranges",
+						 n * 3 + 2,
+						 &pdc_region[n].cnt);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
+{
+	struct irq_domain *parent_domain, *pdc_domain;
+	int ret;
+
+	pdc_base = of_iomap(node, 0);
+	if (!pdc_base) {
+		pr_err("%pOF: unable to map PDC registers\n", node);
+		return -ENXIO;
+	}
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain) {
+		pr_err("%pOF: unable to find PDC's parent domain\n", node);
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	ret = pdc_setup_pin_mapping(node);
+	if (ret) {
+		pr_err("%pOF: failed to init PDC pin-hwirq mapping\n", node);
+		goto fail;
+	}
+
+	pdc_domain = irq_domain_create_hierarchy(parent_domain, 0, PDC_MAX_IRQS,
+						 of_fwnode_handle(node),
+						 &qcom_pdc_ops, NULL);
+	if (!pdc_domain) {
+		pr_err("%pOF: PDC domain add failed\n", node);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	kfree(pdc_region);
+	iounmap(pdc_base);
+	return ret;
+}
+
+IRQCHIP_DECLARE(pdc_sdm845, "qcom,sdm845-pdc", qcom_pdc_init);
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
new file mode 100644
index 0000000..1518ba3
--- /dev/null
+++ b/drivers/irqchip/spear-shirq.c
@@ -0,0 +1,288 @@
+/*
+ * SPEAr platform shared irq layer source file
+ *
+ * Copyright (C) 2009-2012 ST Microelectronics
+ * Viresh Kumar <vireshk@kernel.org>
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+/*
+ * struct spear_shirq: shared irq structure
+ *
+ * base:	Base register address
+ * status_reg:	Status register offset for chained interrupt handler
+ * mask_reg:	Mask register offset for irq chip
+ * mask:	Mask to apply to the status register
+ * virq_base:	Base virtual interrupt number
+ * nr_irqs:	Number of interrupts handled by this block
+ * offset:	Bit offset of the first interrupt
+ * irq_chip:	Interrupt controller chip used for this instance;
+ *		if NULL, the group is disabled but its interrupts are
+ *		still accounted for in the virq space
+ */
+struct spear_shirq {
+	void __iomem		*base;
+	u32			status_reg;
+	u32			mask_reg;
+	u32			mask;
+	u32			virq_base;
+	u32			nr_irqs;
+	u32			offset;
+	struct irq_chip		*irq_chip;
+};
+
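+/*
+ * Every block below obeys the invariant
+ *	mask == ((1 << nr_irqs) - 1) << offset
+ * i.e. nr_irqs consecutive status bits starting at bit 'offset'.
+ */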
+/* spear300 shared irq registers offsets and masks */
+#define SPEAR300_INT_ENB_MASK_REG	0x54
+#define SPEAR300_INT_STS_MASK_REG	0x58
+
+static DEFINE_RAW_SPINLOCK(shirq_lock);
+
+static void shirq_irq_mask(struct irq_data *d)
+{
+	struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
+	u32 val, shift = d->irq - shirq->virq_base + shirq->offset;
+	u32 __iomem *reg = shirq->base + shirq->mask_reg;
+
+	raw_spin_lock(&shirq_lock);
+	val = readl(reg) & ~(0x1 << shift);
+	writel(val, reg);
+	raw_spin_unlock(&shirq_lock);
+}
+
+static void shirq_irq_unmask(struct irq_data *d)
+{
+	struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
+	u32 val, shift = d->irq - shirq->virq_base + shirq->offset;
+	u32 __iomem *reg = shirq->base + shirq->mask_reg;
+
+	raw_spin_lock(&shirq_lock);
+	val = readl(reg) | (0x1 << shift);
+	writel(val, reg);
+	raw_spin_unlock(&shirq_lock);
+}
+
+static struct irq_chip shirq_chip = {
+	.name		= "spear-shirq",
+	.irq_mask	= shirq_irq_mask,
+	.irq_unmask	= shirq_irq_unmask,
+};
+
+static struct spear_shirq spear300_shirq_ras1 = {
+	.offset		= 0,
+	.nr_irqs	= 9,
+	.mask		= ((0x1 << 9) - 1) << 0,
+	.irq_chip	= &shirq_chip,
+	.status_reg	= SPEAR300_INT_STS_MASK_REG,
+	.mask_reg	= SPEAR300_INT_ENB_MASK_REG,
+};
+
+static struct spear_shirq *spear300_shirq_blocks[] = {
+	&spear300_shirq_ras1,
+};
+
+/* spear310 shared irq registers offsets and masks */
+#define SPEAR310_INT_STS_MASK_REG	0x04
+
+static struct spear_shirq spear310_shirq_ras1 = {
+	.offset		= 0,
+	.nr_irqs	= 8,
+	.mask		= ((0x1 << 8) - 1) << 0,
+	.irq_chip	= &dummy_irq_chip,
+	.status_reg	= SPEAR310_INT_STS_MASK_REG,
+};
+
+static struct spear_shirq spear310_shirq_ras2 = {
+	.offset		= 8,
+	.nr_irqs	= 5,
+	.mask		= ((0x1 << 5) - 1) << 8,
+	.irq_chip	= &dummy_irq_chip,
+	.status_reg	= SPEAR310_INT_STS_MASK_REG,
+};
+
+static struct spear_shirq spear310_shirq_ras3 = {
+	.offset		= 13,
+	.nr_irqs	= 1,
+	.mask		= ((0x1 << 1) - 1) << 13,
+	.irq_chip	= &dummy_irq_chip,
+	.status_reg	= SPEAR310_INT_STS_MASK_REG,
+};
+
+static struct spear_shirq spear310_shirq_intrcomm_ras = {
+	.offset		= 14,
+	.nr_irqs	= 3,
+	.mask		= ((0x1 << 3) - 1) << 14,
+	.irq_chip	= &dummy_irq_chip,
+	.status_reg	= SPEAR310_INT_STS_MASK_REG,
+};
+
+static struct spear_shirq *spear310_shirq_blocks[] = {
+	&spear310_shirq_ras1,
+	&spear310_shirq_ras2,
+	&spear310_shirq_ras3,
+	&spear310_shirq_intrcomm_ras,
+};
+
+/* spear320 shared irq registers offsets and masks */
+#define SPEAR320_INT_STS_MASK_REG		0x04
+#define SPEAR320_INT_CLR_MASK_REG		0x04
+#define SPEAR320_INT_ENB_MASK_REG		0x08
+
+static struct spear_shirq spear320_shirq_ras3 = {
+	.offset		= 0,
+	.nr_irqs	= 7,
+	.mask		= ((0x1 << 7) - 1) << 0,
+};
+
+static struct spear_shirq spear320_shirq_ras1 = {
+	.offset		= 7,
+	.nr_irqs	= 3,
+	.mask		= ((0x1 << 3) - 1) << 7,
+	.irq_chip	= &dummy_irq_chip,
+	.status_reg	= SPEAR320_INT_STS_MASK_REG,
+};
+
+static struct spear_shirq spear320_shirq_ras2 = {
+	.offset		= 10,
+	.nr_irqs	= 1,
+	.mask		= ((0x1 << 1) - 1) << 10,
+	.irq_chip	= &dummy_irq_chip,
+	.status_reg	= SPEAR320_INT_STS_MASK_REG,
+};
+
+static struct spear_shirq spear320_shirq_intrcomm_ras = {
+	.offset		= 11,
+	.nr_irqs	= 11,
+	.mask		= ((0x1 << 11) - 1) << 11,
+	.irq_chip	= &dummy_irq_chip,
+	.status_reg	= SPEAR320_INT_STS_MASK_REG,
+};
+
+static struct spear_shirq *spear320_shirq_blocks[] = {
+	&spear320_shirq_ras3,
+	&spear320_shirq_ras1,
+	&spear320_shirq_ras2,
+	&spear320_shirq_intrcomm_ras,
+};
+
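+/*
+ * Chained handler: resolve each pending bit in the shared status
+ * register to its linear virq. The mapping is a plain offset because
+ * the legacy domain below hands out consecutive descriptors.
+ */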
+static void shirq_handler(struct irq_desc *desc)
+{
+	struct spear_shirq *shirq = irq_desc_get_handler_data(desc);
+	u32 pend;
+
+	pend = readl(shirq->base + shirq->status_reg) & shirq->mask;
+	pend >>= shirq->offset;
+
+	while (pend) {
+		int irq = __ffs(pend);
+
+		pend &= ~(0x1 << irq);
+		generic_handle_irq(shirq->virq_base + irq);
+	}
+}
+
+static void __init spear_shirq_register(struct spear_shirq *shirq,
+					int parent_irq)
+{
+	int i;
+
+	if (!shirq->irq_chip)
+		return;
+
+	irq_set_chained_handler_and_data(parent_irq, shirq_handler, shirq);
+
+	for (i = 0; i < shirq->nr_irqs; i++) {
+		irq_set_chip_and_handler(shirq->virq_base + i,
+					 shirq->irq_chip, handle_simple_irq);
+		irq_set_chip_data(shirq->virq_base + i, shirq);
+	}
+}
+
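+/*
+ * All blocks of one controller share a single legacy domain in which
+ * hwirq n maps to virq_base + n; each block's virq_base is therefore
+ * just the mapping of its first hwirq.
+ */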
+static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
+		struct device_node *np)
+{
+	int i, parent_irq, virq_base, hwirq = 0, nr_irqs = 0;
+	struct irq_domain *shirq_domain;
+	void __iomem *base;
+
+	base = of_iomap(np, 0);
+	if (!base) {
+		pr_err("%s: failed to map shirq registers\n", __func__);
+		return -ENXIO;
+	}
+
+	for (i = 0; i < block_nr; i++)
+		nr_irqs += shirq_blocks[i]->nr_irqs;
+
+	virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
+	if (virq_base < 0) {
+		pr_err("%s: irq desc alloc failed\n", __func__);
+		goto err_unmap;
+	}
+
+	shirq_domain = irq_domain_add_legacy(np, nr_irqs, virq_base, 0,
+			&irq_domain_simple_ops, NULL);
+	if (WARN_ON(!shirq_domain)) {
+		pr_warn("%s: irq domain init failed\n", __func__);
+		goto err_free_desc;
+	}
+
+	for (i = 0; i < block_nr; i++) {
+		shirq_blocks[i]->base = base;
+		shirq_blocks[i]->virq_base = irq_find_mapping(shirq_domain,
+				hwirq);
+
+		parent_irq = irq_of_parse_and_map(np, i);
+		spear_shirq_register(shirq_blocks[i], parent_irq);
+		hwirq += shirq_blocks[i]->nr_irqs;
+	}
+
+	return 0;
+
+err_free_desc:
+	irq_free_descs(virq_base, nr_irqs);
+err_unmap:
+	iounmap(base);
+	return -ENXIO;
+}
+
+static int __init spear300_shirq_of_init(struct device_node *np,
+					 struct device_node *parent)
+{
+	return shirq_init(spear300_shirq_blocks,
+			ARRAY_SIZE(spear300_shirq_blocks), np);
+}
+IRQCHIP_DECLARE(spear300_shirq, "st,spear300-shirq", spear300_shirq_of_init);
+
+static int __init spear310_shirq_of_init(struct device_node *np,
+					 struct device_node *parent)
+{
+	return shirq_init(spear310_shirq_blocks,
+			ARRAY_SIZE(spear310_shirq_blocks), np);
+}
+IRQCHIP_DECLARE(spear310_shirq, "st,spear310-shirq", spear310_shirq_of_init);
+
+static int __init spear320_shirq_of_init(struct device_node *np,
+					 struct device_node *parent)
+{
+	return shirq_init(spear320_shirq_blocks,
+			ARRAY_SIZE(spear320_shirq_blocks), np);
+}
+IRQCHIP_DECLARE(spear320_shirq, "st,spear320-shirq", spear320_shirq_of_init);